hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f4a2510555d14215c6a77a34b928cda043c130 | 4,628 | py | Python | bot/reviewbot/tools/jshint.py | reviewboard/ReviewBot | 6c529706229da647cc8cdef27db75cebc0abf216 | [
"MIT"
] | 91 | 2015-04-30T21:00:40.000Z | 2022-03-30T07:19:03.000Z | bot/reviewbot/tools/jshint.py | reviewboard/ReviewBot | 6c529706229da647cc8cdef27db75cebc0abf216 | [
"MIT"
] | 11 | 2015-01-08T13:48:21.000Z | 2018-07-03T13:18:35.000Z | bot/reviewbot/tools/jshint.py | reviewboard/ReviewBot | 6c529706229da647cc8cdef27db75cebc0abf216 | [
"MIT"
] | 23 | 2015-04-03T17:17:00.000Z | 2022-03-07T08:14:27.000Z | from __future__ import unicode_literals
import json
import os
from reviewbot.config import config
from reviewbot.tools.base import BaseTool, FilePatternsFromSettingMixin
from reviewbot.utils.filesystem import make_tempfile
from reviewbot.utils.process import execute
class JSHintTool(FilePatternsFromSettingMixin, BaseTool):
    """Review Bot tool to run jshint."""

    name = 'JSHint'
    version = '1.0'
    description = ('Checks JavaScript code for style errors and potential '
                   'problems using JSHint, a JavaScript Code Quality Tool.')
    timeout = 30

    exe_dependencies = ['jshint']
    file_patterns = ['*.js']
    file_extension_setting = ['extra_ext_checks']

    options = [
        {
            'name': 'extra_ext_checks',
            'field_type': 'django.forms.CharField',
            'default': '',
            'field_options': {
                'label': 'Extra File Extensions',
                'help_text': ('A comma-separated list of extra file '
                              'extensions to check (only .js is included by '
                              'default).'),
                'required': False,
            },
        },
        {
            'name': 'extract_js_from_html',
            'field_type': 'django.forms.ChoiceField',
            'field_options': {
                'label': 'Extract JavaScript from HTML',
                'help_text': ('Whether JSHint should extract JavaScript from '
                              'HTML files. If set to "auto", it will only try '
                              'extracting JavaScript if the file looks like '
                              'an HTML file.'),
                'choices': (
                    ('auto', 'auto'),
                    ('always', 'always'),
                    ('never', 'never'),
                ),
                'initial': 'never',
                'required': False,
            },
        },
        {
            'name': 'config',
            'field_type': 'djblets.db.fields.JSONFormField',
            'default': '',
            'field_options': {
                'label': 'Configuration',
                'help_text': ('JSON specifying which JSHint options to turn '
                              'on or off. (This is equivalent to the contents '
                              'of a .jshintrc file.)'),
                'required': False,
            },
            'widget': {
                'type': 'django.forms.Textarea',
                'attrs': {
                    'cols': 70,
                    'rows': 10,
                },
            },
        },
    ]

    #: Absolute path to the bundled JSON reporter script passed to jshint.
    REPORTER_PATH = os.path.abspath(os.path.join(__file__, '..', 'support',
                                                 'js', 'jshint_reporter.js'))

    def build_base_command(self, **kwargs):
        """Build the base command line used to review files.

        If a custom JSHint configuration is set, this will save it to a
        temporary file and pass it along for all JSHint runs.

        Args:
            **kwargs (dict, unused):
                Additional keyword arguments.

        Returns:
            list of unicode:
            The base command line.
        """
        settings = self.settings
        cmd = [
            config['exe_paths']['jshint'],

            # Fall back to this option's documented initial value ("never")
            # for settings saved before the option existed, rather than
            # raising a KeyError (the option declares no 'default').
            '--extract=%s' % settings.get('extract_js_from_html', 'never'),
            '--reporter=%s' % self.REPORTER_PATH,
        ]

        # If any configuration was specified, create a temporary config file.
        # This will be used for each file.
        config_content = settings.get('config')

        if config_content:
            cmd.append('--config=%s'
                       % make_tempfile(content=config_content.encode('utf-8')))

        return cmd

    def handle_file(self, f, path, base_command, **kwargs):
        """Perform a review of a single file.

        Args:
            f (reviewbot.processing.review.File):
                The file to process.

            path (unicode):
                The local path to the patched file to review.

            base_command (list of unicode):
                The base command used to run JSHint.

            **kwargs (dict, unused):
                Additional keyword arguments.
        """
        output = execute(base_command + [path],
                         ignore_errors=True)

        if output:
            # The custom reporter emits a JSON list of error records.
            errors = json.loads(output)

            for error in errors:
                f.comment(text=error['msg'],
                          first_line=error['line'],
                          start_column=error['column'],
                          error_code=error['code'])
| 33.057143 | 79 | 0.496543 | from __future__ import unicode_literals
import json
import os
from reviewbot.config import config
from reviewbot.tools.base import BaseTool, FilePatternsFromSettingMixin
from reviewbot.utils.filesystem import make_tempfile
from reviewbot.utils.process import execute
class JSHintTool(FilePatternsFromSettingMixin, BaseTool):
    """Review Bot tool to run jshint on JavaScript files."""

    name = 'JSHint'
    version = '1.0'
    description = ('Checks JavaScript code for style errors and potential '
                   'problems using JSHint, a JavaScript Code Quality Tool.')
    timeout = 30
    exe_dependencies = ['jshint']
    file_patterns = ['*.js']
    file_extension_setting = ['extra_ext_checks']
    options = [
        {
            'name': 'extra_ext_checks',
            'field_type': 'django.forms.CharField',
            'default': '',
            'field_options': {
                'label': 'Extra File Extensions',
                'help_text': ('A comma-separated list of extra file '
                              'extensions to check (only .js is included by '
                              'default).'),
                'required': False,
            },
        },
        {
            'name': 'extract_js_from_html',
            'field_type': 'django.forms.ChoiceField',
            'field_options': {
                'label': 'Extract JavaScript from HTML',
                'help_text': ('Whether JSHint should extract JavaScript from '
                              'HTML files. If set to "auto", it will only try '
                              'extracting JavaScript if the file looks like '
                              'an HTML file.'),
                'choices': (
                    ('auto', 'auto'),
                    ('always', 'always'),
                    ('never', 'never'),
                ),
                'initial': 'never',
                'required': False,
            },
        },
        {
            'name': 'config',
            'field_type': 'djblets.db.fields.JSONFormField',
            'default': '',
            'field_options': {
                'label': 'Configuration',
                'help_text': ('JSON specifying which JSHint options to turn '
                              'on or off. (This is equivalent to the contents '
                              'of a .jshintrc file.)'),
                'required': False,
            },
            'widget': {
                'type': 'django.forms.Textarea',
                'attrs': {
                    'cols': 70,
                    'rows': 10,
                },
            },
        },
    ]
    # Absolute path to the JSON reporter script shipped alongside this tool,
    # resolved relative to this module's location.
    REPORTER_PATH = os.path.abspath(os.path.join(__file__, '..', 'support',
                                                 'js', 'jshint_reporter.js'))

    def build_base_command(self, **kwargs):
        """Build the base jshint command line shared by all file runs.

        If a custom JSHint configuration was provided in the settings, it
        is written to a temporary file and passed via ``--config``.

        Args:
            **kwargs (dict, unused):
                Additional keyword arguments.

        Returns:
            list of unicode:
            The base command line.
        """
        settings = self.settings
        cmd = [
            config['exe_paths']['jshint'],
            '--extract=%s' % settings['extract_js_from_html'],
            '--reporter=%s' % self.REPORTER_PATH,
        ]
        # Only emit --config when a configuration was actually supplied;
        # the temporary file is reused for every reviewed file.
        config_content = self.settings['config']
        if config_content:
            cmd.append('--config=%s'
                       % make_tempfile(content=config_content.encode('utf-8')))
        return cmd

    def handle_file(self, f, path, base_command, **kwargs):
        """Run jshint on one file and post a comment per reported error.

        Args:
            f (reviewbot.processing.review.File):
                The file to process.

            path (unicode):
                The local path to the patched file to review.

            base_command (list of unicode):
                The base command used to run JSHint.

            **kwargs (dict, unused):
                Additional keyword arguments.
        """
        output = execute(base_command + [path],
                         ignore_errors=True)
        if output:
            # The custom reporter produces a JSON list of error records.
            errors = json.loads(output)
            for error in errors:
                f.comment(text=error['msg'],
                          first_line=error['line'],
                          start_column=error['column'],
                          error_code=error['code'])
| true | true |
f7f4a2c03ded93e13996f0859423af03e3096b1a | 106,592 | py | Python | api/migrations/0001_initial.py | LuchaComics/comicscantina-django | 78e630000fb1e1f6299f80655c8f57a496236ab1 | [
"BSD-2-Clause"
] | null | null | null | api/migrations/0001_initial.py | LuchaComics/comicscantina-django | 78e630000fb1e1f6299f80655c8f57a496236ab1 | [
"BSD-2-Clause"
] | 5 | 2021-03-19T02:58:32.000Z | 2022-03-11T23:57:30.000Z | api/migrations/0001_initial.py | lendierickx/comics-django | 3f5c6e85c89ff157dbf67179ea3b9007d7de446c | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
import django.db.models.deletion
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BannedDomain',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(unique=True, db_index=True, max_length=63)),
('banned_on', models.DateTimeField(auto_now_add=True)),
('reason', models.CharField(null=True, blank=True, max_length=127)),
],
options={
'db_table': 'ec_banned_domains',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='BannedIP',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('address', models.GenericIPAddressField(unique=True, db_index=True)),
('banned_on', models.DateTimeField(auto_now_add=True)),
('reason', models.CharField(null=True, blank=True, max_length=127)),
],
options={
'db_table': 'ec_banned_ips',
'ordering': ('address',),
},
),
migrations.CreateModel(
name='BannedWord',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('text', models.CharField(unique=True, db_index=True, max_length=63)),
('banned_on', models.DateTimeField(auto_now_add=True)),
('reason', models.CharField(null=True, blank=True, max_length=127)),
],
options={
'db_table': 'ec_banned_words',
'ordering': ('text',),
},
),
migrations.CreateModel(
name='Brand',
fields=[
('brand_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(db_index=True, max_length=127)),
],
options={
'db_table': 'ec_brands',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='CatalogItem',
fields=[
('catalog_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(db_index=True, max_length=127)),
('type', models.PositiveSmallIntegerField(choices=[(1, 'Comic'), (2, 'Furniture'), (3, 'Coin')], default=1, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)], db_index=True)),
('description', models.TextField(default='', blank=True)),
('brand_name', models.CharField(db_index=True, max_length=127)),
('created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('length_in_meters', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0)], blank=True)),
('width_in_meters', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0)], blank=True)),
('height_in_meters', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0)], blank=True)),
('weight_in_kilograms', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0)], blank=True)),
('volume_in_litres', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0)], blank=True)),
('materials', models.CharField(null=True, blank=True, max_length=127)),
('is_tangible', models.BooleanField(default=True)),
('is_flammable', models.BooleanField(default=False)),
('is_biohazard', models.BooleanField(default=False)),
('is_toxic', models.BooleanField(default=False)),
('is_explosive', models.BooleanField(default=False)),
('is_corrosive', models.BooleanField(default=False)),
('is_volatile', models.BooleanField(default=False)),
('is_radioactive', models.BooleanField(default=False)),
('is_restricted', models.BooleanField(default=False)),
('restrictions', models.TextField(default='', blank=True)),
],
options={
'db_table': 'ec_catalog_items',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Category',
fields=[
('category_id', models.AutoField(serialize=False, primary_key=True)),
('parent_id', models.PositiveIntegerField(default=0)),
('name', models.CharField(max_length=127)),
],
options={
'db_table': 'ec_categories',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Comic',
fields=[
('comic_id', models.AutoField(serialize=False, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('is_cgc_rated', models.BooleanField(default=False)),
('age', models.PositiveSmallIntegerField(choices=[(1, 'Gold'), (2, 'Silver'), (3, 'Bronze'), (4, 'Copper')], null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(4)], blank=True)),
('cgc_rating', models.FloatField(choices=[(10.0, '10.0'), (9.9, '9.9'), (9.8, '9.8'), (9.6, '9.6'), (9.4, '9.4'), (9.2, '9.2'), (9.0, '9.0'), (8.5, '8.5'), (8.0, '8.0'), (7.5, '7.5'), (7.0, '7.0'), (6.5, '6.5'), (6.0, '6.0'), (5.5, '5.5'), (5.0, '5.0'), (4.5, '4.5'), (4.0, '4.0'), (3.5, '3.5'), (3.0, '3.0'), (2.5, '2.5'), (2.0, '2.0'), (1.8, '1.8'), (1.5, '1.5'), (1.0, '1.0'), (0.5, '.5'), (0, 'NR')], null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(10)], blank=True)),
('label_colour', models.CharField(choices=[('Purple', 'Purple'), ('Red', 'Red'), ('Blue', 'Blue'), ('Yellow', 'Yellow')], null=True, blank=True, max_length=63)),
('condition_rating', models.PositiveSmallIntegerField(choices=[(10, 'Near Mint'), (8, 'Very Fine'), (6, 'Fine'), (4, 'Very Good'), (2, 'Good'), (1, 'Fair'), (0, 'Poor')], null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)], blank=True)),
('is_canadian_priced_variant', models.BooleanField(default=False)),
('is_variant_cover', models.BooleanField(default=False)),
('is_retail_incentive_variant', models.BooleanField(default=False)),
('is_newsstand_edition', models.BooleanField(default=False)),
('catalog', models.ForeignKey(to='api.CatalogItem', blank=True, null=True)),
],
options={
'db_table': 'ec_comics',
'ordering': ('issue',),
},
),
migrations.CreateModel(
name='Customer',
fields=[
('customer_id', models.AutoField(serialize=False, primary_key=True)),
('joined', models.DateTimeField(db_index=True, auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('is_suspended', models.BooleanField(default=False)),
('is_tos_signed', models.BooleanField(default=False)),
('wants_newsletter', models.BooleanField(default=False)),
('wants_flyers', models.BooleanField(default=False)),
('is_verified', models.BooleanField(default=False)),
('verification_key', models.CharField(default='', blank=True, max_length=63)),
('first_name', models.CharField(db_index=True, max_length=63)),
('last_name', models.CharField(db_index=True, max_length=63)),
('email', models.EmailField(unique=True, null=True, blank=True, db_index=True, max_length=254)),
('date_of_birth', models.DateField(default=datetime.datetime.now)),
('billing_phone', models.CharField(null=True, blank=True, db_index=True, max_length=10)),
('billing_street_name', models.CharField(max_length=63)),
('billing_street_number', models.CharField(max_length=15)),
('billing_unit_number', models.CharField(null=True, blank=True, max_length=15)),
('billing_city', models.CharField(max_length=63)),
('billing_province', models.CharField(choices=[('Alberta', 'Alberta'), ('British Columbia', 'British Columbia'), ('Manitoba', 'Manitoba'), ('New Brunswick', 'New Brunswick'), ('Newfoundland and Labrador', 'Newfoundland and Labrador'), ('Nova Scotia', 'Nova Scotia'), ('Ontario', 'Ontario'), ('Prince Edward Island', 'Prince Edward Island'), ('Quebec', 'Quebec'), ('Saskatchewan', 'Saskatchewan'), ('Northwest Territories', 'Northwest Territories'), ('Nunavut', 'Nunavut'), ('Yukon', 'Yukon'), ('Alabama', 'Alabama'), ('Alaska', 'Alaska'), ('Arizona', 'Arizona'), ('Arkansas', 'Arkansas'), ('California', 'California'), ('Colorado', 'Colorado'), ('Connecticut', 'Connecticut'), ('Delaware', 'Delaware'), ('Florida', 'Florida'), ('Georgia', 'Georgia'), ('Hawaii', 'Hawaii'), ('Idaho', 'Idaho'), ('Illinois', 'Illinois'), ('Indiana', 'Indiana'), ('Iowa', 'Iowa'), ('Kansas', 'Kansas'), ('Kentucky', 'Kentucky'), ('Louisiana', 'Louisiana'), ('Maine', 'Maine'), ('Maryland', 'Maryland'), ('Massachusetts', 'Massachusetts'), ('Michigan', 'Michigan'), ('Minnesota', 'Minnesota'), ('Mississippi', 'Mississippi'), ('Missouri', 'Missouri'), ('Montana', 'Montana'), ('Nebraska', 'Nebraska'), ('Nevada', 'Nevada'), ('New Hampshire', 'New Hampshire'), ('New Jersey', 'New Jersey'), ('New Mexico', 'New Mexico'), ('New York', 'New York'), ('North Carolina', 'North Carolina'), ('North Dakota', 'North Dakota'), ('Ohio', 'Ohio'), ('Oklahoma', 'Oklahoma'), ('Oregon', 'Oregon'), ('Pennsylvania', 'Pennsylvania'), ('Rhode Island', 'Rhode Island'), ('South Carolina', 'South Carolina'), ('South Dakota', 'South Dakota'), ('Tennessee', 'Tennessee'), ('Texas', 'Texas'), ('Utah', 'Utah'), ('Vermont', 'Vermont'), ('Virginia', 'Virginia'), ('Washington', 'Washington'), ('West Virginia', 'West Virginia'), ('Wisconsin', 'Wisconsin'), ('Wyoming', 'Wyoming'), ('Other', 'Other')], max_length=63)),
('billing_country', models.CharField(choices=[('Canada', 'Canada'), ('United States', 'United States'), ('Mexico', 'Mexico'), ('Afghanistan', 'Afghanistan'), ('Albania', 'Albania'), ('Algeria', 'Algeria'), ('Andorra', 'Andorra'), ('Angola', 'Angola'), ('Antigua and Barbuda', 'Antigua and Barbuda'), ('Argentina', 'Argentina'), ('Armenia', 'Armenia'), ('Aruba', 'Aruba'), ('Australia', 'Australia'), ('Austria', 'Austria'), ('Azerbaijan', 'Azerbaijan'), ('Bahamas, The', 'Bahamas, The'), ('Bahrain', 'Bahrain'), ('Bangladesh', 'Bangladesh'), ('Barbados', 'Barbados'), ('Belarus', 'Belarus'), ('Belgium', 'Belgium'), ('Belize', 'Belize'), ('Benin', 'Benin'), ('Bhutan', 'Bhutan'), ('Bolivia', 'Bolivia'), ('Bosnia and Herzegovina', 'Bosnia and Herzegovina'), ('Botswana', 'Botswana'), ('Brazil', 'Brazil'), ('Brunei', 'Brunei'), ('Bulgaria', 'Bulgaria'), ('Burkina Faso', 'Burkina Faso'), ('Burma', 'Burma'), ('Burundi', 'Burundi'), ('Cambodia', 'Cambodia'), ('Cameroon', 'Cameroon'), ('Cape Verde', 'Cape Verde'), ('Central African Republic', 'Central African Republic'), ('Chad', 'Chad'), ('Chile', 'Chile'), ('China', 'China'), ('Colombia', 'Colombia'), ('Comoros', 'Comoros'), ('Congo, Democratic Republic of the', 'Congo, Democratic Republic of the'), ('Congo, Republic of the', 'Congo, Republic of the'), ('Costa Rica', 'Costa Rica'), ("Cote d'Ivoire", "Cote d'Ivoire"), ('Croatia', 'Croatia'), ('Cuba', 'Cuba'), ('Curacao', 'Curacao'), ('Cyprus', 'Cyprus'), ('Czech Republic', 'Czech Republic'), ('Denmark', 'Denmark'), ('Djibouti', 'Djibouti'), ('Dominica', 'Dominica'), ('Dominican Republic', 'Dominican Republic'), ('East Timor', 'East Timor'), ('Ecuador', 'Ecuador'), ('Egypt', 'Egypt'), ('El Salvador', 'El Salvador'), ('Equatorial Guinea', 'Equatorial Guinea'), ('Eritrea', 'Eritrea'), ('Estonia', 'Estonia'), ('Ethiopia', 'Ethiopia'), ('Fiji', 'Fiji'), ('Finland', 'Finland'), ('France', 'France'), ('Gabon', 'Gabon'), ('Gambia, The', 'Gambia, The'), ('Georgia', 'Georgia'), ('Germany', 
'Germany'), ('Ghana', 'Ghana'), ('Greece', 'Greece'), ('Grenada', 'Grenada'), ('Guatemala', 'Guatemala'), ('Guinea', 'Guinea'), ('Guinea-Bissau', 'Guinea-Bissau'), ('Guyana', 'Guyana'), ('Haiti', 'Haiti'), ('Holy See', 'Holy See'), ('Honduras', 'Honduras'), ('Hong Kong', 'Hong Kong'), ('Hungary', 'Hungary'), ('Iceland', 'Iceland'), ('India', 'India'), ('Indonesia', 'Indonesia'), ('Iran', 'Iran'), ('Iraq', 'Iraq'), ('Ireland', 'Ireland'), ('Israel', 'Israel'), ('Italy', 'Italy'), ('Jamaica', 'Jamaica'), ('Japan', 'Japan'), ('Jordan', 'Jordan'), ('Kazakhstan', 'Kazakhstan'), ('Kenya', 'Kenya'), ('Kiribati', 'Kiribati'), ('Korea, North', 'Korea, North'), ('Korea, South', 'Korea, South'), ('Kosovo', 'Kosovo'), ('Kuwait', 'Kuwait'), ('Kyrgyzstan', 'Kyrgyzstan'), ('Laos', 'Laos'), ('Latvia', 'Latvia'), ('Lebanon', 'Lebanon'), ('Lesotho', 'Lesotho'), ('Liberia', 'Liberia'), ('Libya', 'Libya'), ('Liechtenstein', 'Liechtenstein'), ('Lithuania', 'Lithuania'), ('Luxembourg', 'Luxembourg'), ('Macau', 'Macau'), ('Macedonia', 'Macedonia'), ('Madagascar', 'Madagascar'), ('Malawi', 'Malawi'), ('Malaysia', 'Malaysia'), ('Maldives', 'Maldives'), ('Mali', 'Mali'), ('Malta', 'Malta'), ('Marshall Islands', 'Marshall Islands'), ('Mauritania', 'Mauritania'), ('Mauritius', 'Mauritius'), ('Mexico', 'Mexico'), ('Micronesia', 'Micronesia'), ('Moldova', 'Moldova'), ('Monaco', 'Monaco'), ('Mongolia', 'Mongolia'), ('Montenegro', 'Montenegro'), ('Morocco', 'Morocco'), ('Mozambique', 'Mozambique'), ('Namibia', 'Namibia'), ('Nauru', 'Nauru'), ('Nepal', 'Nepal'), ('Netherlands', 'Netherlands'), ('Netherlands Antilles', 'Netherlands Antilles'), ('New Zealand', 'New Zealand'), ('Nicaragua', 'Nicaragua'), ('Niger', 'Niger'), ('Nigeria', 'Nigeria'), ('North Korea', 'North Korea'), ('Norway', 'Norway'), ('Oman', 'Oman'), ('Pakistan', 'Pakistan'), ('Palau', 'Palau'), ('Palestinian Territories', 'Palestinian Territories'), ('Panama', 'Panama'), ('Papua New Guinea', 'Papua New Guinea'), ('Paraguay', 
'Paraguay'), ('Peru', 'Peru'), ('Philippines', 'Philippines'), ('Poland', 'Poland'), ('Portugal', 'Portugal'), ('Qatar', 'Qatar'), ('Romania', 'Romania'), ('Russia', 'Russia'), ('Rwanda', 'Rwanda'), ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'), ('Saint Lucia', 'Saint Lucia'), ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'), ('Samoa', 'Samoa'), ('San Marino', 'San Marino'), ('Sao Tome and Principe', 'Sao Tome and Principe'), ('Saudi Arabia', 'Saudi Arabia'), ('Senegal', 'Senegal'), ('Serbia', 'Serbia'), ('Seychelles', 'Seychelles'), ('Sierra Leone', 'Sierra Leone'), ('Singapore', 'Singapore'), ('Sint Maarten', 'Sint Maarten'), ('Slovakia', 'Slovakia'), ('Slovenia', 'Slovenia'), ('Solomon Islands', 'Solomon Islands'), ('Somalia', 'Somalia'), ('South Africa', 'South Africa'), ('South Korea', 'South Korea'), ('South Sudan', 'South Sudan'), ('Spain', 'Spain'), ('Sri Lanka', 'Sri Lanka'), ('Sudan', 'Sudan'), ('Suriname', 'Suriname'), ('Swaziland', 'Swaziland'), ('Sweden', 'Sweden'), ('Switzerland', 'Switzerland'), ('Syria', 'Syria'), ('Taiwan', 'Taiwan'), ('Tajikistan', 'Tajikistan'), ('Tanzania', 'Tanzania'), ('Thailand', 'Thailand'), ('Timor-Leste', 'Timor-Leste'), ('Togo', 'Togo'), ('Tonga', 'Tonga'), ('Trinidad and Tobago', 'Trinidad and Tobago'), ('Tunisia', 'Tunisia'), ('Turkey', 'Turkey'), ('Turkmenistan', 'Turkmenistan'), ('Tuvalu', 'Tuvalu'), ('Uganda', 'Uganda'), ('Ukraine', 'Ukraine'), ('United Arab Emirates', 'United Arab Emirates'), ('United Kingdom', 'United Kingdom'), ('Uruguay', 'Uruguay'), ('Uzbekistan', 'Uzbekistan'), ('Vanuatu', 'Vanuatu'), ('Venezuela', 'Venezuela'), ('Vietnam', 'Vietnam'), ('Yemen', 'Yemen'), ('Zambia', 'Zambia'), ('Zimbabwe', 'Zimbabwe'), ('Other', 'Other')], max_length=63)),
('billing_postal', models.CharField(db_index=True, max_length=31)),
('is_shipping_same_as_billing', models.BooleanField(default=False)),
('shipping_phone', models.CharField(null=True, blank=True, db_index=True, max_length=10)),
('shipping_street_name', models.CharField(max_length=63)),
('shipping_street_number', models.CharField(max_length=15)),
('shipping_unit_number', models.CharField(null=True, blank=True, max_length=15)),
('shipping_city', models.CharField(max_length=63)),
('shipping_province', models.CharField(choices=[('Alberta', 'Alberta'), ('British Columbia', 'British Columbia'), ('Manitoba', 'Manitoba'), ('New Brunswick', 'New Brunswick'), ('Newfoundland and Labrador', 'Newfoundland and Labrador'), ('Nova Scotia', 'Nova Scotia'), ('Ontario', 'Ontario'), ('Prince Edward Island', 'Prince Edward Island'), ('Quebec', 'Quebec'), ('Saskatchewan', 'Saskatchewan'), ('Northwest Territories', 'Northwest Territories'), ('Nunavut', 'Nunavut'), ('Yukon', 'Yukon'), ('Alabama', 'Alabama'), ('Alaska', 'Alaska'), ('Arizona', 'Arizona'), ('Arkansas', 'Arkansas'), ('California', 'California'), ('Colorado', 'Colorado'), ('Connecticut', 'Connecticut'), ('Delaware', 'Delaware'), ('Florida', 'Florida'), ('Georgia', 'Georgia'), ('Hawaii', 'Hawaii'), ('Idaho', 'Idaho'), ('Illinois', 'Illinois'), ('Indiana', 'Indiana'), ('Iowa', 'Iowa'), ('Kansas', 'Kansas'), ('Kentucky', 'Kentucky'), ('Louisiana', 'Louisiana'), ('Maine', 'Maine'), ('Maryland', 'Maryland'), ('Massachusetts', 'Massachusetts'), ('Michigan', 'Michigan'), ('Minnesota', 'Minnesota'), ('Mississippi', 'Mississippi'), ('Missouri', 'Missouri'), ('Montana', 'Montana'), ('Nebraska', 'Nebraska'), ('Nevada', 'Nevada'), ('New Hampshire', 'New Hampshire'), ('New Jersey', 'New Jersey'), ('New Mexico', 'New Mexico'), ('New York', 'New York'), ('North Carolina', 'North Carolina'), ('North Dakota', 'North Dakota'), ('Ohio', 'Ohio'), ('Oklahoma', 'Oklahoma'), ('Oregon', 'Oregon'), ('Pennsylvania', 'Pennsylvania'), ('Rhode Island', 'Rhode Island'), ('South Carolina', 'South Carolina'), ('South Dakota', 'South Dakota'), ('Tennessee', 'Tennessee'), ('Texas', 'Texas'), ('Utah', 'Utah'), ('Vermont', 'Vermont'), ('Virginia', 'Virginia'), ('Washington', 'Washington'), ('West Virginia', 'West Virginia'), ('Wisconsin', 'Wisconsin'), ('Wyoming', 'Wyoming'), ('Other', 'Other')], max_length=63)),
('shipping_country', models.CharField(choices=[('Canada', 'Canada'), ('United States', 'United States'), ('Mexico', 'Mexico'), ('Afghanistan', 'Afghanistan'), ('Albania', 'Albania'), ('Algeria', 'Algeria'), ('Andorra', 'Andorra'), ('Angola', 'Angola'), ('Antigua and Barbuda', 'Antigua and Barbuda'), ('Argentina', 'Argentina'), ('Armenia', 'Armenia'), ('Aruba', 'Aruba'), ('Australia', 'Australia'), ('Austria', 'Austria'), ('Azerbaijan', 'Azerbaijan'), ('Bahamas, The', 'Bahamas, The'), ('Bahrain', 'Bahrain'), ('Bangladesh', 'Bangladesh'), ('Barbados', 'Barbados'), ('Belarus', 'Belarus'), ('Belgium', 'Belgium'), ('Belize', 'Belize'), ('Benin', 'Benin'), ('Bhutan', 'Bhutan'), ('Bolivia', 'Bolivia'), ('Bosnia and Herzegovina', 'Bosnia and Herzegovina'), ('Botswana', 'Botswana'), ('Brazil', 'Brazil'), ('Brunei', 'Brunei'), ('Bulgaria', 'Bulgaria'), ('Burkina Faso', 'Burkina Faso'), ('Burma', 'Burma'), ('Burundi', 'Burundi'), ('Cambodia', 'Cambodia'), ('Cameroon', 'Cameroon'), ('Cape Verde', 'Cape Verde'), ('Central African Republic', 'Central African Republic'), ('Chad', 'Chad'), ('Chile', 'Chile'), ('China', 'China'), ('Colombia', 'Colombia'), ('Comoros', 'Comoros'), ('Congo, Democratic Republic of the', 'Congo, Democratic Republic of the'), ('Congo, Republic of the', 'Congo, Republic of the'), ('Costa Rica', 'Costa Rica'), ("Cote d'Ivoire", "Cote d'Ivoire"), ('Croatia', 'Croatia'), ('Cuba', 'Cuba'), ('Curacao', 'Curacao'), ('Cyprus', 'Cyprus'), ('Czech Republic', 'Czech Republic'), ('Denmark', 'Denmark'), ('Djibouti', 'Djibouti'), ('Dominica', 'Dominica'), ('Dominican Republic', 'Dominican Republic'), ('East Timor', 'East Timor'), ('Ecuador', 'Ecuador'), ('Egypt', 'Egypt'), ('El Salvador', 'El Salvador'), ('Equatorial Guinea', 'Equatorial Guinea'), ('Eritrea', 'Eritrea'), ('Estonia', 'Estonia'), ('Ethiopia', 'Ethiopia'), ('Fiji', 'Fiji'), ('Finland', 'Finland'), ('France', 'France'), ('Gabon', 'Gabon'), ('Gambia, The', 'Gambia, The'), ('Georgia', 'Georgia'), 
('Germany', 'Germany'), ('Ghana', 'Ghana'), ('Greece', 'Greece'), ('Grenada', 'Grenada'), ('Guatemala', 'Guatemala'), ('Guinea', 'Guinea'), ('Guinea-Bissau', 'Guinea-Bissau'), ('Guyana', 'Guyana'), ('Haiti', 'Haiti'), ('Holy See', 'Holy See'), ('Honduras', 'Honduras'), ('Hong Kong', 'Hong Kong'), ('Hungary', 'Hungary'), ('Iceland', 'Iceland'), ('India', 'India'), ('Indonesia', 'Indonesia'), ('Iran', 'Iran'), ('Iraq', 'Iraq'), ('Ireland', 'Ireland'), ('Israel', 'Israel'), ('Italy', 'Italy'), ('Jamaica', 'Jamaica'), ('Japan', 'Japan'), ('Jordan', 'Jordan'), ('Kazakhstan', 'Kazakhstan'), ('Kenya', 'Kenya'), ('Kiribati', 'Kiribati'), ('Korea, North', 'Korea, North'), ('Korea, South', 'Korea, South'), ('Kosovo', 'Kosovo'), ('Kuwait', 'Kuwait'), ('Kyrgyzstan', 'Kyrgyzstan'), ('Laos', 'Laos'), ('Latvia', 'Latvia'), ('Lebanon', 'Lebanon'), ('Lesotho', 'Lesotho'), ('Liberia', 'Liberia'), ('Libya', 'Libya'), ('Liechtenstein', 'Liechtenstein'), ('Lithuania', 'Lithuania'), ('Luxembourg', 'Luxembourg'), ('Macau', 'Macau'), ('Macedonia', 'Macedonia'), ('Madagascar', 'Madagascar'), ('Malawi', 'Malawi'), ('Malaysia', 'Malaysia'), ('Maldives', 'Maldives'), ('Mali', 'Mali'), ('Malta', 'Malta'), ('Marshall Islands', 'Marshall Islands'), ('Mauritania', 'Mauritania'), ('Mauritius', 'Mauritius'), ('Mexico', 'Mexico'), ('Micronesia', 'Micronesia'), ('Moldova', 'Moldova'), ('Monaco', 'Monaco'), ('Mongolia', 'Mongolia'), ('Montenegro', 'Montenegro'), ('Morocco', 'Morocco'), ('Mozambique', 'Mozambique'), ('Namibia', 'Namibia'), ('Nauru', 'Nauru'), ('Nepal', 'Nepal'), ('Netherlands', 'Netherlands'), ('Netherlands Antilles', 'Netherlands Antilles'), ('New Zealand', 'New Zealand'), ('Nicaragua', 'Nicaragua'), ('Niger', 'Niger'), ('Nigeria', 'Nigeria'), ('North Korea', 'North Korea'), ('Norway', 'Norway'), ('Oman', 'Oman'), ('Pakistan', 'Pakistan'), ('Palau', 'Palau'), ('Palestinian Territories', 'Palestinian Territories'), ('Panama', 'Panama'), ('Papua New Guinea', 'Papua New Guinea'), 
('Paraguay', 'Paraguay'), ('Peru', 'Peru'), ('Philippines', 'Philippines'), ('Poland', 'Poland'), ('Portugal', 'Portugal'), ('Qatar', 'Qatar'), ('Romania', 'Romania'), ('Russia', 'Russia'), ('Rwanda', 'Rwanda'), ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'), ('Saint Lucia', 'Saint Lucia'), ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'), ('Samoa', 'Samoa'), ('San Marino', 'San Marino'), ('Sao Tome and Principe', 'Sao Tome and Principe'), ('Saudi Arabia', 'Saudi Arabia'), ('Senegal', 'Senegal'), ('Serbia', 'Serbia'), ('Seychelles', 'Seychelles'), ('Sierra Leone', 'Sierra Leone'), ('Singapore', 'Singapore'), ('Sint Maarten', 'Sint Maarten'), ('Slovakia', 'Slovakia'), ('Slovenia', 'Slovenia'), ('Solomon Islands', 'Solomon Islands'), ('Somalia', 'Somalia'), ('South Africa', 'South Africa'), ('South Korea', 'South Korea'), ('South Sudan', 'South Sudan'), ('Spain', 'Spain'), ('Sri Lanka', 'Sri Lanka'), ('Sudan', 'Sudan'), ('Suriname', 'Suriname'), ('Swaziland', 'Swaziland'), ('Sweden', 'Sweden'), ('Switzerland', 'Switzerland'), ('Syria', 'Syria'), ('Taiwan', 'Taiwan'), ('Tajikistan', 'Tajikistan'), ('Tanzania', 'Tanzania'), ('Thailand', 'Thailand'), ('Timor-Leste', 'Timor-Leste'), ('Togo', 'Togo'), ('Tonga', 'Tonga'), ('Trinidad and Tobago', 'Trinidad and Tobago'), ('Tunisia', 'Tunisia'), ('Turkey', 'Turkey'), ('Turkmenistan', 'Turkmenistan'), ('Tuvalu', 'Tuvalu'), ('Uganda', 'Uganda'), ('Ukraine', 'Ukraine'), ('United Arab Emirates', 'United Arab Emirates'), ('United Kingdom', 'United Kingdom'), ('Uruguay', 'Uruguay'), ('Uzbekistan', 'Uzbekistan'), ('Vanuatu', 'Vanuatu'), ('Venezuela', 'Venezuela'), ('Vietnam', 'Vietnam'), ('Yemen', 'Yemen'), ('Zambia', 'Zambia'), ('Zimbabwe', 'Zimbabwe'), ('Other', 'Other')], max_length=63)),
('shipping_postal', models.CharField(db_index=True, max_length=31)),
('has_consented', models.BooleanField(default=False)),
('qrcode', models.ImageField(null=True, blank=True, upload_to='qrcode')),
],
options={
'db_table': 'ec_customers',
'ordering': ('last_name', 'first_name'),
},
),
migrations.CreateModel(
name='EmailSubscription',
fields=[
('subscription_id', models.AutoField(serialize=False, primary_key=True)),
('email', models.EmailField(unique=True, db_index=True, max_length=254)),
('submission_date', models.DateTimeField(auto_now_add=True)),
],
options={
'db_table': 'ec_email_subscriptions',
'ordering': ('submission_date',),
},
),
migrations.CreateModel(
name='Employee',
fields=[
('employee_id', models.AutoField(serialize=False, primary_key=True)),
('role', models.PositiveSmallIntegerField(choices=[(0, 'Owner'), (1, 'Manager'), (2, 'Worker')], default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(3)])),
('is_verified', models.BooleanField(default=False)),
('verification_key', models.CharField(default='', blank=True, max_length=63)),
('joined', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('is_suspended', models.BooleanField(default=False)),
('is_tos_signed', models.BooleanField(default=False)),
],
options={
'db_table': 'ec_employees',
'ordering': ('employee_id',),
},
),
migrations.CreateModel(
name='GCDBrand',
fields=[
('brand_id', models.AutoField(serialize=False, primary_key=True)),
('issue_count', models.IntegerField(default=0)),
('name', models.CharField(db_index=True, max_length=255)),
('year_began', models.IntegerField(null=True, db_index=True)),
('year_ended', models.IntegerField(null=True)),
('year_began_uncertain', models.BooleanField(db_index=True)),
('year_ended_uncertain', models.BooleanField(db_index=True)),
('notes', models.TextField()),
('keywords', models.TextField(null=True)),
('url', models.URLField(default='', blank=True, max_length=255)),
('reserved', models.BooleanField(default=False, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
],
options={
'db_table': 'gcd_brands',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='GCDBrandEmblemGroup',
fields=[
('brand_emblem_group_id', models.AutoField(serialize=False, primary_key=True)),
('brand', models.ForeignKey(to='api.GCDBrand', null=True)),
],
options={
'db_table': 'gcd_brand_emblem_groups',
'ordering': ('brand',),
},
),
migrations.CreateModel(
name='GCDBrandGroup',
fields=[
('brand_group_id', models.AutoField(serialize=False, primary_key=True)),
('issue_count', models.IntegerField(default=0)),
('name', models.CharField(db_index=True, max_length=255)),
('year_began', models.IntegerField(null=True, db_index=True)),
('year_ended', models.IntegerField(null=True)),
('year_began_uncertain', models.BooleanField(db_index=True)),
('year_ended_uncertain', models.BooleanField(db_index=True)),
('notes', models.TextField()),
('keywords', models.TextField(null=True)),
('url', models.URLField(default='', blank=True, max_length=255)),
('reserved', models.BooleanField(default=False, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
],
options={
'db_table': 'gcd_brand_groups',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='GCDBrandUse',
fields=[
('brand_use_id', models.AutoField(serialize=False, primary_key=True)),
('year_began', models.IntegerField(null=True, db_index=True)),
('year_ended', models.IntegerField(null=True)),
('year_began_uncertain', models.BooleanField(db_index=True)),
('year_ended_uncertain', models.BooleanField(db_index=True)),
('notes', models.TextField()),
('reserved', models.BooleanField(default=0, db_index=True)),
('created', models.DateField(auto_now_add=True)),
('modified', models.DateField(auto_now=True)),
('emblem', models.ForeignKey(related_name='in_use', to='api.GCDBrand')),
],
options={
'db_table': 'gcd_brand_uses',
'ordering': ('publisher',),
},
),
migrations.CreateModel(
name='GCDCountry',
fields=[
('country_id', models.AutoField(serialize=False, primary_key=True)),
('code', models.CharField(unique=True, max_length=10)),
('name', models.CharField(db_index=True, max_length=255)),
],
options={
'db_table': 'gcd_countries',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='GCDImage',
fields=[
('image_id', models.AutoField(serialize=False, primary_key=True)),
('type', models.CharField(db_index=True, max_length=255)),
('file', models.FileField(null=True, upload_to='uploads')),
],
options={
'db_table': 'gcd_images',
},
),
migrations.CreateModel(
name='GCDIndiciaPublisher',
fields=[
('indicia_publisher_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(db_index=True, max_length=255)),
('year_began', models.PositiveSmallIntegerField(null=True, db_index=True)),
('year_ended', models.PositiveSmallIntegerField(null=True)),
('year_began_uncertain', models.BooleanField(default=False, db_index=True)),
('year_ended_uncertain', models.BooleanField(default=False, db_index=True)),
('notes', models.TextField(null=True, blank=True)),
('url', models.URLField(default='', null=True, blank=True, max_length=255)),
('is_surrogate', models.BooleanField(db_index=True)),
('reserved', models.BooleanField(default=False, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
('imprint_count', models.IntegerField(default=0)),
('brand_count', models.IntegerField(default=0, db_index=True)),
('indicia_publisher_count', models.IntegerField(default=0, db_index=True)),
('series_count', models.IntegerField(default=0)),
('issue_count', models.IntegerField(default=0)),
('country', models.ForeignKey(to='api.GCDCountry')),
('images', models.ManyToManyField(to='api.GCDImage', blank=True)),
],
options={
'db_table': 'gcd_indicia_publishers',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='GCDIssue',
fields=[
('issue_id', models.AutoField(serialize=False, primary_key=True)),
('number', models.CharField(db_index=True, max_length=50)),
('title', models.CharField(db_index=True, max_length=255)),
('no_title', models.BooleanField(default=False, db_index=True)),
('volume', models.CharField(db_index=True, max_length=50)),
('no_volume', models.BooleanField(default=False, db_index=True)),
('display_volume_with_number', models.BooleanField(default=False, db_index=True)),
('isbn', models.CharField(db_index=True, max_length=32)),
('no_isbn', models.BooleanField(default=False, db_index=True)),
('valid_isbn', models.CharField(db_index=True, max_length=13)),
('variant_of_id', models.IntegerField(default=0, db_index=True)),
('variant_name', models.CharField(max_length=255)),
('barcode', models.CharField(db_index=True, max_length=38)),
('no_barcode', models.BooleanField(default=False)),
('rating', models.CharField(default='', db_index=True, max_length=255)),
('no_rating', models.BooleanField(default=False, db_index=True)),
('is_first_issue', models.BooleanField(default=False)),
('is_last_issue', models.BooleanField(default=False)),
('publication_date', models.CharField(max_length=255)),
('key_date', models.CharField(db_index=True, max_length=10)),
('on_sale_date', models.CharField(db_index=True, max_length=10)),
('on_sale_date_uncertain', models.BooleanField(default=False)),
('sort_code', models.IntegerField(db_index=True)),
('indicia_frequency', models.CharField(max_length=255)),
('no_indicia_frequency', models.BooleanField(default=False, db_index=True)),
('price', models.CharField(max_length=255)),
('page_count', models.DecimalField(max_digits=10, null=True, decimal_places=3)),
('page_count_uncertain', models.BooleanField(default=False)),
('editing', models.TextField()),
('no_editing', models.BooleanField(default=False, db_index=True)),
('notes', models.TextField(null=True)),
('keywords', models.TextField(null=True)),
('is_indexed', models.IntegerField(default=0, db_index=True)),
('reserved', models.BooleanField(default=False, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True, db_index=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
('indicia_pub_not_printed', models.BooleanField(default=False)),
('no_brand', models.BooleanField(default=False, db_index=True)),
('small_url', models.URLField(null=True, blank=True, max_length=255)),
('medium_url', models.URLField(null=True, blank=True, max_length=255)),
('large_url', models.URLField(null=True, blank=True, max_length=255)),
('alt_small_url', models.URLField(null=True, blank=True, max_length=255)),
('alt_medium_url', models.URLField(null=True, blank=True, max_length=255)),
('alt_large_url', models.URLField(null=True, blank=True, max_length=255)),
('has_alternative', models.BooleanField(default=False)),
('publisher_name', models.CharField(db_index=True, max_length=255)),
('genre', models.CharField(null=True, blank=True, db_index=True, max_length=255)),
('product_name', models.CharField(null=True, blank=True, db_index=True, max_length=511)),
('brand', models.ForeignKey(to='api.GCDBrand', null=True)),
('images', models.ManyToManyField(to='api.GCDImage', blank=True)),
('indicia_publisher', models.ForeignKey(to='api.GCDIndiciaPublisher', null=True)),
],
options={
'db_table': 'gcd_issues',
'ordering': ['series', 'sort_code'],
},
),
migrations.CreateModel(
name='GCDLanguage',
fields=[
('language_id', models.AutoField(serialize=False, primary_key=True)),
('code', models.CharField(unique=True, max_length=10)),
('name', models.CharField(db_index=True, max_length=255)),
],
options={
'db_table': 'gcd_languages',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='GCDPublisher',
fields=[
('publisher_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(db_index=True, max_length=255)),
('year_began', models.PositiveSmallIntegerField(null=True, db_index=True)),
('year_ended', models.PositiveSmallIntegerField(null=True)),
('year_began_uncertain', models.BooleanField(default=False, db_index=True)),
('year_ended_uncertain', models.BooleanField(default=False, db_index=True)),
('notes', models.TextField(null=True, blank=True)),
('url', models.URLField(default='', null=True, blank=True, max_length=255)),
('is_master', models.BooleanField(default=False, db_index=True)),
('reserved', models.BooleanField(default=False, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
('imprint_count', models.IntegerField(default=0)),
('brand_count', models.IntegerField(default=0, db_index=True)),
('indicia_publisher_count', models.IntegerField(default=0, db_index=True)),
('series_count', models.IntegerField(default=0)),
('issue_count', models.IntegerField(default=0)),
('country', models.ForeignKey(to='api.GCDCountry')),
('images', models.ManyToManyField(to='api.GCDImage', blank=True)),
('parent', models.ForeignKey(related_name='imprint_set', to='api.GCDPublisher', null=True)),
],
options={
'db_table': 'gcd_publishers',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='GCDSeries',
fields=[
('series_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(db_index=True, max_length=255)),
('sort_name', models.CharField(db_index=True, max_length=255)),
('format', models.CharField(default='', max_length=255)),
('color', models.CharField(default='', max_length=255)),
('dimensions', models.CharField(default='', max_length=255)),
('paper_stock', models.CharField(default='', max_length=255)),
('binding', models.CharField(default='', max_length=255)),
('publishing_format', models.CharField(default='', max_length=255)),
('tracking_notes', models.TextField(null=True, blank=True)),
('notes', models.TextField(null=True, blank=True)),
('publication_notes', models.TextField(null=True, blank=True)),
('keywords', models.TextField(null=True, blank=True)),
('year_began', models.IntegerField(db_index=True)),
('year_ended', models.IntegerField(default=0, null=True, blank=True)),
('year_began_uncertain', models.BooleanField(default=False)),
('year_ended_uncertain', models.BooleanField(default=False)),
('publication_dates', models.CharField(max_length=255)),
('has_barcode', models.BooleanField(default=False)),
('has_indicia_frequency', models.BooleanField(default=False)),
('has_isbn', models.BooleanField(default=False)),
('has_issue_title', models.BooleanField(default=False)),
('has_volume', models.BooleanField(default=False)),
('has_rating', models.BooleanField(default=False)),
('is_current', models.BooleanField(default=False)),
('is_comics_publication', models.BooleanField(default=False)),
('is_singleton', models.BooleanField(default=False)),
('issue_count', models.IntegerField(default=0, null=True, blank=True)),
('has_gallery', models.BooleanField(default=False, db_index=True)),
('reserved', models.BooleanField(default=False, db_index=True)),
('open_reserve', models.IntegerField(null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
('cover_url', models.URLField(null=True, blank=True, max_length=255)),
('publication_type_id', models.IntegerField(null=True, blank=0)),
('publisher_name', models.CharField(db_index=True, max_length=255)),
('country', models.ForeignKey(to='api.GCDCountry')),
('images', models.ManyToManyField(to='api.GCDImage', blank=True)),
('language', models.ForeignKey(to='api.GCDLanguage')),
('publisher', models.ForeignKey(to='api.GCDPublisher')),
],
options={
'db_table': 'gcd_series',
'ordering': ['sort_name', 'year_began'],
},
),
migrations.CreateModel(
name='GCDStory',
fields=[
('story_id', models.AutoField(serialize=False, primary_key=True)),
('title', models.CharField(max_length=255)),
('title_inferred', models.BooleanField(default=False, db_index=True)),
('feature', models.CharField(max_length=255)),
('sequence_number', models.IntegerField()),
('page_count', models.DecimalField(max_digits=10, null=True, decimal_places=3, db_index=True)),
('page_count_uncertain', models.BooleanField(default=False, db_index=True)),
('script', models.TextField()),
('pencils', models.TextField()),
('inks', models.TextField()),
('colors', models.TextField()),
('letters', models.TextField()),
('editing', models.TextField()),
('no_script', models.BooleanField(default=False, db_index=True)),
('no_pencils', models.BooleanField(default=False, db_index=True)),
('no_inks', models.BooleanField(default=False, db_index=True)),
('no_colors', models.BooleanField(default=False, db_index=True)),
('no_letters', models.BooleanField(default=False, db_index=True)),
('no_editing', models.BooleanField(default=False, db_index=True)),
('job_number', models.CharField(max_length=25)),
('genre', models.CharField(max_length=255)),
('characters', models.TextField()),
('synopsis', models.TextField()),
('reprint_notes', models.TextField()),
('notes', models.TextField()),
('keywords', models.TextField(null=True)),
('reserved', models.BooleanField(default=False, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True, db_index=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
('issue', models.ForeignKey(to='api.GCDIssue')),
],
options={
'db_table': 'gcd_stories',
'ordering': ('sequence_number',),
},
),
migrations.CreateModel(
name='GCDStoryType',
fields=[
('story_type_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(unique=True, db_index=True, max_length=50)),
('sort_code', models.IntegerField(unique=True)),
],
options={
'db_table': 'gcd_story_types',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='HelpRequest',
fields=[
('help_id', models.AutoField(serialize=False, primary_key=True)),
('subject', models.PositiveSmallIntegerField(choices=[(1, 'Feedback'), (2, 'Error'), (3, 'Checkout'), (4, 'Inventory'), (5, 'Pull List'), (6, 'Sales'), (7, 'Emailing List'), (8, 'Store Settings / Users'), (9, 'Dashboard')], default=1, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(10)])),
('subject_url', models.URLField(null=True, blank=True)),
('message', models.TextField()),
('submission_date', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(to='api.Customer', blank=True, null=True)),
('employee', models.ForeignKey(to='api.Employee', blank=True, null=True)),
],
options={
'db_table': 'ec_help_requests',
'ordering': ('submission_date',),
},
),
migrations.CreateModel(
name='ImageBinaryUpload',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_index=True)),
('created', models.DateField(null=True, auto_now=True)),
('file_type', models.CharField(choices=[('png', 'Portable Network Graphics (PNG)'), ('jpeg', 'Joint Photographic Experts Group picture (JPEG)'), ('jpg', 'Joint Photographic Experts Group picture (JPG)'), ('bmp', 'Bitmap Image File (BMP)'), ('tiff', 'Tagged Image File Format (TIFF)'), ('gif', 'Graphics Interchange Format (GIF)')], db_index=True, max_length=4)),
('mime_type', models.CharField(choices=[('image/png', 'PNG'), ('image/jpeg', 'JPEG/JPG'), ('image/bmp', 'BMP'), ('image/tiff', 'TIFF'), ('image/gif', 'GIF')], default='image/jpeg', db_index=True, max_length=15)),
('data', models.BinaryField()),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'ec_image_binary_uploads',
},
),
migrations.CreateModel(
name='ImageUpload',
fields=[
('upload_id', models.AutoField(serialize=False, primary_key=True)),
('upload_date', models.DateField(null=True, auto_now=True)),
('is_assigned', models.BooleanField(default=False)),
('image', models.ImageField(null=True, blank=True, upload_to='upload')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'ec_image_uploads',
'ordering': ('upload_date',),
},
),
migrations.CreateModel(
name='Organization',
fields=[
('org_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=127)),
('description', models.TextField(null=True, blank=True)),
('joined', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('is_suspended', models.BooleanField(default=False, db_index=True)),
('is_listed', models.BooleanField(default=True, db_index=True)),
('street_name', models.CharField(max_length=63)),
('street_number', models.CharField(null=True, blank=True, max_length=31)),
('unit_number', models.CharField(null=True, blank=True, max_length=15)),
('city', models.CharField(max_length=63)),
('province', models.CharField(choices=[('Alberta', 'Alberta'), ('British Columbia', 'British Columbia'), ('Manitoba', 'Manitoba'), ('New Brunswick', 'New Brunswick'), ('Newfoundland and Labrador', 'Newfoundland and Labrador'), ('Nova Scotia', 'Nova Scotia'), ('Ontario', 'Ontario'), ('Prince Edward Island', 'Prince Edward Island'), ('Quebec', 'Quebec'), ('Saskatchewan', 'Saskatchewan'), ('Northwest Territories', 'Northwest Territories'), ('Nunavut', 'Nunavut'), ('Yukon', 'Yukon'), ('Alabama', 'Alabama'), ('Alaska', 'Alaska'), ('Arizona', 'Arizona'), ('Arkansas', 'Arkansas'), ('California', 'California'), ('Colorado', 'Colorado'), ('Connecticut', 'Connecticut'), ('Delaware', 'Delaware'), ('Florida', 'Florida'), ('Georgia', 'Georgia'), ('Hawaii', 'Hawaii'), ('Idaho', 'Idaho'), ('Illinois', 'Illinois'), ('Indiana', 'Indiana'), ('Iowa', 'Iowa'), ('Kansas', 'Kansas'), ('Kentucky', 'Kentucky'), ('Louisiana', 'Louisiana'), ('Maine', 'Maine'), ('Maryland', 'Maryland'), ('Massachusetts', 'Massachusetts'), ('Michigan', 'Michigan'), ('Minnesota', 'Minnesota'), ('Mississippi', 'Mississippi'), ('Missouri', 'Missouri'), ('Montana', 'Montana'), ('Nebraska', 'Nebraska'), ('Nevada', 'Nevada'), ('New Hampshire', 'New Hampshire'), ('New Jersey', 'New Jersey'), ('New Mexico', 'New Mexico'), ('New York', 'New York'), ('North Carolina', 'North Carolina'), ('North Dakota', 'North Dakota'), ('Ohio', 'Ohio'), ('Oklahoma', 'Oklahoma'), ('Oregon', 'Oregon'), ('Pennsylvania', 'Pennsylvania'), ('Rhode Island', 'Rhode Island'), ('South Carolina', 'South Carolina'), ('South Dakota', 'South Dakota'), ('Tennessee', 'Tennessee'), ('Texas', 'Texas'), ('Utah', 'Utah'), ('Vermont', 'Vermont'), ('Virginia', 'Virginia'), ('Washington', 'Washington'), ('West Virginia', 'West Virginia'), ('Wisconsin', 'Wisconsin'), ('Wyoming', 'Wyoming'), ('Other', 'Other')], max_length=63)),
('country', models.CharField(choices=[('Canada', 'Canada'), ('United States', 'United States'), ('Mexico', 'Mexico'), ('Afghanistan', 'Afghanistan'), ('Albania', 'Albania'), ('Algeria', 'Algeria'), ('Andorra', 'Andorra'), ('Angola', 'Angola'), ('Antigua and Barbuda', 'Antigua and Barbuda'), ('Argentina', 'Argentina'), ('Armenia', 'Armenia'), ('Aruba', 'Aruba'), ('Australia', 'Australia'), ('Austria', 'Austria'), ('Azerbaijan', 'Azerbaijan'), ('Bahamas, The', 'Bahamas, The'), ('Bahrain', 'Bahrain'), ('Bangladesh', 'Bangladesh'), ('Barbados', 'Barbados'), ('Belarus', 'Belarus'), ('Belgium', 'Belgium'), ('Belize', 'Belize'), ('Benin', 'Benin'), ('Bhutan', 'Bhutan'), ('Bolivia', 'Bolivia'), ('Bosnia and Herzegovina', 'Bosnia and Herzegovina'), ('Botswana', 'Botswana'), ('Brazil', 'Brazil'), ('Brunei', 'Brunei'), ('Bulgaria', 'Bulgaria'), ('Burkina Faso', 'Burkina Faso'), ('Burma', 'Burma'), ('Burundi', 'Burundi'), ('Cambodia', 'Cambodia'), ('Cameroon', 'Cameroon'), ('Cape Verde', 'Cape Verde'), ('Central African Republic', 'Central African Republic'), ('Chad', 'Chad'), ('Chile', 'Chile'), ('China', 'China'), ('Colombia', 'Colombia'), ('Comoros', 'Comoros'), ('Congo, Democratic Republic of the', 'Congo, Democratic Republic of the'), ('Congo, Republic of the', 'Congo, Republic of the'), ('Costa Rica', 'Costa Rica'), ("Cote d'Ivoire", "Cote d'Ivoire"), ('Croatia', 'Croatia'), ('Cuba', 'Cuba'), ('Curacao', 'Curacao'), ('Cyprus', 'Cyprus'), ('Czech Republic', 'Czech Republic'), ('Denmark', 'Denmark'), ('Djibouti', 'Djibouti'), ('Dominica', 'Dominica'), ('Dominican Republic', 'Dominican Republic'), ('East Timor', 'East Timor'), ('Ecuador', 'Ecuador'), ('Egypt', 'Egypt'), ('El Salvador', 'El Salvador'), ('Equatorial Guinea', 'Equatorial Guinea'), ('Eritrea', 'Eritrea'), ('Estonia', 'Estonia'), ('Ethiopia', 'Ethiopia'), ('Fiji', 'Fiji'), ('Finland', 'Finland'), ('France', 'France'), ('Gabon', 'Gabon'), ('Gambia, The', 'Gambia, The'), ('Georgia', 'Georgia'), ('Germany', 
'Germany'), ('Ghana', 'Ghana'), ('Greece', 'Greece'), ('Grenada', 'Grenada'), ('Guatemala', 'Guatemala'), ('Guinea', 'Guinea'), ('Guinea-Bissau', 'Guinea-Bissau'), ('Guyana', 'Guyana'), ('Haiti', 'Haiti'), ('Holy See', 'Holy See'), ('Honduras', 'Honduras'), ('Hong Kong', 'Hong Kong'), ('Hungary', 'Hungary'), ('Iceland', 'Iceland'), ('India', 'India'), ('Indonesia', 'Indonesia'), ('Iran', 'Iran'), ('Iraq', 'Iraq'), ('Ireland', 'Ireland'), ('Israel', 'Israel'), ('Italy', 'Italy'), ('Jamaica', 'Jamaica'), ('Japan', 'Japan'), ('Jordan', 'Jordan'), ('Kazakhstan', 'Kazakhstan'), ('Kenya', 'Kenya'), ('Kiribati', 'Kiribati'), ('Korea, North', 'Korea, North'), ('Korea, South', 'Korea, South'), ('Kosovo', 'Kosovo'), ('Kuwait', 'Kuwait'), ('Kyrgyzstan', 'Kyrgyzstan'), ('Laos', 'Laos'), ('Latvia', 'Latvia'), ('Lebanon', 'Lebanon'), ('Lesotho', 'Lesotho'), ('Liberia', 'Liberia'), ('Libya', 'Libya'), ('Liechtenstein', 'Liechtenstein'), ('Lithuania', 'Lithuania'), ('Luxembourg', 'Luxembourg'), ('Macau', 'Macau'), ('Macedonia', 'Macedonia'), ('Madagascar', 'Madagascar'), ('Malawi', 'Malawi'), ('Malaysia', 'Malaysia'), ('Maldives', 'Maldives'), ('Mali', 'Mali'), ('Malta', 'Malta'), ('Marshall Islands', 'Marshall Islands'), ('Mauritania', 'Mauritania'), ('Mauritius', 'Mauritius'), ('Mexico', 'Mexico'), ('Micronesia', 'Micronesia'), ('Moldova', 'Moldova'), ('Monaco', 'Monaco'), ('Mongolia', 'Mongolia'), ('Montenegro', 'Montenegro'), ('Morocco', 'Morocco'), ('Mozambique', 'Mozambique'), ('Namibia', 'Namibia'), ('Nauru', 'Nauru'), ('Nepal', 'Nepal'), ('Netherlands', 'Netherlands'), ('Netherlands Antilles', 'Netherlands Antilles'), ('New Zealand', 'New Zealand'), ('Nicaragua', 'Nicaragua'), ('Niger', 'Niger'), ('Nigeria', 'Nigeria'), ('North Korea', 'North Korea'), ('Norway', 'Norway'), ('Oman', 'Oman'), ('Pakistan', 'Pakistan'), ('Palau', 'Palau'), ('Palestinian Territories', 'Palestinian Territories'), ('Panama', 'Panama'), ('Papua New Guinea', 'Papua New Guinea'), ('Paraguay', 
'Paraguay'), ('Peru', 'Peru'), ('Philippines', 'Philippines'), ('Poland', 'Poland'), ('Portugal', 'Portugal'), ('Qatar', 'Qatar'), ('Romania', 'Romania'), ('Russia', 'Russia'), ('Rwanda', 'Rwanda'), ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'), ('Saint Lucia', 'Saint Lucia'), ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'), ('Samoa', 'Samoa'), ('San Marino', 'San Marino'), ('Sao Tome and Principe', 'Sao Tome and Principe'), ('Saudi Arabia', 'Saudi Arabia'), ('Senegal', 'Senegal'), ('Serbia', 'Serbia'), ('Seychelles', 'Seychelles'), ('Sierra Leone', 'Sierra Leone'), ('Singapore', 'Singapore'), ('Sint Maarten', 'Sint Maarten'), ('Slovakia', 'Slovakia'), ('Slovenia', 'Slovenia'), ('Solomon Islands', 'Solomon Islands'), ('Somalia', 'Somalia'), ('South Africa', 'South Africa'), ('South Korea', 'South Korea'), ('South Sudan', 'South Sudan'), ('Spain', 'Spain'), ('Sri Lanka', 'Sri Lanka'), ('Sudan', 'Sudan'), ('Suriname', 'Suriname'), ('Swaziland', 'Swaziland'), ('Sweden', 'Sweden'), ('Switzerland', 'Switzerland'), ('Syria', 'Syria'), ('Taiwan', 'Taiwan'), ('Tajikistan', 'Tajikistan'), ('Tanzania', 'Tanzania'), ('Thailand', 'Thailand'), ('Timor-Leste', 'Timor-Leste'), ('Togo', 'Togo'), ('Tonga', 'Tonga'), ('Trinidad and Tobago', 'Trinidad and Tobago'), ('Tunisia', 'Tunisia'), ('Turkey', 'Turkey'), ('Turkmenistan', 'Turkmenistan'), ('Tuvalu', 'Tuvalu'), ('Uganda', 'Uganda'), ('Ukraine', 'Ukraine'), ('United Arab Emirates', 'United Arab Emirates'), ('United Kingdom', 'United Kingdom'), ('Uruguay', 'Uruguay'), ('Uzbekistan', 'Uzbekistan'), ('Vanuatu', 'Vanuatu'), ('Venezuela', 'Venezuela'), ('Vietnam', 'Vietnam'), ('Yemen', 'Yemen'), ('Zambia', 'Zambia'), ('Zimbabwe', 'Zimbabwe'), ('Other', 'Other')], max_length=63)),
('postal', models.CharField(max_length=31)),
('currency', models.PositiveSmallIntegerField(choices=[(124, 'CAD'), (840, 'USD')], default=124)),
('language', models.CharField(choices=[('EN', 'English')], default='EN', max_length=2)),
('website', models.URLField(null=True, blank=True)),
('email', models.EmailField(null=True, blank=True, max_length=254)),
('phone', models.CharField(null=True, blank=True, max_length=10)),
('fax', models.CharField(null=True, blank=True, max_length=10)),
('twitter', models.CharField(null=True, blank=True, max_length=15)),
('facebook_url', models.URLField(null=True, blank=True)),
('instagram_url', models.URLField(null=True, blank=True)),
('linkedin_url', models.URLField(null=True, blank=True)),
('github_url', models.URLField(null=True, blank=True)),
('google_url', models.URLField(null=True, blank=True)),
('youtube_url', models.URLField(null=True, blank=True)),
('flickr_url', models.URLField(null=True, blank=True)),
('paypal_email', models.EmailField(max_length=254)),
('style', models.CharField(choices=[('ecantina-style-0.css', 'Green'), ('ecantina-style-1.css', 'Ligh Green'), ('ecantina-style-2.css', 'Aqua Green'), ('ecantina-style-3.css', 'Blue'), ('ecantina-style-4.css', 'Purple'), ('ecantina-style-5.css', 'Red'), ('ecantina-style-6.css', 'Dark Grey'), ('ecantina-style-7.css', 'Grey'), ('ecantina-style-8.css', 'Light Aqua Green'), ('ecantina-style-9.css', 'Yellow'), ('ecantina-style-10.css', 'Light Red'), ('ecantina-style-11.css', 'Dark Blue'), ('ecantina-style-black.css', 'Black')], default='ecantina-style-5.css', max_length=31)),
('administrator', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
('customers', models.ManyToManyField(to='api.Customer', blank=True)),
('header', models.ForeignKey(related_name='org_header', to='api.ImageUpload', blank=True, null=True)),
('logo', models.ForeignKey(related_name='org_logo', to='api.ImageUpload', blank=True, null=True)),
],
options={
'db_table': 'ec_organizations',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='OrgShippingPreference',
fields=[
('shipping_pref_id', models.AutoField(serialize=False, primary_key=True)),
('is_pickup_only', models.BooleanField(default=False)),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_org_shipping_preferences',
'ordering': ('organization',),
},
),
migrations.CreateModel(
name='OrgShippingRate',
fields=[
('shipping_rate_id', models.AutoField(serialize=False, primary_key=True)),
('country', models.PositiveSmallIntegerField(choices=[(124, 'Canada'), (840, 'United States'), (484, 'Mexico')], null=True, validators=[django.core.validators.MinValueValidator(4), django.core.validators.MaxValueValidator(840)], blank=True)),
('comics_rate1', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate2', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate3', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate4', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate5', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate6', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate7', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate8', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate9', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate10', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_org_shipping_rates',
'ordering': ('country',),
},
),
migrations.CreateModel(
name='PrintHistory',
fields=[
('print_id', models.AutoField(serialize=False, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('filename', models.CharField(db_index=True, max_length=127)),
('url', models.URLField()),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_print_history',
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='Product',
fields=[
('product_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(null=True, blank=True, db_index=True, max_length=511)),
('type', models.PositiveSmallIntegerField(choices=[(1, 'Comic'), (2, 'Furniture'), (3, 'Coin')], default=1, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)], db_index=True)),
('description', models.TextField(default='', blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('is_sold', models.BooleanField(default=False, db_index=True)),
('is_listed', models.BooleanField(default=True, db_index=True)),
('is_new', models.BooleanField(default=False, db_index=True)),
('is_featured', models.BooleanField(default=False, db_index=True)),
('sub_price', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('has_tax', models.BooleanField(default=True)),
('tax_rate', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('tax_amount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('sub_price_with_tax', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('discount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('discount_type', models.PositiveSmallIntegerField(choices=[(1, '%'), (2, '$')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(2)])),
('price', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('cost', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('currency', models.PositiveSmallIntegerField(choices=[(124, 'CAD'), (840, 'USD')], default=124)),
('language', models.CharField(choices=[('EN', 'English')], default='EN', max_length=2)),
('image_url', models.URLField(null=True, blank=True)),
('qrcode', models.ImageField(null=True, blank=True, upload_to='qrcode')),
('is_qrcode_printed', models.BooleanField(default=False)),
('has_no_shipping', models.BooleanField(default=False)),
('is_unlimited', models.BooleanField(default=False)),
('brand', models.ForeignKey(to='api.Brand', blank=True, null=True)),
('category', models.ForeignKey(to='api.Category')),
('image', models.ForeignKey(to='api.ImageUpload', blank=True, null=True)),
('images', models.ManyToManyField(related_name='product_images', to='api.ImageUpload', blank=True)),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_products',
'ordering': ('product_id', 'type'),
},
),
migrations.CreateModel(
name='Promotion',
fields=[
('promotion_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=127)),
('discount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('discount_type', models.PositiveSmallIntegerField(choices=[(1, '%'), (2, '$')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(2)])),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_promotions',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Pulllist',
fields=[
('pulllist_id', models.AutoField(serialize=False, primary_key=True)),
('organization', models.ForeignKey(to='api.Organization')),
('series', models.ForeignKey(to='api.GCDSeries', null=True)),
],
options={
'db_table': 'ec_pulllists',
'ordering': ('series',),
},
),
migrations.CreateModel(
name='PulllistSubscription',
fields=[
('subscription_id', models.AutoField(serialize=False, primary_key=True)),
('copies', models.PositiveSmallIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
('created', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(to='api.Customer')),
('organization', models.ForeignKey(to='api.Organization')),
('pulllist', models.ForeignKey(to='api.Pulllist')),
],
options={
'db_table': 'ec_pulllists_subscriptions',
},
),
migrations.CreateModel(
name='Receipt',
fields=[
('receipt_id', models.AutoField(serialize=False, primary_key=True)),
('created', models.DateTimeField(db_index=True, auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('purchased', models.DateTimeField(null=True, blank=True, db_index=True)),
('comment', models.CharField(default='', null=True, blank=True, max_length=511)),
('has_purchased_online', models.BooleanField(default=False)),
('payment_method', models.PositiveSmallIntegerField(choices=[(1, 'Cash'), (2, 'Debit Card'), (3, 'Credit Card'), (4, 'Gift Card'), (5, 'Store Points'), (6, 'Cheque'), (7, 'PayPal'), (8, 'Invoice'), (9, 'Other')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(9)])),
('status', models.PositiveSmallIntegerField(choices=[(1, 'New Order'), (2, 'Picked'), (3, 'Shipped'), (4, 'Received'), (5, 'In-Store Sale'), (6, 'Online Sale')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(6)], db_index=True)),
('has_shipping', models.BooleanField(default=False, db_index=True)),
('sub_total', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('has_tax', models.BooleanField(default=True)),
('tax_rate', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('tax_amount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('sub_total_with_tax', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('discount_amount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('shipping_amount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('total_amount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('has_finished', models.BooleanField(default=False, db_index=True)),
('has_paid', models.BooleanField(default=False)),
('email', models.EmailField(null=True, blank=True, max_length=254)),
('billing_address', models.CharField(null=True, blank=True, max_length=63)),
('billing_phone', models.CharField(null=True, blank=True, max_length=10)),
('billing_city', models.CharField(null=True, blank=True, max_length=63)),
('billing_province', models.CharField(null=True, blank=True, max_length=63)),
('billing_country', models.CharField(null=True, blank=True, max_length=63)),
('billing_postal', models.CharField(null=True, blank=True, max_length=31)),
('shipping_address', models.CharField(null=True, blank=True, max_length=63)),
('shipping_phone', models.CharField(null=True, blank=True, max_length=10)),
('shipping_city', models.CharField(null=True, blank=True, max_length=63)),
('shipping_province', models.CharField(null=True, blank=True, max_length=63)),
('shipping_country', models.CharField(null=True, blank=True, max_length=63)),
('shipping_postal', models.CharField(null=True, blank=True, max_length=31)),
('has_error', models.BooleanField(default=False, db_index=True)),
('error', models.PositiveSmallIntegerField(choices=[(0, 'No Error'), (1, 'Cancelled Online Order')], default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)])),
('customer', models.ForeignKey(to='api.Customer', blank=True, null=True)),
('employee', models.ForeignKey(to='api.Employee', blank=True, null=True)),
('organization', models.ForeignKey(to='api.Organization', blank=True, null=True)),
('products', models.ManyToManyField(related_name='receipt_products', to='api.Product', blank=True)),
],
options={
'db_table': 'ec_receipts',
'ordering': ('last_updated',),
},
),
migrations.CreateModel(
name='Section',
fields=[
('section_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(db_index=True, max_length=127)),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_sections',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Store',
fields=[
('store_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=127)),
('description', models.TextField(null=True, blank=True)),
('joined', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('is_suspended', models.BooleanField(default=False, db_index=True)),
('is_listed', models.BooleanField(default=True, db_index=True)),
('tax_rate', models.DecimalField(default=0.13, max_digits=10, decimal_places=2)),
('street_name', models.CharField(max_length=63)),
('street_number', models.CharField(null=True, blank=True, max_length=31)),
('unit_number', models.CharField(null=True, blank=True, max_length=15)),
('city', models.CharField(max_length=63)),
('province', models.CharField(choices=[('Alberta', 'Alberta'), ('British Columbia', 'British Columbia'), ('Manitoba', 'Manitoba'), ('New Brunswick', 'New Brunswick'), ('Newfoundland and Labrador', 'Newfoundland and Labrador'), ('Nova Scotia', 'Nova Scotia'), ('Ontario', 'Ontario'), ('Prince Edward Island', 'Prince Edward Island'), ('Quebec', 'Quebec'), ('Saskatchewan', 'Saskatchewan'), ('Northwest Territories', 'Northwest Territories'), ('Nunavut', 'Nunavut'), ('Yukon', 'Yukon'), ('Alabama', 'Alabama'), ('Alaska', 'Alaska'), ('Arizona', 'Arizona'), ('Arkansas', 'Arkansas'), ('California', 'California'), ('Colorado', 'Colorado'), ('Connecticut', 'Connecticut'), ('Delaware', 'Delaware'), ('Florida', 'Florida'), ('Georgia', 'Georgia'), ('Hawaii', 'Hawaii'), ('Idaho', 'Idaho'), ('Illinois', 'Illinois'), ('Indiana', 'Indiana'), ('Iowa', 'Iowa'), ('Kansas', 'Kansas'), ('Kentucky', 'Kentucky'), ('Louisiana', 'Louisiana'), ('Maine', 'Maine'), ('Maryland', 'Maryland'), ('Massachusetts', 'Massachusetts'), ('Michigan', 'Michigan'), ('Minnesota', 'Minnesota'), ('Mississippi', 'Mississippi'), ('Missouri', 'Missouri'), ('Montana', 'Montana'), ('Nebraska', 'Nebraska'), ('Nevada', 'Nevada'), ('New Hampshire', 'New Hampshire'), ('New Jersey', 'New Jersey'), ('New Mexico', 'New Mexico'), ('New York', 'New York'), ('North Carolina', 'North Carolina'), ('North Dakota', 'North Dakota'), ('Ohio', 'Ohio'), ('Oklahoma', 'Oklahoma'), ('Oregon', 'Oregon'), ('Pennsylvania', 'Pennsylvania'), ('Rhode Island', 'Rhode Island'), ('South Carolina', 'South Carolina'), ('South Dakota', 'South Dakota'), ('Tennessee', 'Tennessee'), ('Texas', 'Texas'), ('Utah', 'Utah'), ('Vermont', 'Vermont'), ('Virginia', 'Virginia'), ('Washington', 'Washington'), ('West Virginia', 'West Virginia'), ('Wisconsin', 'Wisconsin'), ('Wyoming', 'Wyoming'), ('Other', 'Other')], max_length=63)),
('country', models.CharField(choices=[('Canada', 'Canada'), ('United States', 'United States'), ('Mexico', 'Mexico'), ('Afghanistan', 'Afghanistan'), ('Albania', 'Albania'), ('Algeria', 'Algeria'), ('Andorra', 'Andorra'), ('Angola', 'Angola'), ('Antigua and Barbuda', 'Antigua and Barbuda'), ('Argentina', 'Argentina'), ('Armenia', 'Armenia'), ('Aruba', 'Aruba'), ('Australia', 'Australia'), ('Austria', 'Austria'), ('Azerbaijan', 'Azerbaijan'), ('Bahamas, The', 'Bahamas, The'), ('Bahrain', 'Bahrain'), ('Bangladesh', 'Bangladesh'), ('Barbados', 'Barbados'), ('Belarus', 'Belarus'), ('Belgium', 'Belgium'), ('Belize', 'Belize'), ('Benin', 'Benin'), ('Bhutan', 'Bhutan'), ('Bolivia', 'Bolivia'), ('Bosnia and Herzegovina', 'Bosnia and Herzegovina'), ('Botswana', 'Botswana'), ('Brazil', 'Brazil'), ('Brunei', 'Brunei'), ('Bulgaria', 'Bulgaria'), ('Burkina Faso', 'Burkina Faso'), ('Burma', 'Burma'), ('Burundi', 'Burundi'), ('Cambodia', 'Cambodia'), ('Cameroon', 'Cameroon'), ('Cape Verde', 'Cape Verde'), ('Central African Republic', 'Central African Republic'), ('Chad', 'Chad'), ('Chile', 'Chile'), ('China', 'China'), ('Colombia', 'Colombia'), ('Comoros', 'Comoros'), ('Congo, Democratic Republic of the', 'Congo, Democratic Republic of the'), ('Congo, Republic of the', 'Congo, Republic of the'), ('Costa Rica', 'Costa Rica'), ("Cote d'Ivoire", "Cote d'Ivoire"), ('Croatia', 'Croatia'), ('Cuba', 'Cuba'), ('Curacao', 'Curacao'), ('Cyprus', 'Cyprus'), ('Czech Republic', 'Czech Republic'), ('Denmark', 'Denmark'), ('Djibouti', 'Djibouti'), ('Dominica', 'Dominica'), ('Dominican Republic', 'Dominican Republic'), ('East Timor', 'East Timor'), ('Ecuador', 'Ecuador'), ('Egypt', 'Egypt'), ('El Salvador', 'El Salvador'), ('Equatorial Guinea', 'Equatorial Guinea'), ('Eritrea', 'Eritrea'), ('Estonia', 'Estonia'), ('Ethiopia', 'Ethiopia'), ('Fiji', 'Fiji'), ('Finland', 'Finland'), ('France', 'France'), ('Gabon', 'Gabon'), ('Gambia, The', 'Gambia, The'), ('Georgia', 'Georgia'), ('Germany', 
'Germany'), ('Ghana', 'Ghana'), ('Greece', 'Greece'), ('Grenada', 'Grenada'), ('Guatemala', 'Guatemala'), ('Guinea', 'Guinea'), ('Guinea-Bissau', 'Guinea-Bissau'), ('Guyana', 'Guyana'), ('Haiti', 'Haiti'), ('Holy See', 'Holy See'), ('Honduras', 'Honduras'), ('Hong Kong', 'Hong Kong'), ('Hungary', 'Hungary'), ('Iceland', 'Iceland'), ('India', 'India'), ('Indonesia', 'Indonesia'), ('Iran', 'Iran'), ('Iraq', 'Iraq'), ('Ireland', 'Ireland'), ('Israel', 'Israel'), ('Italy', 'Italy'), ('Jamaica', 'Jamaica'), ('Japan', 'Japan'), ('Jordan', 'Jordan'), ('Kazakhstan', 'Kazakhstan'), ('Kenya', 'Kenya'), ('Kiribati', 'Kiribati'), ('Korea, North', 'Korea, North'), ('Korea, South', 'Korea, South'), ('Kosovo', 'Kosovo'), ('Kuwait', 'Kuwait'), ('Kyrgyzstan', 'Kyrgyzstan'), ('Laos', 'Laos'), ('Latvia', 'Latvia'), ('Lebanon', 'Lebanon'), ('Lesotho', 'Lesotho'), ('Liberia', 'Liberia'), ('Libya', 'Libya'), ('Liechtenstein', 'Liechtenstein'), ('Lithuania', 'Lithuania'), ('Luxembourg', 'Luxembourg'), ('Macau', 'Macau'), ('Macedonia', 'Macedonia'), ('Madagascar', 'Madagascar'), ('Malawi', 'Malawi'), ('Malaysia', 'Malaysia'), ('Maldives', 'Maldives'), ('Mali', 'Mali'), ('Malta', 'Malta'), ('Marshall Islands', 'Marshall Islands'), ('Mauritania', 'Mauritania'), ('Mauritius', 'Mauritius'), ('Mexico', 'Mexico'), ('Micronesia', 'Micronesia'), ('Moldova', 'Moldova'), ('Monaco', 'Monaco'), ('Mongolia', 'Mongolia'), ('Montenegro', 'Montenegro'), ('Morocco', 'Morocco'), ('Mozambique', 'Mozambique'), ('Namibia', 'Namibia'), ('Nauru', 'Nauru'), ('Nepal', 'Nepal'), ('Netherlands', 'Netherlands'), ('Netherlands Antilles', 'Netherlands Antilles'), ('New Zealand', 'New Zealand'), ('Nicaragua', 'Nicaragua'), ('Niger', 'Niger'), ('Nigeria', 'Nigeria'), ('North Korea', 'North Korea'), ('Norway', 'Norway'), ('Oman', 'Oman'), ('Pakistan', 'Pakistan'), ('Palau', 'Palau'), ('Palestinian Territories', 'Palestinian Territories'), ('Panama', 'Panama'), ('Papua New Guinea', 'Papua New Guinea'), ('Paraguay', 
'Paraguay'), ('Peru', 'Peru'), ('Philippines', 'Philippines'), ('Poland', 'Poland'), ('Portugal', 'Portugal'), ('Qatar', 'Qatar'), ('Romania', 'Romania'), ('Russia', 'Russia'), ('Rwanda', 'Rwanda'), ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'), ('Saint Lucia', 'Saint Lucia'), ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'), ('Samoa', 'Samoa'), ('San Marino', 'San Marino'), ('Sao Tome and Principe', 'Sao Tome and Principe'), ('Saudi Arabia', 'Saudi Arabia'), ('Senegal', 'Senegal'), ('Serbia', 'Serbia'), ('Seychelles', 'Seychelles'), ('Sierra Leone', 'Sierra Leone'), ('Singapore', 'Singapore'), ('Sint Maarten', 'Sint Maarten'), ('Slovakia', 'Slovakia'), ('Slovenia', 'Slovenia'), ('Solomon Islands', 'Solomon Islands'), ('Somalia', 'Somalia'), ('South Africa', 'South Africa'), ('South Korea', 'South Korea'), ('South Sudan', 'South Sudan'), ('Spain', 'Spain'), ('Sri Lanka', 'Sri Lanka'), ('Sudan', 'Sudan'), ('Suriname', 'Suriname'), ('Swaziland', 'Swaziland'), ('Sweden', 'Sweden'), ('Switzerland', 'Switzerland'), ('Syria', 'Syria'), ('Taiwan', 'Taiwan'), ('Tajikistan', 'Tajikistan'), ('Tanzania', 'Tanzania'), ('Thailand', 'Thailand'), ('Timor-Leste', 'Timor-Leste'), ('Togo', 'Togo'), ('Tonga', 'Tonga'), ('Trinidad and Tobago', 'Trinidad and Tobago'), ('Tunisia', 'Tunisia'), ('Turkey', 'Turkey'), ('Turkmenistan', 'Turkmenistan'), ('Tuvalu', 'Tuvalu'), ('Uganda', 'Uganda'), ('Ukraine', 'Ukraine'), ('United Arab Emirates', 'United Arab Emirates'), ('United Kingdom', 'United Kingdom'), ('Uruguay', 'Uruguay'), ('Uzbekistan', 'Uzbekistan'), ('Vanuatu', 'Vanuatu'), ('Venezuela', 'Venezuela'), ('Vietnam', 'Vietnam'), ('Yemen', 'Yemen'), ('Zambia', 'Zambia'), ('Zimbabwe', 'Zimbabwe'), ('Other', 'Other')], max_length=63)),
('postal', models.CharField(max_length=31)),
('currency', models.PositiveSmallIntegerField(choices=[(124, 'CAD'), (840, 'USD')], default=124)),
('language', models.CharField(choices=[('EN', 'English')], default='EN', max_length=2)),
('website', models.URLField(null=True, blank=True)),
('email', models.EmailField(null=True, blank=True, max_length=254)),
('phone', models.CharField(null=True, blank=True, max_length=10)),
('fax', models.CharField(null=True, blank=True, max_length=10)),
('is_open_monday', models.BooleanField(default=False)),
('is_open_tuesday', models.BooleanField(default=False)),
('is_open_wednesday', models.BooleanField(default=False)),
('is_open_thursday', models.BooleanField(default=False)),
('is_open_friday', models.BooleanField(default=False)),
('is_open_saturday', models.BooleanField(default=False)),
('is_open_sunday', models.BooleanField(default=False)),
('monday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('tuesday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('wednesday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('thursday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('friday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('saturday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('sunday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('monday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('tuesday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('wednesday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('thursday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('friday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('saturday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('sunday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('is_aggregated', models.BooleanField(default=True, db_index=True)),
('has_shipping_rate_override', models.BooleanField(default=False)),
('is_comics_vendor', models.BooleanField(default=True)),
('is_furniture_vendor', models.BooleanField(default=False)),
('is_coins_vendor', models.BooleanField(default=False)),
('paypal_email', models.EmailField(max_length=254)),
('style', models.CharField(choices=[('ecantina-style-0.css', 'Green'), ('ecantina-style-1.css', 'Ligh Green'), ('ecantina-style-2.css', 'Aqua Green'), ('ecantina-style-3.css', 'Blue'), ('ecantina-style-4.css', 'Purple'), ('ecantina-style-5.css', 'Red'), ('ecantina-style-6.css', 'Dark Grey'), ('ecantina-style-7.css', 'Grey'), ('ecantina-style-8.css', 'Light Aqua Green'), ('ecantina-style-9.css', 'Yellow'), ('ecantina-style-10.css', 'Light Red'), ('ecantina-style-11.css', 'Dark Blue'), ('ecantina-style-black.css', 'Black')], default='ecantina-style-5.css', max_length=31)),
('employees', models.ManyToManyField(to='api.Employee', blank=True)),
('header', models.ForeignKey(related_name='store_header', to='api.ImageUpload', blank=True, null=True)),
('logo', models.ForeignKey(related_name='store_logo', to='api.ImageUpload', blank=True, null=True)),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_stores',
'ordering': ('store_id',),
},
),
migrations.CreateModel(
name='StoreShippingPreference',
fields=[
('shipping_pref_id', models.AutoField(serialize=False, primary_key=True)),
('is_pickup_only', models.BooleanField(default=False)),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_store_shipping_preferences',
'ordering': ('organization',),
},
),
migrations.CreateModel(
name='StoreShippingRate',
fields=[
('shipping_rate_id', models.AutoField(serialize=False, primary_key=True)),
('country', models.PositiveSmallIntegerField(choices=[(124, 'Canada'), (840, 'United States'), (484, 'Mexico')], null=True, validators=[django.core.validators.MinValueValidator(4), django.core.validators.MaxValueValidator(840)], blank=True)),
('comics_rate1', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate2', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate3', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate4', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate5', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate6', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate7', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate8', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate9', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate10', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('organization', models.ForeignKey(to='api.Organization')),
('store', models.ForeignKey(to='api.Store')),
],
options={
'db_table': 'ec_store_shipping_rates',
'ordering': ('country',),
},
),
migrations.CreateModel(
name='SubDomain',
fields=[
('sub_domain_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(unique=True, null=True, blank=True, db_index=True, max_length=127)),
('organization', models.ForeignKey(to='api.Organization', blank=True, null=True)),
('store', models.ForeignKey(to='api.Store', blank=True, null=True)),
],
options={
'db_table': 'ec_subdomains',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Tag',
fields=[
('tag_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=127)),
('discount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('discount_type', models.PositiveSmallIntegerField(choices=[(1, '%'), (2, '$')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(2)])),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_tags',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='UnifiedShippingRate',
fields=[
('shipping_rate_id', models.AutoField(serialize=False, primary_key=True)),
('country', models.PositiveSmallIntegerField(choices=[(124, 'Canada'), (840, 'United States'), (484, 'Mexico')], null=True, validators=[django.core.validators.MinValueValidator(4), django.core.validators.MaxValueValidator(840)], blank=True)),
('comics_rate1', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate2', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate3', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate4', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate5', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate6', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate7', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate8', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate9', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate10', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
],
options={
'db_table': 'ec_unified_shipping_rates',
'ordering': ('country',),
},
),
migrations.CreateModel(
name='Wishlist',
fields=[
('wishlist_id', models.AutoField(serialize=False, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(to='api.Customer')),
('product', models.ForeignKey(to='api.Product')),
],
options={
'db_table': 'ec_wishlists',
},
),
migrations.AddField(
model_name='storeshippingpreference',
name='rates',
field=models.ManyToManyField(related_name='store_shipping_rates', to='api.StoreShippingRate', blank=True, db_index=True),
),
migrations.AddField(
model_name='storeshippingpreference',
name='store',
field=models.ForeignKey(to='api.Store'),
),
migrations.AddField(
model_name='section',
name='store',
field=models.ForeignKey(to='api.Store'),
),
migrations.AddField(
model_name='receipt',
name='store',
field=models.ForeignKey(to='api.Store', blank=True, null=True),
),
migrations.AddField(
model_name='pulllist',
name='store',
field=models.ForeignKey(to='api.Store'),
),
migrations.AddField(
model_name='product',
name='section',
field=models.ForeignKey(to='api.Section'),
),
migrations.AddField(
model_name='product',
name='store',
field=models.ForeignKey(to='api.Store'),
),
migrations.AddField(
model_name='product',
name='tags',
field=models.ManyToManyField(related_name='product_tags', to='api.Tag', blank=True, db_index=True),
),
migrations.AddField(
model_name='printhistory',
name='store',
field=models.ForeignKey(to='api.Store'),
),
migrations.AddField(
model_name='orgshippingpreference',
name='rates',
field=models.ManyToManyField(related_name='ord_shipping_rates', to='api.OrgShippingRate', blank=True, db_index=True),
),
migrations.AddField(
model_name='helprequest',
name='organization',
field=models.ForeignKey(to='api.Organization', blank=True, null=True),
),
migrations.AddField(
model_name='helprequest',
name='screenshot',
field=models.ForeignKey(to='api.ImageUpload', blank=True, null=True),
),
migrations.AddField(
model_name='helprequest',
name='store',
field=models.ForeignKey(to='api.Store', blank=True, null=True),
),
migrations.AddField(
model_name='gcdstory',
name='type',
field=models.ForeignKey(to='api.GCDStoryType'),
),
migrations.AddField(
model_name='gcdissue',
name='series',
field=models.ForeignKey(to='api.GCDSeries', null=True),
),
migrations.AddField(
model_name='gcdindiciapublisher',
name='parent',
field=models.ForeignKey(to='api.GCDPublisher', null=True),
),
migrations.AddField(
model_name='gcdbranduse',
name='publisher',
field=models.ForeignKey(to='api.GCDPublisher'),
),
migrations.AddField(
model_name='gcdbrandgroup',
name='parent',
field=models.ForeignKey(to='api.GCDPublisher', null=True),
),
migrations.AddField(
model_name='gcdbrandemblemgroup',
name='brandgroup',
field=models.ForeignKey(to='api.GCDBrandGroup', null=True),
),
migrations.AddField(
model_name='gcdbrand',
name='group',
field=models.ManyToManyField(db_table='gcd_brand_emblem_group', to='api.GCDBrandGroup', blank=True),
),
migrations.AddField(
model_name='gcdbrand',
name='images',
field=models.ManyToManyField(to='api.GCDImage', blank=True),
),
migrations.AddField(
model_name='gcdbrand',
name='parent',
field=models.ForeignKey(to='api.GCDPublisher', null=True),
),
migrations.AddField(
model_name='employee',
name='organization',
field=models.ForeignKey(to='api.Organization'),
),
migrations.AddField(
model_name='employee',
name='profile',
field=models.ForeignKey(to='api.ImageUpload', blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL),
),
migrations.AddField(
model_name='employee',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='emailsubscription',
name='organization',
field=models.ForeignKey(to='api.Organization', blank=True, null=True),
),
migrations.AddField(
model_name='emailsubscription',
name='store',
field=models.ForeignKey(to='api.Store', blank=True, null=True),
),
migrations.AddField(
model_name='customer',
name='profile',
field=models.ForeignKey(to='api.ImageUpload', blank=True, null=True),
),
migrations.AddField(
model_name='customer',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, blank=True, null=True),
),
migrations.AddField(
model_name='comic',
name='issue',
field=models.ForeignKey(to='api.GCDIssue', blank=True, null=True),
),
migrations.AddField(
model_name='comic',
name='organization',
field=models.ForeignKey(to='api.Organization'),
),
migrations.AddField(
model_name='comic',
name='product',
field=models.ForeignKey(to='api.Product'),
),
migrations.AddField(
model_name='catalogitem',
name='image',
field=models.ForeignKey(to='api.ImageUpload', blank=True, null=True),
),
migrations.AddField(
model_name='catalogitem',
name='organization',
field=models.ForeignKey(to='api.Organization'),
),
migrations.AddField(
model_name='catalogitem',
name='store',
field=models.ForeignKey(to='api.Store'),
),
]
| 93.256343 | 5,778 | 0.572679 |
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
import django.db.models.deletion
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BannedDomain',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(unique=True, db_index=True, max_length=63)),
('banned_on', models.DateTimeField(auto_now_add=True)),
('reason', models.CharField(null=True, blank=True, max_length=127)),
],
options={
'db_table': 'ec_banned_domains',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='BannedIP',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('address', models.GenericIPAddressField(unique=True, db_index=True)),
('banned_on', models.DateTimeField(auto_now_add=True)),
('reason', models.CharField(null=True, blank=True, max_length=127)),
],
options={
'db_table': 'ec_banned_ips',
'ordering': ('address',),
},
),
migrations.CreateModel(
name='BannedWord',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('text', models.CharField(unique=True, db_index=True, max_length=63)),
('banned_on', models.DateTimeField(auto_now_add=True)),
('reason', models.CharField(null=True, blank=True, max_length=127)),
],
options={
'db_table': 'ec_banned_words',
'ordering': ('text',),
},
),
migrations.CreateModel(
name='Brand',
fields=[
('brand_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(db_index=True, max_length=127)),
],
options={
'db_table': 'ec_brands',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='CatalogItem',
fields=[
('catalog_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(db_index=True, max_length=127)),
('type', models.PositiveSmallIntegerField(choices=[(1, 'Comic'), (2, 'Furniture'), (3, 'Coin')], default=1, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)], db_index=True)),
('description', models.TextField(default='', blank=True)),
('brand_name', models.CharField(db_index=True, max_length=127)),
('created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('length_in_meters', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0)], blank=True)),
('width_in_meters', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0)], blank=True)),
('height_in_meters', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0)], blank=True)),
('weight_in_kilograms', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0)], blank=True)),
('volume_in_litres', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0)], blank=True)),
('materials', models.CharField(null=True, blank=True, max_length=127)),
('is_tangible', models.BooleanField(default=True)),
('is_flammable', models.BooleanField(default=False)),
('is_biohazard', models.BooleanField(default=False)),
('is_toxic', models.BooleanField(default=False)),
('is_explosive', models.BooleanField(default=False)),
('is_corrosive', models.BooleanField(default=False)),
('is_volatile', models.BooleanField(default=False)),
('is_radioactive', models.BooleanField(default=False)),
('is_restricted', models.BooleanField(default=False)),
('restrictions', models.TextField(default='', blank=True)),
],
options={
'db_table': 'ec_catalog_items',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Category',
fields=[
('category_id', models.AutoField(serialize=False, primary_key=True)),
('parent_id', models.PositiveIntegerField(default=0)),
('name', models.CharField(max_length=127)),
],
options={
'db_table': 'ec_categories',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Comic',
fields=[
('comic_id', models.AutoField(serialize=False, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('is_cgc_rated', models.BooleanField(default=False)),
('age', models.PositiveSmallIntegerField(choices=[(1, 'Gold'), (2, 'Silver'), (3, 'Bronze'), (4, 'Copper')], null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(4)], blank=True)),
('cgc_rating', models.FloatField(choices=[(10.0, '10.0'), (9.9, '9.9'), (9.8, '9.8'), (9.6, '9.6'), (9.4, '9.4'), (9.2, '9.2'), (9.0, '9.0'), (8.5, '8.5'), (8.0, '8.0'), (7.5, '7.5'), (7.0, '7.0'), (6.5, '6.5'), (6.0, '6.0'), (5.5, '5.5'), (5.0, '5.0'), (4.5, '4.5'), (4.0, '4.0'), (3.5, '3.5'), (3.0, '3.0'), (2.5, '2.5'), (2.0, '2.0'), (1.8, '1.8'), (1.5, '1.5'), (1.0, '1.0'), (0.5, '.5'), (0, 'NR')], null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(10)], blank=True)),
('label_colour', models.CharField(choices=[('Purple', 'Purple'), ('Red', 'Red'), ('Blue', 'Blue'), ('Yellow', 'Yellow')], null=True, blank=True, max_length=63)),
('condition_rating', models.PositiveSmallIntegerField(choices=[(10, 'Near Mint'), (8, 'Very Fine'), (6, 'Fine'), (4, 'Very Good'), (2, 'Good'), (1, 'Fair'), (0, 'Poor')], null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)], blank=True)),
('is_canadian_priced_variant', models.BooleanField(default=False)),
('is_variant_cover', models.BooleanField(default=False)),
('is_retail_incentive_variant', models.BooleanField(default=False)),
('is_newsstand_edition', models.BooleanField(default=False)),
('catalog', models.ForeignKey(to='api.CatalogItem', blank=True, null=True)),
],
options={
'db_table': 'ec_comics',
'ordering': ('issue',),
},
),
migrations.CreateModel(
name='Customer',
fields=[
('customer_id', models.AutoField(serialize=False, primary_key=True)),
('joined', models.DateTimeField(db_index=True, auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('is_suspended', models.BooleanField(default=False)),
('is_tos_signed', models.BooleanField(default=False)),
('wants_newsletter', models.BooleanField(default=False)),
('wants_flyers', models.BooleanField(default=False)),
('is_verified', models.BooleanField(default=False)),
('verification_key', models.CharField(default='', blank=True, max_length=63)),
('first_name', models.CharField(db_index=True, max_length=63)),
('last_name', models.CharField(db_index=True, max_length=63)),
('email', models.EmailField(unique=True, null=True, blank=True, db_index=True, max_length=254)),
('date_of_birth', models.DateField(default=datetime.datetime.now)),
('billing_phone', models.CharField(null=True, blank=True, db_index=True, max_length=10)),
('billing_street_name', models.CharField(max_length=63)),
('billing_street_number', models.CharField(max_length=15)),
('billing_unit_number', models.CharField(null=True, blank=True, max_length=15)),
('billing_city', models.CharField(max_length=63)),
('billing_province', models.CharField(choices=[('Alberta', 'Alberta'), ('British Columbia', 'British Columbia'), ('Manitoba', 'Manitoba'), ('New Brunswick', 'New Brunswick'), ('Newfoundland and Labrador', 'Newfoundland and Labrador'), ('Nova Scotia', 'Nova Scotia'), ('Ontario', 'Ontario'), ('Prince Edward Island', 'Prince Edward Island'), ('Quebec', 'Quebec'), ('Saskatchewan', 'Saskatchewan'), ('Northwest Territories', 'Northwest Territories'), ('Nunavut', 'Nunavut'), ('Yukon', 'Yukon'), ('Alabama', 'Alabama'), ('Alaska', 'Alaska'), ('Arizona', 'Arizona'), ('Arkansas', 'Arkansas'), ('California', 'California'), ('Colorado', 'Colorado'), ('Connecticut', 'Connecticut'), ('Delaware', 'Delaware'), ('Florida', 'Florida'), ('Georgia', 'Georgia'), ('Hawaii', 'Hawaii'), ('Idaho', 'Idaho'), ('Illinois', 'Illinois'), ('Indiana', 'Indiana'), ('Iowa', 'Iowa'), ('Kansas', 'Kansas'), ('Kentucky', 'Kentucky'), ('Louisiana', 'Louisiana'), ('Maine', 'Maine'), ('Maryland', 'Maryland'), ('Massachusetts', 'Massachusetts'), ('Michigan', 'Michigan'), ('Minnesota', 'Minnesota'), ('Mississippi', 'Mississippi'), ('Missouri', 'Missouri'), ('Montana', 'Montana'), ('Nebraska', 'Nebraska'), ('Nevada', 'Nevada'), ('New Hampshire', 'New Hampshire'), ('New Jersey', 'New Jersey'), ('New Mexico', 'New Mexico'), ('New York', 'New York'), ('North Carolina', 'North Carolina'), ('North Dakota', 'North Dakota'), ('Ohio', 'Ohio'), ('Oklahoma', 'Oklahoma'), ('Oregon', 'Oregon'), ('Pennsylvania', 'Pennsylvania'), ('Rhode Island', 'Rhode Island'), ('South Carolina', 'South Carolina'), ('South Dakota', 'South Dakota'), ('Tennessee', 'Tennessee'), ('Texas', 'Texas'), ('Utah', 'Utah'), ('Vermont', 'Vermont'), ('Virginia', 'Virginia'), ('Washington', 'Washington'), ('West Virginia', 'West Virginia'), ('Wisconsin', 'Wisconsin'), ('Wyoming', 'Wyoming'), ('Other', 'Other')], max_length=63)),
('billing_country', models.CharField(choices=[('Canada', 'Canada'), ('United States', 'United States'), ('Mexico', 'Mexico'), ('Afghanistan', 'Afghanistan'), ('Albania', 'Albania'), ('Algeria', 'Algeria'), ('Andorra', 'Andorra'), ('Angola', 'Angola'), ('Antigua and Barbuda', 'Antigua and Barbuda'), ('Argentina', 'Argentina'), ('Armenia', 'Armenia'), ('Aruba', 'Aruba'), ('Australia', 'Australia'), ('Austria', 'Austria'), ('Azerbaijan', 'Azerbaijan'), ('Bahamas, The', 'Bahamas, The'), ('Bahrain', 'Bahrain'), ('Bangladesh', 'Bangladesh'), ('Barbados', 'Barbados'), ('Belarus', 'Belarus'), ('Belgium', 'Belgium'), ('Belize', 'Belize'), ('Benin', 'Benin'), ('Bhutan', 'Bhutan'), ('Bolivia', 'Bolivia'), ('Bosnia and Herzegovina', 'Bosnia and Herzegovina'), ('Botswana', 'Botswana'), ('Brazil', 'Brazil'), ('Brunei', 'Brunei'), ('Bulgaria', 'Bulgaria'), ('Burkina Faso', 'Burkina Faso'), ('Burma', 'Burma'), ('Burundi', 'Burundi'), ('Cambodia', 'Cambodia'), ('Cameroon', 'Cameroon'), ('Cape Verde', 'Cape Verde'), ('Central African Republic', 'Central African Republic'), ('Chad', 'Chad'), ('Chile', 'Chile'), ('China', 'China'), ('Colombia', 'Colombia'), ('Comoros', 'Comoros'), ('Congo, Democratic Republic of the', 'Congo, Democratic Republic of the'), ('Congo, Republic of the', 'Congo, Republic of the'), ('Costa Rica', 'Costa Rica'), ("Cote d'Ivoire", "Cote d'Ivoire"), ('Croatia', 'Croatia'), ('Cuba', 'Cuba'), ('Curacao', 'Curacao'), ('Cyprus', 'Cyprus'), ('Czech Republic', 'Czech Republic'), ('Denmark', 'Denmark'), ('Djibouti', 'Djibouti'), ('Dominica', 'Dominica'), ('Dominican Republic', 'Dominican Republic'), ('East Timor', 'East Timor'), ('Ecuador', 'Ecuador'), ('Egypt', 'Egypt'), ('El Salvador', 'El Salvador'), ('Equatorial Guinea', 'Equatorial Guinea'), ('Eritrea', 'Eritrea'), ('Estonia', 'Estonia'), ('Ethiopia', 'Ethiopia'), ('Fiji', 'Fiji'), ('Finland', 'Finland'), ('France', 'France'), ('Gabon', 'Gabon'), ('Gambia, The', 'Gambia, The'), ('Georgia', 'Georgia'), ('Germany', 
'Germany'), ('Ghana', 'Ghana'), ('Greece', 'Greece'), ('Grenada', 'Grenada'), ('Guatemala', 'Guatemala'), ('Guinea', 'Guinea'), ('Guinea-Bissau', 'Guinea-Bissau'), ('Guyana', 'Guyana'), ('Haiti', 'Haiti'), ('Holy See', 'Holy See'), ('Honduras', 'Honduras'), ('Hong Kong', 'Hong Kong'), ('Hungary', 'Hungary'), ('Iceland', 'Iceland'), ('India', 'India'), ('Indonesia', 'Indonesia'), ('Iran', 'Iran'), ('Iraq', 'Iraq'), ('Ireland', 'Ireland'), ('Israel', 'Israel'), ('Italy', 'Italy'), ('Jamaica', 'Jamaica'), ('Japan', 'Japan'), ('Jordan', 'Jordan'), ('Kazakhstan', 'Kazakhstan'), ('Kenya', 'Kenya'), ('Kiribati', 'Kiribati'), ('Korea, North', 'Korea, North'), ('Korea, South', 'Korea, South'), ('Kosovo', 'Kosovo'), ('Kuwait', 'Kuwait'), ('Kyrgyzstan', 'Kyrgyzstan'), ('Laos', 'Laos'), ('Latvia', 'Latvia'), ('Lebanon', 'Lebanon'), ('Lesotho', 'Lesotho'), ('Liberia', 'Liberia'), ('Libya', 'Libya'), ('Liechtenstein', 'Liechtenstein'), ('Lithuania', 'Lithuania'), ('Luxembourg', 'Luxembourg'), ('Macau', 'Macau'), ('Macedonia', 'Macedonia'), ('Madagascar', 'Madagascar'), ('Malawi', 'Malawi'), ('Malaysia', 'Malaysia'), ('Maldives', 'Maldives'), ('Mali', 'Mali'), ('Malta', 'Malta'), ('Marshall Islands', 'Marshall Islands'), ('Mauritania', 'Mauritania'), ('Mauritius', 'Mauritius'), ('Mexico', 'Mexico'), ('Micronesia', 'Micronesia'), ('Moldova', 'Moldova'), ('Monaco', 'Monaco'), ('Mongolia', 'Mongolia'), ('Montenegro', 'Montenegro'), ('Morocco', 'Morocco'), ('Mozambique', 'Mozambique'), ('Namibia', 'Namibia'), ('Nauru', 'Nauru'), ('Nepal', 'Nepal'), ('Netherlands', 'Netherlands'), ('Netherlands Antilles', 'Netherlands Antilles'), ('New Zealand', 'New Zealand'), ('Nicaragua', 'Nicaragua'), ('Niger', 'Niger'), ('Nigeria', 'Nigeria'), ('North Korea', 'North Korea'), ('Norway', 'Norway'), ('Oman', 'Oman'), ('Pakistan', 'Pakistan'), ('Palau', 'Palau'), ('Palestinian Territories', 'Palestinian Territories'), ('Panama', 'Panama'), ('Papua New Guinea', 'Papua New Guinea'), ('Paraguay', 
'Paraguay'), ('Peru', 'Peru'), ('Philippines', 'Philippines'), ('Poland', 'Poland'), ('Portugal', 'Portugal'), ('Qatar', 'Qatar'), ('Romania', 'Romania'), ('Russia', 'Russia'), ('Rwanda', 'Rwanda'), ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'), ('Saint Lucia', 'Saint Lucia'), ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'), ('Samoa', 'Samoa'), ('San Marino', 'San Marino'), ('Sao Tome and Principe', 'Sao Tome and Principe'), ('Saudi Arabia', 'Saudi Arabia'), ('Senegal', 'Senegal'), ('Serbia', 'Serbia'), ('Seychelles', 'Seychelles'), ('Sierra Leone', 'Sierra Leone'), ('Singapore', 'Singapore'), ('Sint Maarten', 'Sint Maarten'), ('Slovakia', 'Slovakia'), ('Slovenia', 'Slovenia'), ('Solomon Islands', 'Solomon Islands'), ('Somalia', 'Somalia'), ('South Africa', 'South Africa'), ('South Korea', 'South Korea'), ('South Sudan', 'South Sudan'), ('Spain', 'Spain'), ('Sri Lanka', 'Sri Lanka'), ('Sudan', 'Sudan'), ('Suriname', 'Suriname'), ('Swaziland', 'Swaziland'), ('Sweden', 'Sweden'), ('Switzerland', 'Switzerland'), ('Syria', 'Syria'), ('Taiwan', 'Taiwan'), ('Tajikistan', 'Tajikistan'), ('Tanzania', 'Tanzania'), ('Thailand', 'Thailand'), ('Timor-Leste', 'Timor-Leste'), ('Togo', 'Togo'), ('Tonga', 'Tonga'), ('Trinidad and Tobago', 'Trinidad and Tobago'), ('Tunisia', 'Tunisia'), ('Turkey', 'Turkey'), ('Turkmenistan', 'Turkmenistan'), ('Tuvalu', 'Tuvalu'), ('Uganda', 'Uganda'), ('Ukraine', 'Ukraine'), ('United Arab Emirates', 'United Arab Emirates'), ('United Kingdom', 'United Kingdom'), ('Uruguay', 'Uruguay'), ('Uzbekistan', 'Uzbekistan'), ('Vanuatu', 'Vanuatu'), ('Venezuela', 'Venezuela'), ('Vietnam', 'Vietnam'), ('Yemen', 'Yemen'), ('Zambia', 'Zambia'), ('Zimbabwe', 'Zimbabwe'), ('Other', 'Other')], max_length=63)),
('billing_postal', models.CharField(db_index=True, max_length=31)),
('is_shipping_same_as_billing', models.BooleanField(default=False)),
('shipping_phone', models.CharField(null=True, blank=True, db_index=True, max_length=10)),
('shipping_street_name', models.CharField(max_length=63)),
('shipping_street_number', models.CharField(max_length=15)),
('shipping_unit_number', models.CharField(null=True, blank=True, max_length=15)),
('shipping_city', models.CharField(max_length=63)),
('shipping_province', models.CharField(choices=[('Alberta', 'Alberta'), ('British Columbia', 'British Columbia'), ('Manitoba', 'Manitoba'), ('New Brunswick', 'New Brunswick'), ('Newfoundland and Labrador', 'Newfoundland and Labrador'), ('Nova Scotia', 'Nova Scotia'), ('Ontario', 'Ontario'), ('Prince Edward Island', 'Prince Edward Island'), ('Quebec', 'Quebec'), ('Saskatchewan', 'Saskatchewan'), ('Northwest Territories', 'Northwest Territories'), ('Nunavut', 'Nunavut'), ('Yukon', 'Yukon'), ('Alabama', 'Alabama'), ('Alaska', 'Alaska'), ('Arizona', 'Arizona'), ('Arkansas', 'Arkansas'), ('California', 'California'), ('Colorado', 'Colorado'), ('Connecticut', 'Connecticut'), ('Delaware', 'Delaware'), ('Florida', 'Florida'), ('Georgia', 'Georgia'), ('Hawaii', 'Hawaii'), ('Idaho', 'Idaho'), ('Illinois', 'Illinois'), ('Indiana', 'Indiana'), ('Iowa', 'Iowa'), ('Kansas', 'Kansas'), ('Kentucky', 'Kentucky'), ('Louisiana', 'Louisiana'), ('Maine', 'Maine'), ('Maryland', 'Maryland'), ('Massachusetts', 'Massachusetts'), ('Michigan', 'Michigan'), ('Minnesota', 'Minnesota'), ('Mississippi', 'Mississippi'), ('Missouri', 'Missouri'), ('Montana', 'Montana'), ('Nebraska', 'Nebraska'), ('Nevada', 'Nevada'), ('New Hampshire', 'New Hampshire'), ('New Jersey', 'New Jersey'), ('New Mexico', 'New Mexico'), ('New York', 'New York'), ('North Carolina', 'North Carolina'), ('North Dakota', 'North Dakota'), ('Ohio', 'Ohio'), ('Oklahoma', 'Oklahoma'), ('Oregon', 'Oregon'), ('Pennsylvania', 'Pennsylvania'), ('Rhode Island', 'Rhode Island'), ('South Carolina', 'South Carolina'), ('South Dakota', 'South Dakota'), ('Tennessee', 'Tennessee'), ('Texas', 'Texas'), ('Utah', 'Utah'), ('Vermont', 'Vermont'), ('Virginia', 'Virginia'), ('Washington', 'Washington'), ('West Virginia', 'West Virginia'), ('Wisconsin', 'Wisconsin'), ('Wyoming', 'Wyoming'), ('Other', 'Other')], max_length=63)),
('shipping_country', models.CharField(choices=[('Canada', 'Canada'), ('United States', 'United States'), ('Mexico', 'Mexico'), ('Afghanistan', 'Afghanistan'), ('Albania', 'Albania'), ('Algeria', 'Algeria'), ('Andorra', 'Andorra'), ('Angola', 'Angola'), ('Antigua and Barbuda', 'Antigua and Barbuda'), ('Argentina', 'Argentina'), ('Armenia', 'Armenia'), ('Aruba', 'Aruba'), ('Australia', 'Australia'), ('Austria', 'Austria'), ('Azerbaijan', 'Azerbaijan'), ('Bahamas, The', 'Bahamas, The'), ('Bahrain', 'Bahrain'), ('Bangladesh', 'Bangladesh'), ('Barbados', 'Barbados'), ('Belarus', 'Belarus'), ('Belgium', 'Belgium'), ('Belize', 'Belize'), ('Benin', 'Benin'), ('Bhutan', 'Bhutan'), ('Bolivia', 'Bolivia'), ('Bosnia and Herzegovina', 'Bosnia and Herzegovina'), ('Botswana', 'Botswana'), ('Brazil', 'Brazil'), ('Brunei', 'Brunei'), ('Bulgaria', 'Bulgaria'), ('Burkina Faso', 'Burkina Faso'), ('Burma', 'Burma'), ('Burundi', 'Burundi'), ('Cambodia', 'Cambodia'), ('Cameroon', 'Cameroon'), ('Cape Verde', 'Cape Verde'), ('Central African Republic', 'Central African Republic'), ('Chad', 'Chad'), ('Chile', 'Chile'), ('China', 'China'), ('Colombia', 'Colombia'), ('Comoros', 'Comoros'), ('Congo, Democratic Republic of the', 'Congo, Democratic Republic of the'), ('Congo, Republic of the', 'Congo, Republic of the'), ('Costa Rica', 'Costa Rica'), ("Cote d'Ivoire", "Cote d'Ivoire"), ('Croatia', 'Croatia'), ('Cuba', 'Cuba'), ('Curacao', 'Curacao'), ('Cyprus', 'Cyprus'), ('Czech Republic', 'Czech Republic'), ('Denmark', 'Denmark'), ('Djibouti', 'Djibouti'), ('Dominica', 'Dominica'), ('Dominican Republic', 'Dominican Republic'), ('East Timor', 'East Timor'), ('Ecuador', 'Ecuador'), ('Egypt', 'Egypt'), ('El Salvador', 'El Salvador'), ('Equatorial Guinea', 'Equatorial Guinea'), ('Eritrea', 'Eritrea'), ('Estonia', 'Estonia'), ('Ethiopia', 'Ethiopia'), ('Fiji', 'Fiji'), ('Finland', 'Finland'), ('France', 'France'), ('Gabon', 'Gabon'), ('Gambia, The', 'Gambia, The'), ('Georgia', 'Georgia'), 
('Germany', 'Germany'), ('Ghana', 'Ghana'), ('Greece', 'Greece'), ('Grenada', 'Grenada'), ('Guatemala', 'Guatemala'), ('Guinea', 'Guinea'), ('Guinea-Bissau', 'Guinea-Bissau'), ('Guyana', 'Guyana'), ('Haiti', 'Haiti'), ('Holy See', 'Holy See'), ('Honduras', 'Honduras'), ('Hong Kong', 'Hong Kong'), ('Hungary', 'Hungary'), ('Iceland', 'Iceland'), ('India', 'India'), ('Indonesia', 'Indonesia'), ('Iran', 'Iran'), ('Iraq', 'Iraq'), ('Ireland', 'Ireland'), ('Israel', 'Israel'), ('Italy', 'Italy'), ('Jamaica', 'Jamaica'), ('Japan', 'Japan'), ('Jordan', 'Jordan'), ('Kazakhstan', 'Kazakhstan'), ('Kenya', 'Kenya'), ('Kiribati', 'Kiribati'), ('Korea, North', 'Korea, North'), ('Korea, South', 'Korea, South'), ('Kosovo', 'Kosovo'), ('Kuwait', 'Kuwait'), ('Kyrgyzstan', 'Kyrgyzstan'), ('Laos', 'Laos'), ('Latvia', 'Latvia'), ('Lebanon', 'Lebanon'), ('Lesotho', 'Lesotho'), ('Liberia', 'Liberia'), ('Libya', 'Libya'), ('Liechtenstein', 'Liechtenstein'), ('Lithuania', 'Lithuania'), ('Luxembourg', 'Luxembourg'), ('Macau', 'Macau'), ('Macedonia', 'Macedonia'), ('Madagascar', 'Madagascar'), ('Malawi', 'Malawi'), ('Malaysia', 'Malaysia'), ('Maldives', 'Maldives'), ('Mali', 'Mali'), ('Malta', 'Malta'), ('Marshall Islands', 'Marshall Islands'), ('Mauritania', 'Mauritania'), ('Mauritius', 'Mauritius'), ('Mexico', 'Mexico'), ('Micronesia', 'Micronesia'), ('Moldova', 'Moldova'), ('Monaco', 'Monaco'), ('Mongolia', 'Mongolia'), ('Montenegro', 'Montenegro'), ('Morocco', 'Morocco'), ('Mozambique', 'Mozambique'), ('Namibia', 'Namibia'), ('Nauru', 'Nauru'), ('Nepal', 'Nepal'), ('Netherlands', 'Netherlands'), ('Netherlands Antilles', 'Netherlands Antilles'), ('New Zealand', 'New Zealand'), ('Nicaragua', 'Nicaragua'), ('Niger', 'Niger'), ('Nigeria', 'Nigeria'), ('North Korea', 'North Korea'), ('Norway', 'Norway'), ('Oman', 'Oman'), ('Pakistan', 'Pakistan'), ('Palau', 'Palau'), ('Palestinian Territories', 'Palestinian Territories'), ('Panama', 'Panama'), ('Papua New Guinea', 'Papua New Guinea'), 
('Paraguay', 'Paraguay'), ('Peru', 'Peru'), ('Philippines', 'Philippines'), ('Poland', 'Poland'), ('Portugal', 'Portugal'), ('Qatar', 'Qatar'), ('Romania', 'Romania'), ('Russia', 'Russia'), ('Rwanda', 'Rwanda'), ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'), ('Saint Lucia', 'Saint Lucia'), ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'), ('Samoa', 'Samoa'), ('San Marino', 'San Marino'), ('Sao Tome and Principe', 'Sao Tome and Principe'), ('Saudi Arabia', 'Saudi Arabia'), ('Senegal', 'Senegal'), ('Serbia', 'Serbia'), ('Seychelles', 'Seychelles'), ('Sierra Leone', 'Sierra Leone'), ('Singapore', 'Singapore'), ('Sint Maarten', 'Sint Maarten'), ('Slovakia', 'Slovakia'), ('Slovenia', 'Slovenia'), ('Solomon Islands', 'Solomon Islands'), ('Somalia', 'Somalia'), ('South Africa', 'South Africa'), ('South Korea', 'South Korea'), ('South Sudan', 'South Sudan'), ('Spain', 'Spain'), ('Sri Lanka', 'Sri Lanka'), ('Sudan', 'Sudan'), ('Suriname', 'Suriname'), ('Swaziland', 'Swaziland'), ('Sweden', 'Sweden'), ('Switzerland', 'Switzerland'), ('Syria', 'Syria'), ('Taiwan', 'Taiwan'), ('Tajikistan', 'Tajikistan'), ('Tanzania', 'Tanzania'), ('Thailand', 'Thailand'), ('Timor-Leste', 'Timor-Leste'), ('Togo', 'Togo'), ('Tonga', 'Tonga'), ('Trinidad and Tobago', 'Trinidad and Tobago'), ('Tunisia', 'Tunisia'), ('Turkey', 'Turkey'), ('Turkmenistan', 'Turkmenistan'), ('Tuvalu', 'Tuvalu'), ('Uganda', 'Uganda'), ('Ukraine', 'Ukraine'), ('United Arab Emirates', 'United Arab Emirates'), ('United Kingdom', 'United Kingdom'), ('Uruguay', 'Uruguay'), ('Uzbekistan', 'Uzbekistan'), ('Vanuatu', 'Vanuatu'), ('Venezuela', 'Venezuela'), ('Vietnam', 'Vietnam'), ('Yemen', 'Yemen'), ('Zambia', 'Zambia'), ('Zimbabwe', 'Zimbabwe'), ('Other', 'Other')], max_length=63)),
('shipping_postal', models.CharField(db_index=True, max_length=31)),
('has_consented', models.BooleanField(default=False)),
('qrcode', models.ImageField(null=True, blank=True, upload_to='qrcode')),
],
options={
'db_table': 'ec_customers',
'ordering': ('last_name', 'first_name'),
},
),
migrations.CreateModel(
name='EmailSubscription',
fields=[
('subscription_id', models.AutoField(serialize=False, primary_key=True)),
('email', models.EmailField(unique=True, db_index=True, max_length=254)),
('submission_date', models.DateTimeField(auto_now_add=True)),
],
options={
'db_table': 'ec_email_subscriptions',
'ordering': ('submission_date',),
},
),
migrations.CreateModel(
name='Employee',
fields=[
('employee_id', models.AutoField(serialize=False, primary_key=True)),
('role', models.PositiveSmallIntegerField(choices=[(0, 'Owner'), (1, 'Manager'), (2, 'Worker')], default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(3)])),
('is_verified', models.BooleanField(default=False)),
('verification_key', models.CharField(default='', blank=True, max_length=63)),
('joined', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('is_suspended', models.BooleanField(default=False)),
('is_tos_signed', models.BooleanField(default=False)),
],
options={
'db_table': 'ec_employees',
'ordering': ('employee_id',),
},
),
migrations.CreateModel(
name='GCDBrand',
fields=[
('brand_id', models.AutoField(serialize=False, primary_key=True)),
('issue_count', models.IntegerField(default=0)),
('name', models.CharField(db_index=True, max_length=255)),
('year_began', models.IntegerField(null=True, db_index=True)),
('year_ended', models.IntegerField(null=True)),
('year_began_uncertain', models.BooleanField(db_index=True)),
('year_ended_uncertain', models.BooleanField(db_index=True)),
('notes', models.TextField()),
('keywords', models.TextField(null=True)),
('url', models.URLField(default='', blank=True, max_length=255)),
('reserved', models.BooleanField(default=False, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
],
options={
'db_table': 'gcd_brands',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='GCDBrandEmblemGroup',
fields=[
('brand_emblem_group_id', models.AutoField(serialize=False, primary_key=True)),
('brand', models.ForeignKey(to='api.GCDBrand', null=True)),
],
options={
'db_table': 'gcd_brand_emblem_groups',
'ordering': ('brand',),
},
),
migrations.CreateModel(
name='GCDBrandGroup',
fields=[
('brand_group_id', models.AutoField(serialize=False, primary_key=True)),
('issue_count', models.IntegerField(default=0)),
('name', models.CharField(db_index=True, max_length=255)),
('year_began', models.IntegerField(null=True, db_index=True)),
('year_ended', models.IntegerField(null=True)),
('year_began_uncertain', models.BooleanField(db_index=True)),
('year_ended_uncertain', models.BooleanField(db_index=True)),
('notes', models.TextField()),
('keywords', models.TextField(null=True)),
('url', models.URLField(default='', blank=True, max_length=255)),
('reserved', models.BooleanField(default=False, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
],
options={
'db_table': 'gcd_brand_groups',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='GCDBrandUse',
fields=[
('brand_use_id', models.AutoField(serialize=False, primary_key=True)),
('year_began', models.IntegerField(null=True, db_index=True)),
('year_ended', models.IntegerField(null=True)),
('year_began_uncertain', models.BooleanField(db_index=True)),
('year_ended_uncertain', models.BooleanField(db_index=True)),
('notes', models.TextField()),
('reserved', models.BooleanField(default=0, db_index=True)),
('created', models.DateField(auto_now_add=True)),
('modified', models.DateField(auto_now=True)),
('emblem', models.ForeignKey(related_name='in_use', to='api.GCDBrand')),
],
options={
'db_table': 'gcd_brand_uses',
'ordering': ('publisher',),
},
),
migrations.CreateModel(
name='GCDCountry',
fields=[
('country_id', models.AutoField(serialize=False, primary_key=True)),
('code', models.CharField(unique=True, max_length=10)),
('name', models.CharField(db_index=True, max_length=255)),
],
options={
'db_table': 'gcd_countries',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='GCDImage',
fields=[
('image_id', models.AutoField(serialize=False, primary_key=True)),
('type', models.CharField(db_index=True, max_length=255)),
('file', models.FileField(null=True, upload_to='uploads')),
],
options={
'db_table': 'gcd_images',
},
),
migrations.CreateModel(
name='GCDIndiciaPublisher',
fields=[
('indicia_publisher_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(db_index=True, max_length=255)),
('year_began', models.PositiveSmallIntegerField(null=True, db_index=True)),
('year_ended', models.PositiveSmallIntegerField(null=True)),
('year_began_uncertain', models.BooleanField(default=False, db_index=True)),
('year_ended_uncertain', models.BooleanField(default=False, db_index=True)),
('notes', models.TextField(null=True, blank=True)),
('url', models.URLField(default='', null=True, blank=True, max_length=255)),
('is_surrogate', models.BooleanField(db_index=True)),
('reserved', models.BooleanField(default=False, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
('imprint_count', models.IntegerField(default=0)),
('brand_count', models.IntegerField(default=0, db_index=True)),
('indicia_publisher_count', models.IntegerField(default=0, db_index=True)),
('series_count', models.IntegerField(default=0)),
('issue_count', models.IntegerField(default=0)),
('country', models.ForeignKey(to='api.GCDCountry')),
('images', models.ManyToManyField(to='api.GCDImage', blank=True)),
],
options={
'db_table': 'gcd_indicia_publishers',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='GCDIssue',
fields=[
('issue_id', models.AutoField(serialize=False, primary_key=True)),
('number', models.CharField(db_index=True, max_length=50)),
('title', models.CharField(db_index=True, max_length=255)),
('no_title', models.BooleanField(default=False, db_index=True)),
('volume', models.CharField(db_index=True, max_length=50)),
('no_volume', models.BooleanField(default=False, db_index=True)),
('display_volume_with_number', models.BooleanField(default=False, db_index=True)),
('isbn', models.CharField(db_index=True, max_length=32)),
('no_isbn', models.BooleanField(default=False, db_index=True)),
('valid_isbn', models.CharField(db_index=True, max_length=13)),
('variant_of_id', models.IntegerField(default=0, db_index=True)),
('variant_name', models.CharField(max_length=255)),
('barcode', models.CharField(db_index=True, max_length=38)),
('no_barcode', models.BooleanField(default=False)),
('rating', models.CharField(default='', db_index=True, max_length=255)),
('no_rating', models.BooleanField(default=False, db_index=True)),
('is_first_issue', models.BooleanField(default=False)),
('is_last_issue', models.BooleanField(default=False)),
('publication_date', models.CharField(max_length=255)),
('key_date', models.CharField(db_index=True, max_length=10)),
('on_sale_date', models.CharField(db_index=True, max_length=10)),
('on_sale_date_uncertain', models.BooleanField(default=False)),
('sort_code', models.IntegerField(db_index=True)),
('indicia_frequency', models.CharField(max_length=255)),
('no_indicia_frequency', models.BooleanField(default=False, db_index=True)),
('price', models.CharField(max_length=255)),
('page_count', models.DecimalField(max_digits=10, null=True, decimal_places=3)),
('page_count_uncertain', models.BooleanField(default=False)),
('editing', models.TextField()),
('no_editing', models.BooleanField(default=False, db_index=True)),
('notes', models.TextField(null=True)),
('keywords', models.TextField(null=True)),
('is_indexed', models.IntegerField(default=0, db_index=True)),
('reserved', models.BooleanField(default=False, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True, db_index=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
('indicia_pub_not_printed', models.BooleanField(default=False)),
('no_brand', models.BooleanField(default=False, db_index=True)),
('small_url', models.URLField(null=True, blank=True, max_length=255)),
('medium_url', models.URLField(null=True, blank=True, max_length=255)),
('large_url', models.URLField(null=True, blank=True, max_length=255)),
('alt_small_url', models.URLField(null=True, blank=True, max_length=255)),
('alt_medium_url', models.URLField(null=True, blank=True, max_length=255)),
('alt_large_url', models.URLField(null=True, blank=True, max_length=255)),
('has_alternative', models.BooleanField(default=False)),
('publisher_name', models.CharField(db_index=True, max_length=255)),
('genre', models.CharField(null=True, blank=True, db_index=True, max_length=255)),
('product_name', models.CharField(null=True, blank=True, db_index=True, max_length=511)),
('brand', models.ForeignKey(to='api.GCDBrand', null=True)),
('images', models.ManyToManyField(to='api.GCDImage', blank=True)),
('indicia_publisher', models.ForeignKey(to='api.GCDIndiciaPublisher', null=True)),
],
options={
'db_table': 'gcd_issues',
'ordering': ['series', 'sort_code'],
},
),
migrations.CreateModel(
name='GCDLanguage',
fields=[
('language_id', models.AutoField(serialize=False, primary_key=True)),
('code', models.CharField(unique=True, max_length=10)),
('name', models.CharField(db_index=True, max_length=255)),
],
options={
'db_table': 'gcd_languages',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='GCDPublisher',
fields=[
('publisher_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(db_index=True, max_length=255)),
('year_began', models.PositiveSmallIntegerField(null=True, db_index=True)),
('year_ended', models.PositiveSmallIntegerField(null=True)),
('year_began_uncertain', models.BooleanField(default=False, db_index=True)),
('year_ended_uncertain', models.BooleanField(default=False, db_index=True)),
('notes', models.TextField(null=True, blank=True)),
('url', models.URLField(default='', null=True, blank=True, max_length=255)),
('is_master', models.BooleanField(default=False, db_index=True)),
('reserved', models.BooleanField(default=False, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
('imprint_count', models.IntegerField(default=0)),
('brand_count', models.IntegerField(default=0, db_index=True)),
('indicia_publisher_count', models.IntegerField(default=0, db_index=True)),
('series_count', models.IntegerField(default=0)),
('issue_count', models.IntegerField(default=0)),
('country', models.ForeignKey(to='api.GCDCountry')),
('images', models.ManyToManyField(to='api.GCDImage', blank=True)),
('parent', models.ForeignKey(related_name='imprint_set', to='api.GCDPublisher', null=True)),
],
options={
'db_table': 'gcd_publishers',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='GCDSeries',
fields=[
('series_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(db_index=True, max_length=255)),
('sort_name', models.CharField(db_index=True, max_length=255)),
('format', models.CharField(default='', max_length=255)),
('color', models.CharField(default='', max_length=255)),
('dimensions', models.CharField(default='', max_length=255)),
('paper_stock', models.CharField(default='', max_length=255)),
('binding', models.CharField(default='', max_length=255)),
('publishing_format', models.CharField(default='', max_length=255)),
('tracking_notes', models.TextField(null=True, blank=True)),
('notes', models.TextField(null=True, blank=True)),
('publication_notes', models.TextField(null=True, blank=True)),
('keywords', models.TextField(null=True, blank=True)),
('year_began', models.IntegerField(db_index=True)),
('year_ended', models.IntegerField(default=0, null=True, blank=True)),
('year_began_uncertain', models.BooleanField(default=False)),
('year_ended_uncertain', models.BooleanField(default=False)),
('publication_dates', models.CharField(max_length=255)),
('has_barcode', models.BooleanField(default=False)),
('has_indicia_frequency', models.BooleanField(default=False)),
('has_isbn', models.BooleanField(default=False)),
('has_issue_title', models.BooleanField(default=False)),
('has_volume', models.BooleanField(default=False)),
('has_rating', models.BooleanField(default=False)),
('is_current', models.BooleanField(default=False)),
('is_comics_publication', models.BooleanField(default=False)),
('is_singleton', models.BooleanField(default=False)),
('issue_count', models.IntegerField(default=0, null=True, blank=True)),
('has_gallery', models.BooleanField(default=False, db_index=True)),
('reserved', models.BooleanField(default=False, db_index=True)),
('open_reserve', models.IntegerField(null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
('cover_url', models.URLField(null=True, blank=True, max_length=255)),
('publication_type_id', models.IntegerField(null=True, blank=0)),
('publisher_name', models.CharField(db_index=True, max_length=255)),
('country', models.ForeignKey(to='api.GCDCountry')),
('images', models.ManyToManyField(to='api.GCDImage', blank=True)),
('language', models.ForeignKey(to='api.GCDLanguage')),
('publisher', models.ForeignKey(to='api.GCDPublisher')),
],
options={
'db_table': 'gcd_series',
'ordering': ['sort_name', 'year_began'],
},
),
migrations.CreateModel(
name='GCDStory',
fields=[
('story_id', models.AutoField(serialize=False, primary_key=True)),
('title', models.CharField(max_length=255)),
('title_inferred', models.BooleanField(default=False, db_index=True)),
('feature', models.CharField(max_length=255)),
('sequence_number', models.IntegerField()),
('page_count', models.DecimalField(max_digits=10, null=True, decimal_places=3, db_index=True)),
('page_count_uncertain', models.BooleanField(default=False, db_index=True)),
('script', models.TextField()),
('pencils', models.TextField()),
('inks', models.TextField()),
('colors', models.TextField()),
('letters', models.TextField()),
('editing', models.TextField()),
('no_script', models.BooleanField(default=False, db_index=True)),
('no_pencils', models.BooleanField(default=False, db_index=True)),
('no_inks', models.BooleanField(default=False, db_index=True)),
('no_colors', models.BooleanField(default=False, db_index=True)),
('no_letters', models.BooleanField(default=False, db_index=True)),
('no_editing', models.BooleanField(default=False, db_index=True)),
('job_number', models.CharField(max_length=25)),
('genre', models.CharField(max_length=255)),
('characters', models.TextField()),
('synopsis', models.TextField()),
('reprint_notes', models.TextField()),
('notes', models.TextField()),
('keywords', models.TextField(null=True)),
('reserved', models.BooleanField(default=False, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True, db_index=True)),
('deleted', models.BooleanField(default=False, db_index=True)),
('issue', models.ForeignKey(to='api.GCDIssue')),
],
options={
'db_table': 'gcd_stories',
'ordering': ('sequence_number',),
},
),
migrations.CreateModel(
name='GCDStoryType',
fields=[
('story_type_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(unique=True, db_index=True, max_length=50)),
('sort_code', models.IntegerField(unique=True)),
],
options={
'db_table': 'gcd_story_types',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='HelpRequest',
fields=[
('help_id', models.AutoField(serialize=False, primary_key=True)),
('subject', models.PositiveSmallIntegerField(choices=[(1, 'Feedback'), (2, 'Error'), (3, 'Checkout'), (4, 'Inventory'), (5, 'Pull List'), (6, 'Sales'), (7, 'Emailing List'), (8, 'Store Settings / Users'), (9, 'Dashboard')], default=1, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(10)])),
('subject_url', models.URLField(null=True, blank=True)),
('message', models.TextField()),
('submission_date', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(to='api.Customer', blank=True, null=True)),
('employee', models.ForeignKey(to='api.Employee', blank=True, null=True)),
],
options={
'db_table': 'ec_help_requests',
'ordering': ('submission_date',),
},
),
migrations.CreateModel(
name='ImageBinaryUpload',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_index=True)),
('created', models.DateField(null=True, auto_now=True)),
('file_type', models.CharField(choices=[('png', 'Portable Network Graphics (PNG)'), ('jpeg', 'Joint Photographic Experts Group picture (JPEG)'), ('jpg', 'Joint Photographic Experts Group picture (JPG)'), ('bmp', 'Bitmap Image File (BMP)'), ('tiff', 'Tagged Image File Format (TIFF)'), ('gif', 'Graphics Interchange Format (GIF)')], db_index=True, max_length=4)),
('mime_type', models.CharField(choices=[('image/png', 'PNG'), ('image/jpeg', 'JPEG/JPG'), ('image/bmp', 'BMP'), ('image/tiff', 'TIFF'), ('image/gif', 'GIF')], default='image/jpeg', db_index=True, max_length=15)),
('data', models.BinaryField()),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'ec_image_binary_uploads',
},
),
migrations.CreateModel(
name='ImageUpload',
fields=[
('upload_id', models.AutoField(serialize=False, primary_key=True)),
('upload_date', models.DateField(null=True, auto_now=True)),
('is_assigned', models.BooleanField(default=False)),
('image', models.ImageField(null=True, blank=True, upload_to='upload')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'ec_image_uploads',
'ordering': ('upload_date',),
},
),
migrations.CreateModel(
name='Organization',
fields=[
('org_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=127)),
('description', models.TextField(null=True, blank=True)),
('joined', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('is_suspended', models.BooleanField(default=False, db_index=True)),
('is_listed', models.BooleanField(default=True, db_index=True)),
('street_name', models.CharField(max_length=63)),
('street_number', models.CharField(null=True, blank=True, max_length=31)),
('unit_number', models.CharField(null=True, blank=True, max_length=15)),
('city', models.CharField(max_length=63)),
('province', models.CharField(choices=[('Alberta', 'Alberta'), ('British Columbia', 'British Columbia'), ('Manitoba', 'Manitoba'), ('New Brunswick', 'New Brunswick'), ('Newfoundland and Labrador', 'Newfoundland and Labrador'), ('Nova Scotia', 'Nova Scotia'), ('Ontario', 'Ontario'), ('Prince Edward Island', 'Prince Edward Island'), ('Quebec', 'Quebec'), ('Saskatchewan', 'Saskatchewan'), ('Northwest Territories', 'Northwest Territories'), ('Nunavut', 'Nunavut'), ('Yukon', 'Yukon'), ('Alabama', 'Alabama'), ('Alaska', 'Alaska'), ('Arizona', 'Arizona'), ('Arkansas', 'Arkansas'), ('California', 'California'), ('Colorado', 'Colorado'), ('Connecticut', 'Connecticut'), ('Delaware', 'Delaware'), ('Florida', 'Florida'), ('Georgia', 'Georgia'), ('Hawaii', 'Hawaii'), ('Idaho', 'Idaho'), ('Illinois', 'Illinois'), ('Indiana', 'Indiana'), ('Iowa', 'Iowa'), ('Kansas', 'Kansas'), ('Kentucky', 'Kentucky'), ('Louisiana', 'Louisiana'), ('Maine', 'Maine'), ('Maryland', 'Maryland'), ('Massachusetts', 'Massachusetts'), ('Michigan', 'Michigan'), ('Minnesota', 'Minnesota'), ('Mississippi', 'Mississippi'), ('Missouri', 'Missouri'), ('Montana', 'Montana'), ('Nebraska', 'Nebraska'), ('Nevada', 'Nevada'), ('New Hampshire', 'New Hampshire'), ('New Jersey', 'New Jersey'), ('New Mexico', 'New Mexico'), ('New York', 'New York'), ('North Carolina', 'North Carolina'), ('North Dakota', 'North Dakota'), ('Ohio', 'Ohio'), ('Oklahoma', 'Oklahoma'), ('Oregon', 'Oregon'), ('Pennsylvania', 'Pennsylvania'), ('Rhode Island', 'Rhode Island'), ('South Carolina', 'South Carolina'), ('South Dakota', 'South Dakota'), ('Tennessee', 'Tennessee'), ('Texas', 'Texas'), ('Utah', 'Utah'), ('Vermont', 'Vermont'), ('Virginia', 'Virginia'), ('Washington', 'Washington'), ('West Virginia', 'West Virginia'), ('Wisconsin', 'Wisconsin'), ('Wyoming', 'Wyoming'), ('Other', 'Other')], max_length=63)),
('country', models.CharField(choices=[('Canada', 'Canada'), ('United States', 'United States'), ('Mexico', 'Mexico'), ('Afghanistan', 'Afghanistan'), ('Albania', 'Albania'), ('Algeria', 'Algeria'), ('Andorra', 'Andorra'), ('Angola', 'Angola'), ('Antigua and Barbuda', 'Antigua and Barbuda'), ('Argentina', 'Argentina'), ('Armenia', 'Armenia'), ('Aruba', 'Aruba'), ('Australia', 'Australia'), ('Austria', 'Austria'), ('Azerbaijan', 'Azerbaijan'), ('Bahamas, The', 'Bahamas, The'), ('Bahrain', 'Bahrain'), ('Bangladesh', 'Bangladesh'), ('Barbados', 'Barbados'), ('Belarus', 'Belarus'), ('Belgium', 'Belgium'), ('Belize', 'Belize'), ('Benin', 'Benin'), ('Bhutan', 'Bhutan'), ('Bolivia', 'Bolivia'), ('Bosnia and Herzegovina', 'Bosnia and Herzegovina'), ('Botswana', 'Botswana'), ('Brazil', 'Brazil'), ('Brunei', 'Brunei'), ('Bulgaria', 'Bulgaria'), ('Burkina Faso', 'Burkina Faso'), ('Burma', 'Burma'), ('Burundi', 'Burundi'), ('Cambodia', 'Cambodia'), ('Cameroon', 'Cameroon'), ('Cape Verde', 'Cape Verde'), ('Central African Republic', 'Central African Republic'), ('Chad', 'Chad'), ('Chile', 'Chile'), ('China', 'China'), ('Colombia', 'Colombia'), ('Comoros', 'Comoros'), ('Congo, Democratic Republic of the', 'Congo, Democratic Republic of the'), ('Congo, Republic of the', 'Congo, Republic of the'), ('Costa Rica', 'Costa Rica'), ("Cote d'Ivoire", "Cote d'Ivoire"), ('Croatia', 'Croatia'), ('Cuba', 'Cuba'), ('Curacao', 'Curacao'), ('Cyprus', 'Cyprus'), ('Czech Republic', 'Czech Republic'), ('Denmark', 'Denmark'), ('Djibouti', 'Djibouti'), ('Dominica', 'Dominica'), ('Dominican Republic', 'Dominican Republic'), ('East Timor', 'East Timor'), ('Ecuador', 'Ecuador'), ('Egypt', 'Egypt'), ('El Salvador', 'El Salvador'), ('Equatorial Guinea', 'Equatorial Guinea'), ('Eritrea', 'Eritrea'), ('Estonia', 'Estonia'), ('Ethiopia', 'Ethiopia'), ('Fiji', 'Fiji'), ('Finland', 'Finland'), ('France', 'France'), ('Gabon', 'Gabon'), ('Gambia, The', 'Gambia, The'), ('Georgia', 'Georgia'), ('Germany', 
'Germany'), ('Ghana', 'Ghana'), ('Greece', 'Greece'), ('Grenada', 'Grenada'), ('Guatemala', 'Guatemala'), ('Guinea', 'Guinea'), ('Guinea-Bissau', 'Guinea-Bissau'), ('Guyana', 'Guyana'), ('Haiti', 'Haiti'), ('Holy See', 'Holy See'), ('Honduras', 'Honduras'), ('Hong Kong', 'Hong Kong'), ('Hungary', 'Hungary'), ('Iceland', 'Iceland'), ('India', 'India'), ('Indonesia', 'Indonesia'), ('Iran', 'Iran'), ('Iraq', 'Iraq'), ('Ireland', 'Ireland'), ('Israel', 'Israel'), ('Italy', 'Italy'), ('Jamaica', 'Jamaica'), ('Japan', 'Japan'), ('Jordan', 'Jordan'), ('Kazakhstan', 'Kazakhstan'), ('Kenya', 'Kenya'), ('Kiribati', 'Kiribati'), ('Korea, North', 'Korea, North'), ('Korea, South', 'Korea, South'), ('Kosovo', 'Kosovo'), ('Kuwait', 'Kuwait'), ('Kyrgyzstan', 'Kyrgyzstan'), ('Laos', 'Laos'), ('Latvia', 'Latvia'), ('Lebanon', 'Lebanon'), ('Lesotho', 'Lesotho'), ('Liberia', 'Liberia'), ('Libya', 'Libya'), ('Liechtenstein', 'Liechtenstein'), ('Lithuania', 'Lithuania'), ('Luxembourg', 'Luxembourg'), ('Macau', 'Macau'), ('Macedonia', 'Macedonia'), ('Madagascar', 'Madagascar'), ('Malawi', 'Malawi'), ('Malaysia', 'Malaysia'), ('Maldives', 'Maldives'), ('Mali', 'Mali'), ('Malta', 'Malta'), ('Marshall Islands', 'Marshall Islands'), ('Mauritania', 'Mauritania'), ('Mauritius', 'Mauritius'), ('Mexico', 'Mexico'), ('Micronesia', 'Micronesia'), ('Moldova', 'Moldova'), ('Monaco', 'Monaco'), ('Mongolia', 'Mongolia'), ('Montenegro', 'Montenegro'), ('Morocco', 'Morocco'), ('Mozambique', 'Mozambique'), ('Namibia', 'Namibia'), ('Nauru', 'Nauru'), ('Nepal', 'Nepal'), ('Netherlands', 'Netherlands'), ('Netherlands Antilles', 'Netherlands Antilles'), ('New Zealand', 'New Zealand'), ('Nicaragua', 'Nicaragua'), ('Niger', 'Niger'), ('Nigeria', 'Nigeria'), ('North Korea', 'North Korea'), ('Norway', 'Norway'), ('Oman', 'Oman'), ('Pakistan', 'Pakistan'), ('Palau', 'Palau'), ('Palestinian Territories', 'Palestinian Territories'), ('Panama', 'Panama'), ('Papua New Guinea', 'Papua New Guinea'), ('Paraguay', 
'Paraguay'), ('Peru', 'Peru'), ('Philippines', 'Philippines'), ('Poland', 'Poland'), ('Portugal', 'Portugal'), ('Qatar', 'Qatar'), ('Romania', 'Romania'), ('Russia', 'Russia'), ('Rwanda', 'Rwanda'), ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'), ('Saint Lucia', 'Saint Lucia'), ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'), ('Samoa', 'Samoa'), ('San Marino', 'San Marino'), ('Sao Tome and Principe', 'Sao Tome and Principe'), ('Saudi Arabia', 'Saudi Arabia'), ('Senegal', 'Senegal'), ('Serbia', 'Serbia'), ('Seychelles', 'Seychelles'), ('Sierra Leone', 'Sierra Leone'), ('Singapore', 'Singapore'), ('Sint Maarten', 'Sint Maarten'), ('Slovakia', 'Slovakia'), ('Slovenia', 'Slovenia'), ('Solomon Islands', 'Solomon Islands'), ('Somalia', 'Somalia'), ('South Africa', 'South Africa'), ('South Korea', 'South Korea'), ('South Sudan', 'South Sudan'), ('Spain', 'Spain'), ('Sri Lanka', 'Sri Lanka'), ('Sudan', 'Sudan'), ('Suriname', 'Suriname'), ('Swaziland', 'Swaziland'), ('Sweden', 'Sweden'), ('Switzerland', 'Switzerland'), ('Syria', 'Syria'), ('Taiwan', 'Taiwan'), ('Tajikistan', 'Tajikistan'), ('Tanzania', 'Tanzania'), ('Thailand', 'Thailand'), ('Timor-Leste', 'Timor-Leste'), ('Togo', 'Togo'), ('Tonga', 'Tonga'), ('Trinidad and Tobago', 'Trinidad and Tobago'), ('Tunisia', 'Tunisia'), ('Turkey', 'Turkey'), ('Turkmenistan', 'Turkmenistan'), ('Tuvalu', 'Tuvalu'), ('Uganda', 'Uganda'), ('Ukraine', 'Ukraine'), ('United Arab Emirates', 'United Arab Emirates'), ('United Kingdom', 'United Kingdom'), ('Uruguay', 'Uruguay'), ('Uzbekistan', 'Uzbekistan'), ('Vanuatu', 'Vanuatu'), ('Venezuela', 'Venezuela'), ('Vietnam', 'Vietnam'), ('Yemen', 'Yemen'), ('Zambia', 'Zambia'), ('Zimbabwe', 'Zimbabwe'), ('Other', 'Other')], max_length=63)),
('postal', models.CharField(max_length=31)),
('currency', models.PositiveSmallIntegerField(choices=[(124, 'CAD'), (840, 'USD')], default=124)),
('language', models.CharField(choices=[('EN', 'English')], default='EN', max_length=2)),
('website', models.URLField(null=True, blank=True)),
('email', models.EmailField(null=True, blank=True, max_length=254)),
('phone', models.CharField(null=True, blank=True, max_length=10)),
('fax', models.CharField(null=True, blank=True, max_length=10)),
('twitter', models.CharField(null=True, blank=True, max_length=15)),
('facebook_url', models.URLField(null=True, blank=True)),
('instagram_url', models.URLField(null=True, blank=True)),
('linkedin_url', models.URLField(null=True, blank=True)),
('github_url', models.URLField(null=True, blank=True)),
('google_url', models.URLField(null=True, blank=True)),
('youtube_url', models.URLField(null=True, blank=True)),
('flickr_url', models.URLField(null=True, blank=True)),
('paypal_email', models.EmailField(max_length=254)),
('style', models.CharField(choices=[('ecantina-style-0.css', 'Green'), ('ecantina-style-1.css', 'Ligh Green'), ('ecantina-style-2.css', 'Aqua Green'), ('ecantina-style-3.css', 'Blue'), ('ecantina-style-4.css', 'Purple'), ('ecantina-style-5.css', 'Red'), ('ecantina-style-6.css', 'Dark Grey'), ('ecantina-style-7.css', 'Grey'), ('ecantina-style-8.css', 'Light Aqua Green'), ('ecantina-style-9.css', 'Yellow'), ('ecantina-style-10.css', 'Light Red'), ('ecantina-style-11.css', 'Dark Blue'), ('ecantina-style-black.css', 'Black')], default='ecantina-style-5.css', max_length=31)),
('administrator', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
('customers', models.ManyToManyField(to='api.Customer', blank=True)),
('header', models.ForeignKey(related_name='org_header', to='api.ImageUpload', blank=True, null=True)),
('logo', models.ForeignKey(related_name='org_logo', to='api.ImageUpload', blank=True, null=True)),
],
options={
'db_table': 'ec_organizations',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='OrgShippingPreference',
fields=[
('shipping_pref_id', models.AutoField(serialize=False, primary_key=True)),
('is_pickup_only', models.BooleanField(default=False)),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_org_shipping_preferences',
'ordering': ('organization',),
},
),
migrations.CreateModel(
name='OrgShippingRate',
fields=[
('shipping_rate_id', models.AutoField(serialize=False, primary_key=True)),
('country', models.PositiveSmallIntegerField(choices=[(124, 'Canada'), (840, 'United States'), (484, 'Mexico')], null=True, validators=[django.core.validators.MinValueValidator(4), django.core.validators.MaxValueValidator(840)], blank=True)),
('comics_rate1', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate2', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate3', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate4', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate5', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate6', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate7', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate8', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate9', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate10', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_org_shipping_rates',
'ordering': ('country',),
},
),
migrations.CreateModel(
name='PrintHistory',
fields=[
('print_id', models.AutoField(serialize=False, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('filename', models.CharField(db_index=True, max_length=127)),
('url', models.URLField()),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_print_history',
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='Product',
fields=[
('product_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(null=True, blank=True, db_index=True, max_length=511)),
('type', models.PositiveSmallIntegerField(choices=[(1, 'Comic'), (2, 'Furniture'), (3, 'Coin')], default=1, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)], db_index=True)),
('description', models.TextField(default='', blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('is_sold', models.BooleanField(default=False, db_index=True)),
('is_listed', models.BooleanField(default=True, db_index=True)),
('is_new', models.BooleanField(default=False, db_index=True)),
('is_featured', models.BooleanField(default=False, db_index=True)),
('sub_price', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('has_tax', models.BooleanField(default=True)),
('tax_rate', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('tax_amount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('sub_price_with_tax', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('discount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('discount_type', models.PositiveSmallIntegerField(choices=[(1, '%'), (2, '$')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(2)])),
('price', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('cost', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('currency', models.PositiveSmallIntegerField(choices=[(124, 'CAD'), (840, 'USD')], default=124)),
('language', models.CharField(choices=[('EN', 'English')], default='EN', max_length=2)),
('image_url', models.URLField(null=True, blank=True)),
('qrcode', models.ImageField(null=True, blank=True, upload_to='qrcode')),
('is_qrcode_printed', models.BooleanField(default=False)),
('has_no_shipping', models.BooleanField(default=False)),
('is_unlimited', models.BooleanField(default=False)),
('brand', models.ForeignKey(to='api.Brand', blank=True, null=True)),
('category', models.ForeignKey(to='api.Category')),
('image', models.ForeignKey(to='api.ImageUpload', blank=True, null=True)),
('images', models.ManyToManyField(related_name='product_images', to='api.ImageUpload', blank=True)),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_products',
'ordering': ('product_id', 'type'),
},
),
migrations.CreateModel(
name='Promotion',
fields=[
('promotion_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=127)),
('discount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('discount_type', models.PositiveSmallIntegerField(choices=[(1, '%'), (2, '$')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(2)])),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_promotions',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Pulllist',
fields=[
('pulllist_id', models.AutoField(serialize=False, primary_key=True)),
('organization', models.ForeignKey(to='api.Organization')),
('series', models.ForeignKey(to='api.GCDSeries', null=True)),
],
options={
'db_table': 'ec_pulllists',
'ordering': ('series',),
},
),
migrations.CreateModel(
name='PulllistSubscription',
fields=[
('subscription_id', models.AutoField(serialize=False, primary_key=True)),
('copies', models.PositiveSmallIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
('created', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(to='api.Customer')),
('organization', models.ForeignKey(to='api.Organization')),
('pulllist', models.ForeignKey(to='api.Pulllist')),
],
options={
'db_table': 'ec_pulllists_subscriptions',
},
),
migrations.CreateModel(
name='Receipt',
fields=[
('receipt_id', models.AutoField(serialize=False, primary_key=True)),
('created', models.DateTimeField(db_index=True, auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('purchased', models.DateTimeField(null=True, blank=True, db_index=True)),
('comment', models.CharField(default='', null=True, blank=True, max_length=511)),
('has_purchased_online', models.BooleanField(default=False)),
('payment_method', models.PositiveSmallIntegerField(choices=[(1, 'Cash'), (2, 'Debit Card'), (3, 'Credit Card'), (4, 'Gift Card'), (5, 'Store Points'), (6, 'Cheque'), (7, 'PayPal'), (8, 'Invoice'), (9, 'Other')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(9)])),
('status', models.PositiveSmallIntegerField(choices=[(1, 'New Order'), (2, 'Picked'), (3, 'Shipped'), (4, 'Received'), (5, 'In-Store Sale'), (6, 'Online Sale')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(6)], db_index=True)),
('has_shipping', models.BooleanField(default=False, db_index=True)),
('sub_total', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('has_tax', models.BooleanField(default=True)),
('tax_rate', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('tax_amount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('sub_total_with_tax', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('discount_amount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('shipping_amount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('total_amount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('has_finished', models.BooleanField(default=False, db_index=True)),
('has_paid', models.BooleanField(default=False)),
('email', models.EmailField(null=True, blank=True, max_length=254)),
('billing_address', models.CharField(null=True, blank=True, max_length=63)),
('billing_phone', models.CharField(null=True, blank=True, max_length=10)),
('billing_city', models.CharField(null=True, blank=True, max_length=63)),
('billing_province', models.CharField(null=True, blank=True, max_length=63)),
('billing_country', models.CharField(null=True, blank=True, max_length=63)),
('billing_postal', models.CharField(null=True, blank=True, max_length=31)),
('shipping_address', models.CharField(null=True, blank=True, max_length=63)),
('shipping_phone', models.CharField(null=True, blank=True, max_length=10)),
('shipping_city', models.CharField(null=True, blank=True, max_length=63)),
('shipping_province', models.CharField(null=True, blank=True, max_length=63)),
('shipping_country', models.CharField(null=True, blank=True, max_length=63)),
('shipping_postal', models.CharField(null=True, blank=True, max_length=31)),
('has_error', models.BooleanField(default=False, db_index=True)),
('error', models.PositiveSmallIntegerField(choices=[(0, 'No Error'), (1, 'Cancelled Online Order')], default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)])),
('customer', models.ForeignKey(to='api.Customer', blank=True, null=True)),
('employee', models.ForeignKey(to='api.Employee', blank=True, null=True)),
('organization', models.ForeignKey(to='api.Organization', blank=True, null=True)),
('products', models.ManyToManyField(related_name='receipt_products', to='api.Product', blank=True)),
],
options={
'db_table': 'ec_receipts',
'ordering': ('last_updated',),
},
),
migrations.CreateModel(
name='Section',
fields=[
('section_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(db_index=True, max_length=127)),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_sections',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Store',
fields=[
('store_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=127)),
('description', models.TextField(null=True, blank=True)),
('joined', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('is_suspended', models.BooleanField(default=False, db_index=True)),
('is_listed', models.BooleanField(default=True, db_index=True)),
('tax_rate', models.DecimalField(default=0.13, max_digits=10, decimal_places=2)),
('street_name', models.CharField(max_length=63)),
('street_number', models.CharField(null=True, blank=True, max_length=31)),
('unit_number', models.CharField(null=True, blank=True, max_length=15)),
('city', models.CharField(max_length=63)),
('province', models.CharField(choices=[('Alberta', 'Alberta'), ('British Columbia', 'British Columbia'), ('Manitoba', 'Manitoba'), ('New Brunswick', 'New Brunswick'), ('Newfoundland and Labrador', 'Newfoundland and Labrador'), ('Nova Scotia', 'Nova Scotia'), ('Ontario', 'Ontario'), ('Prince Edward Island', 'Prince Edward Island'), ('Quebec', 'Quebec'), ('Saskatchewan', 'Saskatchewan'), ('Northwest Territories', 'Northwest Territories'), ('Nunavut', 'Nunavut'), ('Yukon', 'Yukon'), ('Alabama', 'Alabama'), ('Alaska', 'Alaska'), ('Arizona', 'Arizona'), ('Arkansas', 'Arkansas'), ('California', 'California'), ('Colorado', 'Colorado'), ('Connecticut', 'Connecticut'), ('Delaware', 'Delaware'), ('Florida', 'Florida'), ('Georgia', 'Georgia'), ('Hawaii', 'Hawaii'), ('Idaho', 'Idaho'), ('Illinois', 'Illinois'), ('Indiana', 'Indiana'), ('Iowa', 'Iowa'), ('Kansas', 'Kansas'), ('Kentucky', 'Kentucky'), ('Louisiana', 'Louisiana'), ('Maine', 'Maine'), ('Maryland', 'Maryland'), ('Massachusetts', 'Massachusetts'), ('Michigan', 'Michigan'), ('Minnesota', 'Minnesota'), ('Mississippi', 'Mississippi'), ('Missouri', 'Missouri'), ('Montana', 'Montana'), ('Nebraska', 'Nebraska'), ('Nevada', 'Nevada'), ('New Hampshire', 'New Hampshire'), ('New Jersey', 'New Jersey'), ('New Mexico', 'New Mexico'), ('New York', 'New York'), ('North Carolina', 'North Carolina'), ('North Dakota', 'North Dakota'), ('Ohio', 'Ohio'), ('Oklahoma', 'Oklahoma'), ('Oregon', 'Oregon'), ('Pennsylvania', 'Pennsylvania'), ('Rhode Island', 'Rhode Island'), ('South Carolina', 'South Carolina'), ('South Dakota', 'South Dakota'), ('Tennessee', 'Tennessee'), ('Texas', 'Texas'), ('Utah', 'Utah'), ('Vermont', 'Vermont'), ('Virginia', 'Virginia'), ('Washington', 'Washington'), ('West Virginia', 'West Virginia'), ('Wisconsin', 'Wisconsin'), ('Wyoming', 'Wyoming'), ('Other', 'Other')], max_length=63)),
('country', models.CharField(choices=[('Canada', 'Canada'), ('United States', 'United States'), ('Mexico', 'Mexico'), ('Afghanistan', 'Afghanistan'), ('Albania', 'Albania'), ('Algeria', 'Algeria'), ('Andorra', 'Andorra'), ('Angola', 'Angola'), ('Antigua and Barbuda', 'Antigua and Barbuda'), ('Argentina', 'Argentina'), ('Armenia', 'Armenia'), ('Aruba', 'Aruba'), ('Australia', 'Australia'), ('Austria', 'Austria'), ('Azerbaijan', 'Azerbaijan'), ('Bahamas, The', 'Bahamas, The'), ('Bahrain', 'Bahrain'), ('Bangladesh', 'Bangladesh'), ('Barbados', 'Barbados'), ('Belarus', 'Belarus'), ('Belgium', 'Belgium'), ('Belize', 'Belize'), ('Benin', 'Benin'), ('Bhutan', 'Bhutan'), ('Bolivia', 'Bolivia'), ('Bosnia and Herzegovina', 'Bosnia and Herzegovina'), ('Botswana', 'Botswana'), ('Brazil', 'Brazil'), ('Brunei', 'Brunei'), ('Bulgaria', 'Bulgaria'), ('Burkina Faso', 'Burkina Faso'), ('Burma', 'Burma'), ('Burundi', 'Burundi'), ('Cambodia', 'Cambodia'), ('Cameroon', 'Cameroon'), ('Cape Verde', 'Cape Verde'), ('Central African Republic', 'Central African Republic'), ('Chad', 'Chad'), ('Chile', 'Chile'), ('China', 'China'), ('Colombia', 'Colombia'), ('Comoros', 'Comoros'), ('Congo, Democratic Republic of the', 'Congo, Democratic Republic of the'), ('Congo, Republic of the', 'Congo, Republic of the'), ('Costa Rica', 'Costa Rica'), ("Cote d'Ivoire", "Cote d'Ivoire"), ('Croatia', 'Croatia'), ('Cuba', 'Cuba'), ('Curacao', 'Curacao'), ('Cyprus', 'Cyprus'), ('Czech Republic', 'Czech Republic'), ('Denmark', 'Denmark'), ('Djibouti', 'Djibouti'), ('Dominica', 'Dominica'), ('Dominican Republic', 'Dominican Republic'), ('East Timor', 'East Timor'), ('Ecuador', 'Ecuador'), ('Egypt', 'Egypt'), ('El Salvador', 'El Salvador'), ('Equatorial Guinea', 'Equatorial Guinea'), ('Eritrea', 'Eritrea'), ('Estonia', 'Estonia'), ('Ethiopia', 'Ethiopia'), ('Fiji', 'Fiji'), ('Finland', 'Finland'), ('France', 'France'), ('Gabon', 'Gabon'), ('Gambia, The', 'Gambia, The'), ('Georgia', 'Georgia'), ('Germany', 
'Germany'), ('Ghana', 'Ghana'), ('Greece', 'Greece'), ('Grenada', 'Grenada'), ('Guatemala', 'Guatemala'), ('Guinea', 'Guinea'), ('Guinea-Bissau', 'Guinea-Bissau'), ('Guyana', 'Guyana'), ('Haiti', 'Haiti'), ('Holy See', 'Holy See'), ('Honduras', 'Honduras'), ('Hong Kong', 'Hong Kong'), ('Hungary', 'Hungary'), ('Iceland', 'Iceland'), ('India', 'India'), ('Indonesia', 'Indonesia'), ('Iran', 'Iran'), ('Iraq', 'Iraq'), ('Ireland', 'Ireland'), ('Israel', 'Israel'), ('Italy', 'Italy'), ('Jamaica', 'Jamaica'), ('Japan', 'Japan'), ('Jordan', 'Jordan'), ('Kazakhstan', 'Kazakhstan'), ('Kenya', 'Kenya'), ('Kiribati', 'Kiribati'), ('Korea, North', 'Korea, North'), ('Korea, South', 'Korea, South'), ('Kosovo', 'Kosovo'), ('Kuwait', 'Kuwait'), ('Kyrgyzstan', 'Kyrgyzstan'), ('Laos', 'Laos'), ('Latvia', 'Latvia'), ('Lebanon', 'Lebanon'), ('Lesotho', 'Lesotho'), ('Liberia', 'Liberia'), ('Libya', 'Libya'), ('Liechtenstein', 'Liechtenstein'), ('Lithuania', 'Lithuania'), ('Luxembourg', 'Luxembourg'), ('Macau', 'Macau'), ('Macedonia', 'Macedonia'), ('Madagascar', 'Madagascar'), ('Malawi', 'Malawi'), ('Malaysia', 'Malaysia'), ('Maldives', 'Maldives'), ('Mali', 'Mali'), ('Malta', 'Malta'), ('Marshall Islands', 'Marshall Islands'), ('Mauritania', 'Mauritania'), ('Mauritius', 'Mauritius'), ('Mexico', 'Mexico'), ('Micronesia', 'Micronesia'), ('Moldova', 'Moldova'), ('Monaco', 'Monaco'), ('Mongolia', 'Mongolia'), ('Montenegro', 'Montenegro'), ('Morocco', 'Morocco'), ('Mozambique', 'Mozambique'), ('Namibia', 'Namibia'), ('Nauru', 'Nauru'), ('Nepal', 'Nepal'), ('Netherlands', 'Netherlands'), ('Netherlands Antilles', 'Netherlands Antilles'), ('New Zealand', 'New Zealand'), ('Nicaragua', 'Nicaragua'), ('Niger', 'Niger'), ('Nigeria', 'Nigeria'), ('North Korea', 'North Korea'), ('Norway', 'Norway'), ('Oman', 'Oman'), ('Pakistan', 'Pakistan'), ('Palau', 'Palau'), ('Palestinian Territories', 'Palestinian Territories'), ('Panama', 'Panama'), ('Papua New Guinea', 'Papua New Guinea'), ('Paraguay', 
'Paraguay'), ('Peru', 'Peru'), ('Philippines', 'Philippines'), ('Poland', 'Poland'), ('Portugal', 'Portugal'), ('Qatar', 'Qatar'), ('Romania', 'Romania'), ('Russia', 'Russia'), ('Rwanda', 'Rwanda'), ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'), ('Saint Lucia', 'Saint Lucia'), ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'), ('Samoa', 'Samoa'), ('San Marino', 'San Marino'), ('Sao Tome and Principe', 'Sao Tome and Principe'), ('Saudi Arabia', 'Saudi Arabia'), ('Senegal', 'Senegal'), ('Serbia', 'Serbia'), ('Seychelles', 'Seychelles'), ('Sierra Leone', 'Sierra Leone'), ('Singapore', 'Singapore'), ('Sint Maarten', 'Sint Maarten'), ('Slovakia', 'Slovakia'), ('Slovenia', 'Slovenia'), ('Solomon Islands', 'Solomon Islands'), ('Somalia', 'Somalia'), ('South Africa', 'South Africa'), ('South Korea', 'South Korea'), ('South Sudan', 'South Sudan'), ('Spain', 'Spain'), ('Sri Lanka', 'Sri Lanka'), ('Sudan', 'Sudan'), ('Suriname', 'Suriname'), ('Swaziland', 'Swaziland'), ('Sweden', 'Sweden'), ('Switzerland', 'Switzerland'), ('Syria', 'Syria'), ('Taiwan', 'Taiwan'), ('Tajikistan', 'Tajikistan'), ('Tanzania', 'Tanzania'), ('Thailand', 'Thailand'), ('Timor-Leste', 'Timor-Leste'), ('Togo', 'Togo'), ('Tonga', 'Tonga'), ('Trinidad and Tobago', 'Trinidad and Tobago'), ('Tunisia', 'Tunisia'), ('Turkey', 'Turkey'), ('Turkmenistan', 'Turkmenistan'), ('Tuvalu', 'Tuvalu'), ('Uganda', 'Uganda'), ('Ukraine', 'Ukraine'), ('United Arab Emirates', 'United Arab Emirates'), ('United Kingdom', 'United Kingdom'), ('Uruguay', 'Uruguay'), ('Uzbekistan', 'Uzbekistan'), ('Vanuatu', 'Vanuatu'), ('Venezuela', 'Venezuela'), ('Vietnam', 'Vietnam'), ('Yemen', 'Yemen'), ('Zambia', 'Zambia'), ('Zimbabwe', 'Zimbabwe'), ('Other', 'Other')], max_length=63)),
('postal', models.CharField(max_length=31)),
('currency', models.PositiveSmallIntegerField(choices=[(124, 'CAD'), (840, 'USD')], default=124)),
('language', models.CharField(choices=[('EN', 'English')], default='EN', max_length=2)),
('website', models.URLField(null=True, blank=True)),
('email', models.EmailField(null=True, blank=True, max_length=254)),
('phone', models.CharField(null=True, blank=True, max_length=10)),
('fax', models.CharField(null=True, blank=True, max_length=10)),
('is_open_monday', models.BooleanField(default=False)),
('is_open_tuesday', models.BooleanField(default=False)),
('is_open_wednesday', models.BooleanField(default=False)),
('is_open_thursday', models.BooleanField(default=False)),
('is_open_friday', models.BooleanField(default=False)),
('is_open_saturday', models.BooleanField(default=False)),
('is_open_sunday', models.BooleanField(default=False)),
('monday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('tuesday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('wednesday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('thursday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('friday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('saturday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('sunday_to', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('monday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('tuesday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('wednesday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('thursday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('friday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('saturday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('sunday_from', models.CharField(choices=[('08:00', '08:00'), ('08:30', '08:30'), ('09:00', '09:00'), ('09:30', '09:30'), ('10:00', '10:00'), ('10:30', '10:30'), ('11:00', '11:00'), ('11:30', '11:30'), ('12:00', '12:00'), ('12:30', '12:30'), ('13:00', '13:00'), ('13:30', '13:30'), ('14:00', '14:00'), ('14:30', '14:30'), ('15:00', '15:00'), ('15:30', '15:30'), ('16:00', '16:00'), ('16:30', '16:30'), ('17:00', '17:00'), ('17:30', '17:30'), ('18:00', '18:00'), ('18:30', '18:30'), ('19:00', '19:00'), ('19:30', '19:30'), ('20:00', '20:00'), ('20:30', '20:30'), ('21:00', '21:00'), ('21:30', '21:30'), ('22:00', '22:00'), ('22:30', '22:30')], null=True, blank=True, max_length=5)),
('is_aggregated', models.BooleanField(default=True, db_index=True)),
('has_shipping_rate_override', models.BooleanField(default=False)),
('is_comics_vendor', models.BooleanField(default=True)),
('is_furniture_vendor', models.BooleanField(default=False)),
('is_coins_vendor', models.BooleanField(default=False)),
('paypal_email', models.EmailField(max_length=254)),
('style', models.CharField(choices=[('ecantina-style-0.css', 'Green'), ('ecantina-style-1.css', 'Ligh Green'), ('ecantina-style-2.css', 'Aqua Green'), ('ecantina-style-3.css', 'Blue'), ('ecantina-style-4.css', 'Purple'), ('ecantina-style-5.css', 'Red'), ('ecantina-style-6.css', 'Dark Grey'), ('ecantina-style-7.css', 'Grey'), ('ecantina-style-8.css', 'Light Aqua Green'), ('ecantina-style-9.css', 'Yellow'), ('ecantina-style-10.css', 'Light Red'), ('ecantina-style-11.css', 'Dark Blue'), ('ecantina-style-black.css', 'Black')], default='ecantina-style-5.css', max_length=31)),
('employees', models.ManyToManyField(to='api.Employee', blank=True)),
('header', models.ForeignKey(related_name='store_header', to='api.ImageUpload', blank=True, null=True)),
('logo', models.ForeignKey(related_name='store_logo', to='api.ImageUpload', blank=True, null=True)),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_stores',
'ordering': ('store_id',),
},
),
migrations.CreateModel(
name='StoreShippingPreference',
fields=[
('shipping_pref_id', models.AutoField(serialize=False, primary_key=True)),
('is_pickup_only', models.BooleanField(default=False)),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_store_shipping_preferences',
'ordering': ('organization',),
},
),
migrations.CreateModel(
name='StoreShippingRate',
fields=[
('shipping_rate_id', models.AutoField(serialize=False, primary_key=True)),
('country', models.PositiveSmallIntegerField(choices=[(124, 'Canada'), (840, 'United States'), (484, 'Mexico')], null=True, validators=[django.core.validators.MinValueValidator(4), django.core.validators.MaxValueValidator(840)], blank=True)),
('comics_rate1', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate2', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate3', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate4', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate5', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate6', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate7', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate8', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate9', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate10', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('organization', models.ForeignKey(to='api.Organization')),
('store', models.ForeignKey(to='api.Store')),
],
options={
'db_table': 'ec_store_shipping_rates',
'ordering': ('country',),
},
),
migrations.CreateModel(
name='SubDomain',
fields=[
('sub_domain_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(unique=True, null=True, blank=True, db_index=True, max_length=127)),
('organization', models.ForeignKey(to='api.Organization', blank=True, null=True)),
('store', models.ForeignKey(to='api.Store', blank=True, null=True)),
],
options={
'db_table': 'ec_subdomains',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Tag',
fields=[
('tag_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=127)),
('discount', models.DecimalField(default=0.0, max_digits=10, decimal_places=2)),
('discount_type', models.PositiveSmallIntegerField(choices=[(1, '%'), (2, '$')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(2)])),
('organization', models.ForeignKey(to='api.Organization')),
],
options={
'db_table': 'ec_tags',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='UnifiedShippingRate',
fields=[
('shipping_rate_id', models.AutoField(serialize=False, primary_key=True)),
('country', models.PositiveSmallIntegerField(choices=[(124, 'Canada'), (840, 'United States'), (484, 'Mexico')], null=True, validators=[django.core.validators.MinValueValidator(4), django.core.validators.MaxValueValidator(840)], blank=True)),
('comics_rate1', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate2', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate3', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate4', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate5', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate6', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate7', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate8', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate9', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
('comics_rate10', models.DecimalField(default=0.0, max_digits=10, decimal_places=2, db_index=True)),
],
options={
'db_table': 'ec_unified_shipping_rates',
'ordering': ('country',),
},
),
migrations.CreateModel(
name='Wishlist',
fields=[
('wishlist_id', models.AutoField(serialize=False, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(to='api.Customer')),
('product', models.ForeignKey(to='api.Product')),
],
options={
'db_table': 'ec_wishlists',
},
),
migrations.AddField(
model_name='storeshippingpreference',
name='rates',
field=models.ManyToManyField(related_name='store_shipping_rates', to='api.StoreShippingRate', blank=True, db_index=True),
),
migrations.AddField(
model_name='storeshippingpreference',
name='store',
field=models.ForeignKey(to='api.Store'),
),
migrations.AddField(
model_name='section',
name='store',
field=models.ForeignKey(to='api.Store'),
),
migrations.AddField(
model_name='receipt',
name='store',
field=models.ForeignKey(to='api.Store', blank=True, null=True),
),
migrations.AddField(
model_name='pulllist',
name='store',
field=models.ForeignKey(to='api.Store'),
),
migrations.AddField(
model_name='product',
name='section',
field=models.ForeignKey(to='api.Section'),
),
migrations.AddField(
model_name='product',
name='store',
field=models.ForeignKey(to='api.Store'),
),
migrations.AddField(
model_name='product',
name='tags',
field=models.ManyToManyField(related_name='product_tags', to='api.Tag', blank=True, db_index=True),
),
migrations.AddField(
model_name='printhistory',
name='store',
field=models.ForeignKey(to='api.Store'),
),
migrations.AddField(
model_name='orgshippingpreference',
name='rates',
field=models.ManyToManyField(related_name='ord_shipping_rates', to='api.OrgShippingRate', blank=True, db_index=True),
),
migrations.AddField(
model_name='helprequest',
name='organization',
field=models.ForeignKey(to='api.Organization', blank=True, null=True),
),
migrations.AddField(
model_name='helprequest',
name='screenshot',
field=models.ForeignKey(to='api.ImageUpload', blank=True, null=True),
),
migrations.AddField(
model_name='helprequest',
name='store',
field=models.ForeignKey(to='api.Store', blank=True, null=True),
),
migrations.AddField(
model_name='gcdstory',
name='type',
field=models.ForeignKey(to='api.GCDStoryType'),
),
migrations.AddField(
model_name='gcdissue',
name='series',
field=models.ForeignKey(to='api.GCDSeries', null=True),
),
migrations.AddField(
model_name='gcdindiciapublisher',
name='parent',
field=models.ForeignKey(to='api.GCDPublisher', null=True),
),
migrations.AddField(
model_name='gcdbranduse',
name='publisher',
field=models.ForeignKey(to='api.GCDPublisher'),
),
migrations.AddField(
model_name='gcdbrandgroup',
name='parent',
field=models.ForeignKey(to='api.GCDPublisher', null=True),
),
migrations.AddField(
model_name='gcdbrandemblemgroup',
name='brandgroup',
field=models.ForeignKey(to='api.GCDBrandGroup', null=True),
),
migrations.AddField(
model_name='gcdbrand',
name='group',
field=models.ManyToManyField(db_table='gcd_brand_emblem_group', to='api.GCDBrandGroup', blank=True),
),
migrations.AddField(
model_name='gcdbrand',
name='images',
field=models.ManyToManyField(to='api.GCDImage', blank=True),
),
migrations.AddField(
model_name='gcdbrand',
name='parent',
field=models.ForeignKey(to='api.GCDPublisher', null=True),
),
migrations.AddField(
model_name='employee',
name='organization',
field=models.ForeignKey(to='api.Organization'),
),
migrations.AddField(
model_name='employee',
name='profile',
field=models.ForeignKey(to='api.ImageUpload', blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL),
),
migrations.AddField(
model_name='employee',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='emailsubscription',
name='organization',
field=models.ForeignKey(to='api.Organization', blank=True, null=True),
),
migrations.AddField(
model_name='emailsubscription',
name='store',
field=models.ForeignKey(to='api.Store', blank=True, null=True),
),
migrations.AddField(
model_name='customer',
name='profile',
field=models.ForeignKey(to='api.ImageUpload', blank=True, null=True),
),
migrations.AddField(
model_name='customer',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, blank=True, null=True),
),
migrations.AddField(
model_name='comic',
name='issue',
field=models.ForeignKey(to='api.GCDIssue', blank=True, null=True),
),
migrations.AddField(
model_name='comic',
name='organization',
field=models.ForeignKey(to='api.Organization'),
),
migrations.AddField(
model_name='comic',
name='product',
field=models.ForeignKey(to='api.Product'),
),
migrations.AddField(
model_name='catalogitem',
name='image',
field=models.ForeignKey(to='api.ImageUpload', blank=True, null=True),
),
migrations.AddField(
model_name='catalogitem',
name='organization',
field=models.ForeignKey(to='api.Organization'),
),
migrations.AddField(
model_name='catalogitem',
name='store',
field=models.ForeignKey(to='api.Store'),
),
]
| true | true |
f7f4a30e273e88cd16c28b354b04b902a10bcef4 | 1,502 | py | Python | __main__.py | ishansharma/open_cv_feature_detection | 34f09d6e144d8220cca9295f0a59dba7f9488516 | [
"MIT"
] | null | null | null | __main__.py | ishansharma/open_cv_feature_detection | 34f09d6e144d8220cca9295f0a59dba7f9488516 | [
"MIT"
] | null | null | null | __main__.py | ishansharma/open_cv_feature_detection | 34f09d6e144d8220cca9295f0a59dba7f9488516 | [
"MIT"
] | null | null | null | from brief import brief
from camshift import camshift
from depth_detection import depth_detection as dd
from fast import fast
from hand_contours import detector as hc
from harris_corner_detection import subpixel as hsp
from image_operations import laplacian_derivative as lp
from image_operations import transformations as tf
from orb import convex_hull as ch
from orb import dt
from orb import orb
from shi_tomasi import shi_tomasi as st

# Menu text doubles as the input() prompt.
choice_message = """
Which program should I run?
1. Basic Harris Detection
2. Harris Detection with Subpixel Accuracy
3. Shi Tomasi
4. FAST (Features from Accelerated Segment Test)
5. BRIEF (Binary Robust Independent Elementary Features)
6. ORB (Oriented FAST and Rotated BRIEF)
7. Camshift
8. Contour based detector
9. Image resize
10. Laplacian Derivative
11. Convex hull of points using ORB
12. Delaunay Triangulation
13. Depth Detection
"""

# Sample image handed to the demos that operate on a single file.
hand_from_dataset = "../../dataset/Hands/Hand_0000083.jpg"

# One dispatch table instead of thirteen independent ``if`` statements.
# Every entry is a zero-argument callable so each demo is invoked the same
# way regardless of whether it needs the sample image.
# NOTE(review): choices 1 and 8 both ran the contour detector in the
# original chain; that mapping is preserved here -- confirm choice 1 was
# not meant to be a basic Harris demo.
programs = {
    1: lambda: hc.run(hand_from_dataset),
    2: hsp.run,
    3: lambda: st.run(hand_from_dataset),
    4: fast.run,
    5: brief.run,
    6: orb.run,
    7: camshift.run,
    8: lambda: hc.run(hand_from_dataset),
    9: tf.resize,
    10: lambda: lp.laplacian(hand_from_dataset),
    11: lambda: ch.run(hand_from_dataset),
    12: lambda: dt.run(hand_from_dataset),
    13: dd.run,
}

try:
    choice = int(input(choice_message))
except ValueError:
    # The original crashed with an uncaught ValueError traceback on
    # non-numeric input; fail gracefully instead.
    raise SystemExit("Please enter a number between 1 and 13.")

program = programs.get(choice)
if program is not None:
    program()
else:
    # The original exited silently on an out-of-range number.
    print("Unknown choice: {}".format(choice))
| 20.297297 | 58 | 0.738349 | from brief import brief
# Interactive launcher for the OpenCV feature-detection demos: print a menu,
# read a number from stdin, and run the matching demo module.
from camshift import camshift
from depth_detection import depth_detection as dd
from fast import fast
from hand_contours import detector as hc
from harris_corner_detection import subpixel as hsp
from image_operations import laplacian_derivative as lp
from image_operations import transformations as tf
from orb import convex_hull as ch
from orb import dt
from orb import orb
from shi_tomasi import shi_tomasi as st
# The menu text doubles as the input() prompt.
choice_message = """
Which program should I run?
1. Basic Harris Detection
2. Harris Detection with Subpixel Accuracy
3. Shi Tomasi
4. FAST (Features from Accelerated Segment Test)
5. BRIEF (Binary Robust Independent Elementary Features)
6. ORB (Oriented FAST and Rotated BRIEF)
7. Camshift
8. Contour based detector
9. Image resize
10. Laplacian Derivative
11. Convex hull of points using ORB
12. Delaunay Triangulation
13. Depth Detection
"""
# NOTE(review): raises an uncaught ValueError if the input is not an integer.
choice = int(input(choice_message))
# Sample image handed to the demos that operate on a single file.
hand_from_dataset = "../../dataset/Hands/Hand_0000083.jpg"
# Independent `if` statements rather than elif: harmless, since `choice`
# holds a single int and at most one branch can match.
if choice == 1:
    # NOTE(review): menu says "Basic Harris Detection" but this runs the
    # contour detector (same module as choice 8) -- confirm intended.
    hc.run(hand_from_dataset)
if choice == 2:
    hsp.run()
if choice == 3:
    st.run(hand_from_dataset)
if choice == 4:
    fast.run()
if choice == 5:
    brief.run()
if choice == 6:
    orb.run()
if choice == 7:
    camshift.run()
if choice == 8:
    hc.run(hand_from_dataset)
if choice == 9:
    tf.resize()
if choice == 10:
    lp.laplacian(hand_from_dataset)
if choice == 11:
    ch.run(hand_from_dataset)
if choice == 12:
    dt.run(hand_from_dataset)
if choice == 13:
    dd.run()
| true | true |
f7f4a34b0895923a9e1b7733f5c983c0b323fa68 | 1,791 | py | Python | docs/example-ingestion-script.py | vlro/terracotta | 26ef2f61bd8306fd8fecd27288df6426a6751534 | [
"MIT"
] | null | null | null | docs/example-ingestion-script.py | vlro/terracotta | 26ef2f61bd8306fd8fecd27288df6426a6751534 | [
"MIT"
] | null | null | null | docs/example-ingestion-script.py | vlro/terracotta | 26ef2f61bd8306fd8fecd27288df6426a6751534 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import re
import glob

import tqdm

import boto3
s3 = boto3.resource('s3')

import terracotta as tc

# settings
DB_NAME = 'terracotta.sqlite'
RASTER_GLOB = r'/path/to/rasters/*.tif'
# Named groups supply the Terracotta key values, in the same order as KEYS.
# The extension dot is escaped and \Z anchors the end: the original pattern
# used a bare '.' (matches any character) and re.match left the tail
# unanchored, so stray files such as 'AA_TTTTT_20200101_red.tiff' or
# '..._redXtif' were silently accepted.
RASTER_NAME_PATTERN = r'(?P<sensor>\w{2})_(?P<tile>\w{5})_(?P<date>\d{8})_(?P<band>\w+)\.tif\Z'
KEYS = ('sensor', 'tile', 'date', 'band')
KEY_DESCRIPTIONS = {
    'sensor': 'Sensor short name',
    'tile': 'Sentinel-2 tile ID',
    'date': 'Sensing date',
    'band': 'Band or index name'
}
S3_BUCKET = 'tc-testdata'
S3_RASTER_FOLDER = 'rasters'
S3_PATH = f's3://{S3_BUCKET}/{S3_RASTER_FOLDER}'

driver = tc.get_driver(DB_NAME)

# create an empty database if it doesn't exist
if not os.path.isfile(DB_NAME):
    driver.create(KEYS, KEY_DESCRIPTIONS)

# Sanity check: an existing database must declare the same keys. Raise
# explicitly instead of `assert`, which is stripped under `python -O`.
if driver.key_names != KEYS:
    raise RuntimeError(
        f'Database {DB_NAME} declares keys {driver.key_names}, expected {KEYS}'
    )

# Compile once instead of re-parsing the pattern for every file in the loop.
raster_name_re = re.compile(RASTER_NAME_PATTERN)

available_datasets = driver.get_datasets()
raster_files = list(glob.glob(RASTER_GLOB))
pbar = tqdm.tqdm(raster_files)

for raster_path in pbar:
    pbar.set_postfix(file=raster_path)
    raster_filename = os.path.basename(raster_path)

    # extract keys from filename
    match = raster_name_re.match(raster_filename)
    if match is None:
        raise ValueError(f'Input file {raster_filename} does not match raster pattern')
    keys = match.groups()

    # skip already processed data
    if keys in available_datasets:
        continue

    with driver.connect():
        # since the rasters will be served from S3, we need to pass the correct remote path
        driver.insert(keys, raster_path, override_path=f'{S3_PATH}/{raster_filename}')

    s3.meta.client.upload_file(raster_path, S3_BUCKET,
                               f'{S3_RASTER_FOLDER}/{raster_filename}')

# upload database to S3
s3.meta.client.upload_file(DB_NAME, S3_BUCKET, DB_NAME)
| 27.553846 | 92 | 0.695142 |
import os
import re
import glob
import tqdm
import boto3
s3 = boto3.resource('s3')
import terracotta as tc
DB_NAME = 'terracotta.sqlite'
RASTER_GLOB = r'/path/to/rasters/*.tif'
RASTER_NAME_PATTERN = r'(?P<sensor>\w{2})_(?P<tile>\w{5})_(?P<date>\d{8})_(?P<band>\w+).tif'
KEYS = ('sensor', 'tile', 'date', 'band')
KEY_DESCRIPTIONS = {
'sensor': 'Sensor short name',
'tile': 'Sentinel-2 tile ID',
'date': 'Sensing date',
'band': 'Band or index name'
}
S3_BUCKET = 'tc-testdata'
S3_RASTER_FOLDER = 'rasters'
S3_PATH = f's3://{S3_BUCKET}/{S3_RASTER_FOLDER}'
driver = tc.get_driver(DB_NAME)
if not os.path.isfile(DB_NAME):
driver.create(KEYS, KEY_DESCRIPTIONS)
# sanity check
assert driver.key_names == KEYS
available_datasets = driver.get_datasets()
raster_files = list(glob.glob(RASTER_GLOB))
pbar = tqdm.tqdm(raster_files)
for raster_path in pbar:
pbar.set_postfix(file=raster_path)
raster_filename = os.path.basename(raster_path)
# extract keys from filename
match = re.match(RASTER_NAME_PATTERN, raster_filename)
if match is None:
raise ValueError(f'Input file {raster_filename} does not match raster pattern')
keys = match.groups()
# skip already processed data
if keys in available_datasets:
continue
with driver.connect():
# since the rasters will be served from S3, we need to pass the correct remote path
driver.insert(keys, raster_path, override_path=f'{S3_PATH}/{raster_filename}')
s3.meta.client.upload_file(raster_path, S3_BUCKET,
f'{S3_RASTER_FOLDER}/{raster_filename}')
# upload database to S3
s3.meta.client.upload_file(DB_NAME, S3_BUCKET, DB_NAME)
| true | true |
f7f4a53437399c7fe17de4ff690fc933eba1457f | 1,232 | py | Python | siapp/tests/static_pages/test_vews.py | saidulislam/siapp-python-crud-template | 4ee8ae8855f703eee36031244341a88f5c8dd2e2 | [
"Apache-2.0"
] | null | null | null | siapp/tests/static_pages/test_vews.py | saidulislam/siapp-python-crud-template | 4ee8ae8855f703eee36031244341a88f5c8dd2e2 | [
"Apache-2.0"
] | null | null | null | siapp/tests/static_pages/test_vews.py | saidulislam/siapp-python-crud-template | 4ee8ae8855f703eee36031244341a88f5c8dd2e2 | [
"Apache-2.0"
] | null | null | null | from flask import url_for
# to run test
# docker-compose exec website py.test siapp/tests
# to check code coverage
# docker-compose exec website py.test --conv-report term-missing --cov siapp
# running static code analysis
# docker-compose exec website flake8 . --exclude __init__.py
# or docker-compose exec website flake8 siapp --exclude __init__.py
class TestPage(object):
def test_home_page(self, client):
""" Home page should respond with a success 200. """
response = client.get(url_for('static_pages.home'))
assert response.status_code == 200
def test_terms_page(self, client):
""" Terms page should respond with a success 200. """
response = client.get(url_for('static_pages.terms'))
assert response.status_code == 200
def test_privacy_page(self, client):
""" Privacy page should respond with a success 200. """
response = client.get(url_for('static_pages.privacy'))
assert response.status_code == 200
def test_faq_page(self, client):
""" faq page should respond with a success 200. """
response = client.get(url_for('static_pages.faq'))
assert response.status_code == 200
| 37.333333 | 77 | 0.671266 | from flask import url_for
class TestPage(object):
def test_home_page(self, client):
response = client.get(url_for('static_pages.home'))
assert response.status_code == 200
def test_terms_page(self, client):
response = client.get(url_for('static_pages.terms'))
assert response.status_code == 200
def test_privacy_page(self, client):
response = client.get(url_for('static_pages.privacy'))
assert response.status_code == 200
def test_faq_page(self, client):
response = client.get(url_for('static_pages.faq'))
assert response.status_code == 200
| true | true |
f7f4a53a4769b38e22fde862434d5d2277839fd6 | 1,107 | py | Python | Chapter 08/Chap08_Example8.71.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | Chapter 08/Chap08_Example8.71.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | Chapter 08/Chap08_Example8.71.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | class Human:
def __init__(self,myname,myage):
self.myname = myname
self.myage = myage
def mydisplay(self):
print("The name is: ", self.myname)
print("The age is: ", self.myage)
class Mystudent(Human):
def __init__(self,myname,myage, mycity, myhobby):
super().__init__(myname, myage)
self.mycity = mycity
self.myhobby = myhobby
def mydisplay(self):
super().mydisplay()
print("The city is: ", self.mycity)
print("The hobby is: ", self.myhobby)
class Myemployee(Human):
def __init__(self,myname,myage, mystaffno, mycontactno):
super().__init__(myname, myage)
self.mystaffno = mystaffno
self.mycontactno = mycontactno
def mydisplay(self):
super().mydisplay()
print("The staff number is: ", self.mystaffno)
print("The contact number is: ", self.mycontactno)
myst = Mystudent('Ram',16,'Hyderabad','Studying')
myemp = Myemployee('Surendra',54,60001,9406121337)
myst.mydisplay()
print("*"*25)
myemp.mydisplay() | 30.75 | 61 | 0.607949 | class Human:
def __init__(self,myname,myage):
self.myname = myname
self.myage = myage
def mydisplay(self):
print("The name is: ", self.myname)
print("The age is: ", self.myage)
class Mystudent(Human):
def __init__(self,myname,myage, mycity, myhobby):
super().__init__(myname, myage)
self.mycity = mycity
self.myhobby = myhobby
def mydisplay(self):
super().mydisplay()
print("The city is: ", self.mycity)
print("The hobby is: ", self.myhobby)
class Myemployee(Human):
def __init__(self,myname,myage, mystaffno, mycontactno):
super().__init__(myname, myage)
self.mystaffno = mystaffno
self.mycontactno = mycontactno
def mydisplay(self):
super().mydisplay()
print("The staff number is: ", self.mystaffno)
print("The contact number is: ", self.mycontactno)
myst = Mystudent('Ram',16,'Hyderabad','Studying')
myemp = Myemployee('Surendra',54,60001,9406121337)
myst.mydisplay()
print("*"*25)
myemp.mydisplay() | true | true |
f7f4a55f16b151737e710d9bef426ebc681b7c96 | 1,263 | py | Python | oxe-api/test/resource/setting/test_add_setting.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | oxe-api/test/resource/setting/test_add_setting.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | oxe-api/test/resource/setting/test_add_setting.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | from test.BaseCase import BaseCase
class TestAddSetting(BaseCase):
@BaseCase.login
@BaseCase.grant_access("/setting/add_setting")
def test_ok(self, token):
payload = {
"property": "PROP",
"value": "VALUE",
}
response = self.application.post('/setting/add_setting',
headers=self.get_standard_post_header(token),
json=payload)
self.assertEqual(200, response.status_code)
self.assertEqual(self.db.get_count(self.db.tables["Setting"]), 1)
@BaseCase.login
@BaseCase.grant_access("/setting/add_setting")
def test_ko_already_exists(self, token):
self.db.insert({"property": "PROP", "value": "VALUE"}, self.db.tables["Setting"])
payload = {
"property": "PROP",
"value": "VALUE",
}
response = self.application.post('/setting/add_setting',
headers=self.get_standard_post_header(token),
json=payload)
self.assertEqual("422 Provided setting already exists", response.status)
self.assertEqual(self.db.get_count(self.db.tables["Setting"]), 1)
| 33.236842 | 89 | 0.563737 | from test.BaseCase import BaseCase
class TestAddSetting(BaseCase):
@BaseCase.login
@BaseCase.grant_access("/setting/add_setting")
def test_ok(self, token):
payload = {
"property": "PROP",
"value": "VALUE",
}
response = self.application.post('/setting/add_setting',
headers=self.get_standard_post_header(token),
json=payload)
self.assertEqual(200, response.status_code)
self.assertEqual(self.db.get_count(self.db.tables["Setting"]), 1)
@BaseCase.login
@BaseCase.grant_access("/setting/add_setting")
def test_ko_already_exists(self, token):
self.db.insert({"property": "PROP", "value": "VALUE"}, self.db.tables["Setting"])
payload = {
"property": "PROP",
"value": "VALUE",
}
response = self.application.post('/setting/add_setting',
headers=self.get_standard_post_header(token),
json=payload)
self.assertEqual("422 Provided setting already exists", response.status)
self.assertEqual(self.db.get_count(self.db.tables["Setting"]), 1)
| true | true |
f7f4a615790e9f400b2e30ce776b836f61e63468 | 12,064 | py | Python | backend/scripts/curation/leng2020_AD/curate.py | isabella232/corpora-data-portal | 09ed3cad3165f8b0db854b76404e0d5d0ea0b7d9 | [
"MIT"
] | null | null | null | backend/scripts/curation/leng2020_AD/curate.py | isabella232/corpora-data-portal | 09ed3cad3165f8b0db854b76404e0d5d0ea0b7d9 | [
"MIT"
] | 1 | 2021-02-23T22:56:13.000Z | 2021-02-23T22:56:13.000Z | backend/scripts/curation/leng2020_AD/curate.py | isabella232/corpora-data-portal | 09ed3cad3165f8b0db854b76404e0d5d0ea0b7d9 | [
"MIT"
] | null | null | null | """Create the 'original' and 'remix' datasets for the snRNAseq of human neurons AD (Leng,
et. al. 2020) biorxiv preprint submission"""
import anndata
import numpy as np
import pandas as pd
import scanpy as sc
from scipy.sparse import csr_matrix
import utils.hgnc
import utils.ontology
def basic_curation(adata):
"""Changes to create the matrix for presentation in cellxgene."""
# Check if there are duplicate cell or gene IDs
if not adata.obs.index.is_unique:
raise Exception("Cell IDs not unique.")
if not adata.var.index.is_unique:
raise Exception("Gene symbols not unique.")
# These are deleted at the request of the submitter
del adata.obsm["X_CCA"]
del adata.obsm["X_CCA.ALIGNED"]
adata.uns["contributors"] = [
{"name": "Kun Leng"},
{"name": "Emmy Li"},
{"name": "Rana Eser"},
{"name": "Antonia Piergies"},
{"name": "Rene Sit"},
{"name": "Michelle Tan"},
{"name": "Norma Neff"},
{"name": "Song Hua Li"},
{"name": "Roberta Diehl Rodriguez"},
{"name": "Claudia Kimie Suemoto"},
{"name": "Renata Elaine Paraizo Leite"},
{"name": "Carlos A. Pasqualucci"},
{"name": "William W. Seeley"},
{"name": "Salvatore Spina"},
{"name": "Helmut Heinsen"},
{"name": "Lea T. Grinberg", "email": "lea.grinberg@ucsf.edu"},
{"name": "Martin Kampmann", "email": "martin.kampmann@ucsf.edu"},
]
adata.uns["preprint_doi"] = "https://doi.org/10.1101/2020.04.04.025825"
adata.uns["default_embedding"] = "X_tSNE"
def remix(adata, title: str):
"""Create the full Corpora remix"""
# First fill in missing metadata fields
adata.obs["assay_ontology"] = "EFO:0009899"
adata.obs["assay"] = utils.ontology.get_ontology_label("EFO:0009899")
adata.obs["sex"] = "male"
adata.obs["disease_ontology"] = "MONDO:0004975"
adata.obs["disease"] = utils.ontology.get_ontology_label("MONDO:0004975")
adata.obs["tissue_ontology"] = "UBERON:0002728"
adata.obs["tissue"] = utils.ontology.get_ontology_label("UBERON:0002728")
adata.uns["organism_ontology"] = "NCBITaxon:9606"
adata.uns["organism"] = utils.ontology.get_ontology_label("NCBITaxon:9606")
adata.uns["title"] = title
adata.uns["project_name"] = "Molecular characterization of selectively vulnerable neurons in " "Alzheimer’s Disease"
adata.uns["project_description"] = (
"Single-nuclei RNA sequencing of caudal entorhinal cortex and "
"superior frontal gyrus from individuals spanning the "
"neuropathological progression of AD"
)
adata.uns["project_raw_data_links"] = ["https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE147528"]
adata.uns["project_other_links"] = ["https://www.synapse.org/#!Synapse:syn21788402/wiki/601825"]
# Set the cell ontology values
cell_type_map = {
"Exc": "excitatory neuron",
"OPC": "oligodendrocyte precursor cell",
"Inh": "inhibitory neuron",
"Micro": "mature microglial cell",
"Astro": "mature astrocyte",
"Oligo": "oligodendrocyte",
"Endo": "endothelial cell",
}
adata.obs["cell_type"] = adata.obs["clusterAssignment"].str.split(":|\\.", expand=True)[1].map(cell_type_map)
del adata.obs["clusterAssignment"]
# make dictionary mapping cell_type to CL term
cell_type_ontology_map = {
cell_type: utils.ontology.lookup_candidate_term(cell_type)[0][0]
for cell_type in adata.obs["cell_type"].unique()
}
# result: {'excitatory neuron': 'CL:0008030', 'oligodendrocyte precursor cell': 'CL:0002453',
# 'inhibitory neuron': 'CL:0008029', 'mature microglial cell': 'CL:0002629',
# 'mature astrocyte': 'CL:0002627', 'oligodendrocyte': 'CL:0000128', 'endothelial cell':
# 'CL:0000115'}
adata.obs["cell_type_ontology"] = adata.obs["cell_type"].map(cell_type_ontology_map)
# optional
adata.uns["tags"] = ["AD", "Alzheimer's Disease", "neurons"]
# Now translate the gene symbols and sum new duplicates
# Note that we're pulling from raw here. That's where the raw counts that we can sum are
upgraded_var_index = utils.hgnc.get_upgraded_var_index(adata.var)
merged_raw_counts = pd.DataFrame.sparse.from_spmatrix(
adata.raw.X,
index=adata.obs.index,
columns=upgraded_var_index,
).sum(axis=1, level=0, skipna=False)
# Create the new anndata object with the summed values
remix_adata = anndata.AnnData(
X=merged_raw_counts,
obs=adata.obs,
var=merged_raw_counts.columns.to_frame(name="hgnc_gene_symbol"),
uns=adata.uns,
obsm=adata.obsm,
)
remix_adata.raw = remix_adata.copy()
# Perform the same tranformations on the new values as they did in the paper
# Divide counts of each cell by sizeFactors from logNormCounts used by author
r, c = remix_adata.X.nonzero()
rX_sp = csr_matrix(((1.0 / remix_adata.obs.sizeFactors)[r], (r, c)), shape=(remix_adata.X.shape))
remix_adata.X = remix_adata.X.multiply(rX_sp)
sc.pp.log1p(remix_adata, base=2)
# Finally describe the layers and we're done
remix_adata.uns["layer_descriptions"] = {
"raw.X": "raw",
"X": "logNormCounts",
}
return remix_adata
def print_summary(adata):
"""Print out a little summary of the metadata."""
print(adata.obs.dtypes)
for column in adata.obs.nunique().items():
field, n_unique = column
if n_unique > 1000 and not np.issubdtype(adata.obs[field].dtype, np.number):
print("TOO MANY:", field)
elif n_unique == 1:
print("ONLY ONE:", field)
# Print missing cell fields required by Corpora schema
remix_cellfields = np.array(
[
"tissue",
"assay",
"disease",
"cell_type",
"sex",
"ethnicity",
"tissue_ontology",
"assay_ontology",
"disease_ontology",
"cell_type_ontology",
"ethnicity_ontology",
]
)
missing_remix_cellfields = np.array(set(remix_cellfields) - set(adata.obs.columns.values))
print("MISSING CORPORA FIELDS:", missing_remix_cellfields)
# Process EC_all
ad = sc.read_h5ad("EC_allCells/kampmann_lab_human_AD_snRNAseq_EC.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write("EC_allCells/kampmann_lab_human_AD_snRNAseq_EC-curated.h5ad", compression="gzip")
rad = remix(
ad,
title="Molecular characterization of selectively vulnerable neurons in "
"Alzheimer’s Disease: caudal entorhinal cortex",
)
print_summary(rad)
rad.write("EC_allCells/kampmann_lab_human_AD_snRNAseq_EC-remixed.h5ad", compression="gzip")
# Process SFG_all
ad = sc.read_h5ad("SFG_allCells/kampmann_lab_human_AD_snRNAseq_SFG.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write("SFG_allCells/kampmann_lab_human_AD_snRNAseq_SFG-curated.h5ad", compression="gzip")
rad = remix(
ad,
title="Molecular characterization of selectively vulnerable neurons in "
"Alzheimer’s Disease: superior frontal gyrus",
)
print_summary(rad)
rad.write("SFG_allCells/kampmann_lab_human_AD_snRNAseq_SFG-remixed.h5ad", compression="gzip")
# Process EC_astrocytes
ad = sc.read_h5ad("EC_subclusters/EC_astrocytes/kampmann_lab_human_AD_snRNAseq_EC_astrocytes.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write("EC_subclusters/EC_astrocytes/kampmann_lab_human_AD_snRNAseq_EC_astrocytes-curated.h5ad", compression="gzip")
rad = remix(
ad, title="Molecular characterization of selectively vulnerable neurons in " "Alzheimer’s Disease: EC astrocytes"
)
print_summary(rad)
rad.write("EC_subclusters/EC_astrocytes/kampmann_lab_human_AD_snRNAseq_EC_astrocytes-remixed.h5ad", compression="gzip")
# Process EC_excitatoryNeurons
ad = sc.read_h5ad("EC_subclusters/EC_excitatoryNeurons/kampmann_lab_human_AD_snRNAseq_EC_excitatoryNeurons.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write(
"EC_subclusters/EC_excitatoryNeurons/kampmann_lab_human_AD_snRNAseq_EC_excitatoryNeurons-curated.h5ad",
compression="gzip",
)
rad = remix(
ad,
title="Molecular characterization of selectively vulnerable neurons in "
"Alzheimer’s Disease: EC excitatoryNeurons",
)
print_summary(rad)
rad.write(
"EC_subclusters/EC_excitatoryNeurons/kampmann_lab_human_AD_snRNAseq_EC_excitatoryNeurons-remixed.h5ad",
compression="gzip",
)
# Process EC_inhibitoryNeurons
ad = sc.read_h5ad("EC_subclusters/EC_inhibitoryNeurons/kampmann_lab_human_AD_snRNAseq_EC_inhibitoryNeurons.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write(
"EC_subclusters/EC_inhibitoryNeurons/kampmann_lab_human_AD_snRNAseq_EC_inhibitoryNeurons-curated.h5ad",
compression="gzip",
)
rad = remix(
ad,
title="Molecular characterization of selectively vulnerable neurons in "
"Alzheimer’s Disease: EC inhibitoryNeurons",
)
print_summary(rad)
rad.write(
"EC_subclusters/EC_inhibitoryNeurons/kampmann_lab_human_AD_snRNAseq_EC_inhibitoryNeurons-remixed.h5ad",
compression="gzip",
)
# Process EC_microglia
ad = sc.read_h5ad("EC_subclusters/EC_microglia/kampmann_lab_human_AD_snRNAseq_EC_microglia.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write("EC_subclusters/EC_microglia/kampmann_lab_human_AD_snRNAseq_EC_microglia-curated.h5ad", compression="gzip")
rad = remix(
ad, title="Molecular characterization of selectively vulnerable neurons in " "Alzheimer’s Disease: EC microglia"
)
print_summary(rad)
rad.write("EC_subclusters/EC_microglia/kampmann_lab_human_AD_snRNAseq_EC_microglia-remixed.h5ad", compression="gzip")
# Process SFG_astrocytes
ad = sc.read_h5ad("SFG_subclusters/SFG_astrocytes/kampmann_lab_human_AD_snRNAseq_SFG_astrocytes.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write(
"SFG_subclusters/SFG_astrocytes/kampmann_lab_human_AD_snRNAseq_SFG_astrocytes-curated.h5ad", compression="gzip"
)
rad = remix(
ad, title="MolSFGular characterization of selSFGtively vulnerable neurons in " "Alzheimer’s Disease: SFG astrocytes"
)
print_summary(rad)
rad.write(
"SFG_subclusters/SFG_astrocytes/kampmann_lab_human_AD_snRNAseq_SFG_astrocytes-remixed.h5ad", compression="gzip"
)
# Process SFG_excitatoryNeurons
ad = sc.read_h5ad("SFG_subclusters/SFG_excitatoryNeurons/kampmann_lab_human_AD_snRNAseq_SFG_excitatoryNeurons.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write(
"SFG_subclusters/SFG_excitatoryNeurons/kampmann_lab_human_AD_snRNAseq_SFG_excitatoryNeurons-curated.h5ad",
compression="gzip",
)
rad = remix(
ad,
title="MolSFGular characterization of selSFGtively vulnerable neurons in "
"Alzheimer’s Disease: SFG excitatoryNeurons",
)
print_summary(rad)
rad.write(
"SFG_subclusters/SFG_excitatoryNeurons/kampmann_lab_human_AD_snRNAseq_SFG_excitatoryNeurons-remixed.h5ad",
compression="gzip",
)
# Process SFG_inhibitoryNeurons
ad = sc.read_h5ad("SFG_subclusters/SFG_inhibitoryNeurons/kampmann_lab_human_AD_snRNAseq_SFG_inhibitoryNeurons.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write(
"SFG_subclusters/SFG_inhibitoryNeurons/kampmann_lab_human_AD_snRNAseq_SFG_inhibitoryNeurons-curated.h5ad",
compression="gzip",
)
rad = remix(
ad,
title="MolSFGular characterization of selSFGtively vulnerable neurons in "
"Alzheimer’s Disease: SFG inhibitoryNeurons",
)
print_summary(rad)
rad.write(
"SFG_subclusters/SFG_inhibitoryNeurons/kampmann_lab_human_AD_snRNAseq_SFG_inhibitoryNeurons-remixed.h5ad",
compression="gzip",
)
# Process SFG_microglia
ad = sc.read_h5ad("SFG_subclusters/SFG_microglia/kampmann_lab_human_AD_snRNAseq_SFG_microglia.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write("SFG_subclusters/SFG_microglia/kampmann_lab_human_AD_snRNAseq_SFG_microglia-curated.h5ad", compression="gzip")
rad = remix(
ad, title="MolSFGular characterization of selSFGtively vulnerable neurons in " "Alzheimer’s Disease: SFG microglia"
)
print_summary(rad)
rad.write("SFG_subclusters/SFG_microglia/kampmann_lab_human_AD_snRNAseq_SFG_microglia-remixed.h5ad", compression="gzip")
| 37.12 | 120 | 0.728697 |
import anndata
import numpy as np
import pandas as pd
import scanpy as sc
from scipy.sparse import csr_matrix
import utils.hgnc
import utils.ontology
def basic_curation(adata):
if not adata.obs.index.is_unique:
raise Exception("Cell IDs not unique.")
if not adata.var.index.is_unique:
raise Exception("Gene symbols not unique.")
del adata.obsm["X_CCA"]
del adata.obsm["X_CCA.ALIGNED"]
adata.uns["contributors"] = [
{"name": "Kun Leng"},
{"name": "Emmy Li"},
{"name": "Rana Eser"},
{"name": "Antonia Piergies"},
{"name": "Rene Sit"},
{"name": "Michelle Tan"},
{"name": "Norma Neff"},
{"name": "Song Hua Li"},
{"name": "Roberta Diehl Rodriguez"},
{"name": "Claudia Kimie Suemoto"},
{"name": "Renata Elaine Paraizo Leite"},
{"name": "Carlos A. Pasqualucci"},
{"name": "William W. Seeley"},
{"name": "Salvatore Spina"},
{"name": "Helmut Heinsen"},
{"name": "Lea T. Grinberg", "email": "lea.grinberg@ucsf.edu"},
{"name": "Martin Kampmann", "email": "martin.kampmann@ucsf.edu"},
]
adata.uns["preprint_doi"] = "https://doi.org/10.1101/2020.04.04.025825"
adata.uns["default_embedding"] = "X_tSNE"
def remix(adata, title: str):
adata.obs["assay_ontology"] = "EFO:0009899"
adata.obs["assay"] = utils.ontology.get_ontology_label("EFO:0009899")
adata.obs["sex"] = "male"
adata.obs["disease_ontology"] = "MONDO:0004975"
adata.obs["disease"] = utils.ontology.get_ontology_label("MONDO:0004975")
adata.obs["tissue_ontology"] = "UBERON:0002728"
adata.obs["tissue"] = utils.ontology.get_ontology_label("UBERON:0002728")
adata.uns["organism_ontology"] = "NCBITaxon:9606"
adata.uns["organism"] = utils.ontology.get_ontology_label("NCBITaxon:9606")
adata.uns["title"] = title
adata.uns["project_name"] = "Molecular characterization of selectively vulnerable neurons in " "Alzheimer’s Disease"
adata.uns["project_description"] = (
"Single-nuclei RNA sequencing of caudal entorhinal cortex and "
"superior frontal gyrus from individuals spanning the "
"neuropathological progression of AD"
)
adata.uns["project_raw_data_links"] = ["https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE147528"]
adata.uns["project_other_links"] = ["https://www.synapse.org/#!Synapse:syn21788402/wiki/601825"]
cell_type_map = {
"Exc": "excitatory neuron",
"OPC": "oligodendrocyte precursor cell",
"Inh": "inhibitory neuron",
"Micro": "mature microglial cell",
"Astro": "mature astrocyte",
"Oligo": "oligodendrocyte",
"Endo": "endothelial cell",
}
adata.obs["cell_type"] = adata.obs["clusterAssignment"].str.split(":|\\.", expand=True)[1].map(cell_type_map)
del adata.obs["clusterAssignment"]
cell_type_ontology_map = {
cell_type: utils.ontology.lookup_candidate_term(cell_type)[0][0]
for cell_type in adata.obs["cell_type"].unique()
}
adata.obs["cell_type_ontology"] = adata.obs["cell_type"].map(cell_type_ontology_map)
adata.uns["tags"] = ["AD", "Alzheimer's Disease", "neurons"]
# Now translate the gene symbols and sum new duplicates
# Note that we're pulling from raw here. That's where the raw counts that we can sum are
upgraded_var_index = utils.hgnc.get_upgraded_var_index(adata.var)
merged_raw_counts = pd.DataFrame.sparse.from_spmatrix(
adata.raw.X,
index=adata.obs.index,
columns=upgraded_var_index,
).sum(axis=1, level=0, skipna=False)
# Create the new anndata object with the summed values
remix_adata = anndata.AnnData(
X=merged_raw_counts,
obs=adata.obs,
var=merged_raw_counts.columns.to_frame(name="hgnc_gene_symbol"),
uns=adata.uns,
obsm=adata.obsm,
)
remix_adata.raw = remix_adata.copy()
# Perform the same tranformations on the new values as they did in the paper
# Divide counts of each cell by sizeFactors from logNormCounts used by author
r, c = remix_adata.X.nonzero()
rX_sp = csr_matrix(((1.0 / remix_adata.obs.sizeFactors)[r], (r, c)), shape=(remix_adata.X.shape))
remix_adata.X = remix_adata.X.multiply(rX_sp)
sc.pp.log1p(remix_adata, base=2)
# Finally describe the layers and we're done
remix_adata.uns["layer_descriptions"] = {
"raw.X": "raw",
"X": "logNormCounts",
}
return remix_adata
def print_summary(adata):
print(adata.obs.dtypes)
for column in adata.obs.nunique().items():
field, n_unique = column
if n_unique > 1000 and not np.issubdtype(adata.obs[field].dtype, np.number):
print("TOO MANY:", field)
elif n_unique == 1:
print("ONLY ONE:", field)
remix_cellfields = np.array(
[
"tissue",
"assay",
"disease",
"cell_type",
"sex",
"ethnicity",
"tissue_ontology",
"assay_ontology",
"disease_ontology",
"cell_type_ontology",
"ethnicity_ontology",
]
)
missing_remix_cellfields = np.array(set(remix_cellfields) - set(adata.obs.columns.values))
print("MISSING CORPORA FIELDS:", missing_remix_cellfields)
ad = sc.read_h5ad("EC_allCells/kampmann_lab_human_AD_snRNAseq_EC.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write("EC_allCells/kampmann_lab_human_AD_snRNAseq_EC-curated.h5ad", compression="gzip")
rad = remix(
ad,
title="Molecular characterization of selectively vulnerable neurons in "
"Alzheimer’s Disease: caudal entorhinal cortex",
)
print_summary(rad)
rad.write("EC_allCells/kampmann_lab_human_AD_snRNAseq_EC-remixed.h5ad", compression="gzip")
ad = sc.read_h5ad("SFG_allCells/kampmann_lab_human_AD_snRNAseq_SFG.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write("SFG_allCells/kampmann_lab_human_AD_snRNAseq_SFG-curated.h5ad", compression="gzip")
rad = remix(
ad,
title="Molecular characterization of selectively vulnerable neurons in "
"Alzheimer’s Disease: superior frontal gyrus",
)
print_summary(rad)
rad.write("SFG_allCells/kampmann_lab_human_AD_snRNAseq_SFG-remixed.h5ad", compression="gzip")
ad = sc.read_h5ad("EC_subclusters/EC_astrocytes/kampmann_lab_human_AD_snRNAseq_EC_astrocytes.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write("EC_subclusters/EC_astrocytes/kampmann_lab_human_AD_snRNAseq_EC_astrocytes-curated.h5ad", compression="gzip")
rad = remix(
ad, title="Molecular characterization of selectively vulnerable neurons in " "Alzheimer’s Disease: EC astrocytes"
)
print_summary(rad)
rad.write("EC_subclusters/EC_astrocytes/kampmann_lab_human_AD_snRNAseq_EC_astrocytes-remixed.h5ad", compression="gzip")
ad = sc.read_h5ad("EC_subclusters/EC_excitatoryNeurons/kampmann_lab_human_AD_snRNAseq_EC_excitatoryNeurons.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write(
"EC_subclusters/EC_excitatoryNeurons/kampmann_lab_human_AD_snRNAseq_EC_excitatoryNeurons-curated.h5ad",
compression="gzip",
)
rad = remix(
ad,
title="Molecular characterization of selectively vulnerable neurons in "
"Alzheimer’s Disease: EC excitatoryNeurons",
)
print_summary(rad)
rad.write(
"EC_subclusters/EC_excitatoryNeurons/kampmann_lab_human_AD_snRNAseq_EC_excitatoryNeurons-remixed.h5ad",
compression="gzip",
)
ad = sc.read_h5ad("EC_subclusters/EC_inhibitoryNeurons/kampmann_lab_human_AD_snRNAseq_EC_inhibitoryNeurons.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write(
"EC_subclusters/EC_inhibitoryNeurons/kampmann_lab_human_AD_snRNAseq_EC_inhibitoryNeurons-curated.h5ad",
compression="gzip",
)
rad = remix(
ad,
title="Molecular characterization of selectively vulnerable neurons in "
"Alzheimer’s Disease: EC inhibitoryNeurons",
)
print_summary(rad)
rad.write(
"EC_subclusters/EC_inhibitoryNeurons/kampmann_lab_human_AD_snRNAseq_EC_inhibitoryNeurons-remixed.h5ad",
compression="gzip",
)
ad = sc.read_h5ad("EC_subclusters/EC_microglia/kampmann_lab_human_AD_snRNAseq_EC_microglia.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write("EC_subclusters/EC_microglia/kampmann_lab_human_AD_snRNAseq_EC_microglia-curated.h5ad", compression="gzip")
rad = remix(
ad, title="Molecular characterization of selectively vulnerable neurons in " "Alzheimer’s Disease: EC microglia"
)
print_summary(rad)
rad.write("EC_subclusters/EC_microglia/kampmann_lab_human_AD_snRNAseq_EC_microglia-remixed.h5ad", compression="gzip")
ad = sc.read_h5ad("SFG_subclusters/SFG_astrocytes/kampmann_lab_human_AD_snRNAseq_SFG_astrocytes.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write(
"SFG_subclusters/SFG_astrocytes/kampmann_lab_human_AD_snRNAseq_SFG_astrocytes-curated.h5ad", compression="gzip"
)
rad = remix(
ad, title="MolSFGular characterization of selSFGtively vulnerable neurons in " "Alzheimer’s Disease: SFG astrocytes"
)
print_summary(rad)
rad.write(
"SFG_subclusters/SFG_astrocytes/kampmann_lab_human_AD_snRNAseq_SFG_astrocytes-remixed.h5ad", compression="gzip"
)
ad = sc.read_h5ad("SFG_subclusters/SFG_excitatoryNeurons/kampmann_lab_human_AD_snRNAseq_SFG_excitatoryNeurons.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write(
"SFG_subclusters/SFG_excitatoryNeurons/kampmann_lab_human_AD_snRNAseq_SFG_excitatoryNeurons-curated.h5ad",
compression="gzip",
)
rad = remix(
ad,
title="MolSFGular characterization of selSFGtively vulnerable neurons in "
"Alzheimer’s Disease: SFG excitatoryNeurons",
)
print_summary(rad)
rad.write(
"SFG_subclusters/SFG_excitatoryNeurons/kampmann_lab_human_AD_snRNAseq_SFG_excitatoryNeurons-remixed.h5ad",
compression="gzip",
)
ad = sc.read_h5ad("SFG_subclusters/SFG_inhibitoryNeurons/kampmann_lab_human_AD_snRNAseq_SFG_inhibitoryNeurons.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write(
"SFG_subclusters/SFG_inhibitoryNeurons/kampmann_lab_human_AD_snRNAseq_SFG_inhibitoryNeurons-curated.h5ad",
compression="gzip",
)
rad = remix(
ad,
title="MolSFGular characterization of selSFGtively vulnerable neurons in "
"Alzheimer’s Disease: SFG inhibitoryNeurons",
)
print_summary(rad)
rad.write(
"SFG_subclusters/SFG_inhibitoryNeurons/kampmann_lab_human_AD_snRNAseq_SFG_inhibitoryNeurons-remixed.h5ad",
compression="gzip",
)
ad = sc.read_h5ad("SFG_subclusters/SFG_microglia/kampmann_lab_human_AD_snRNAseq_SFG_microglia.h5ad")
basic_curation(ad)
print_summary(ad)
ad.write("SFG_subclusters/SFG_microglia/kampmann_lab_human_AD_snRNAseq_SFG_microglia-curated.h5ad", compression="gzip")
rad = remix(
ad, title="MolSFGular characterization of selSFGtively vulnerable neurons in " "Alzheimer’s Disease: SFG microglia"
)
print_summary(rad)
rad.write("SFG_subclusters/SFG_microglia/kampmann_lab_human_AD_snRNAseq_SFG_microglia-remixed.h5ad", compression="gzip")
| true | true |
f7f4a74970adef7ee7e2bfe178852008403a1b3c | 3,118 | py | Python | News_fetcher/Sure/sure_info.py | PPjaisri/Senior-project | cf29a51bdff33e1cc9ae505b454a002457bc3245 | [
"MIT"
] | null | null | null | News_fetcher/Sure/sure_info.py | PPjaisri/Senior-project | cf29a51bdff33e1cc9ae505b454a002457bc3245 | [
"MIT"
] | null | null | null | News_fetcher/Sure/sure_info.py | PPjaisri/Senior-project | cf29a51bdff33e1cc9ae505b454a002457bc3245 | [
"MIT"
] | null | null | null | import os
import csv
import time
import logging
import requests
import pandas as pd
from bs4 import BeautifulSoup
class sure_info(object):
    """Incremental scraper for "Sure" fact-check articles.

    Thread URLs are read from ``sure_thread.csv``; every URL newer than the
    last one already present in ``sure_info.csv`` is crawled and the scraped
    articles are appended to that CSV.
    """

    # Paths are resolved two directories above the current working directory.
    # NOTE(review): the original comment said the second dirname() should be
    # uncommented only when running this file directly, yet both calls are
    # always active -- confirm the intended working directory.
    path = os.getcwd()
    path = os.path.dirname(path)
    path = os.path.dirname(path)
    input_path = os.path.join(path, 'result\\Sure\\sure_thread.csv')
    save_path = os.path.join(path, 'result\\Sure\\sure_info.csv')
    logging.basicConfig(level=logging.DEBUG)

    def __init__(self):
        self.fetch_data = []    # scraped article dicts collected this run
        self.current_page = 1
        self.finish = False
        self.last_link = ''     # URL of the most recently saved article
        self.count = 0          # pages crawled during this run

    def read_latest_save(self):
        """Return the 'link' of the last row of the output CSV, or '' when
        the file is missing, empty or unreadable (i.e. a fresh crawl)."""
        try:
            data = pd.read_csv(self.save_path, encoding='utf-8')
            return data.iloc[-1]['link']
        except Exception:
            # FIX: was a bare ``except:`` which would also swallow
            # KeyboardInterrupt/SystemExit; best-effort behavior is kept.
            return ''

    def finished_crawl(self):
        """Append the articles collected in this run to the output CSV,
        writing the header row only when the file did not exist before."""
        logging.info(f'Crawled {self.count} pages')
        with open(self.save_path, 'a', encoding='utf-8', newline='') as file:
            fieldnames = ['category', 'header', 'content', 'link', 'image', 'time']
            writer = csv.DictWriter(file, fieldnames=fieldnames)
            if self.last_link != '':
                writer.writerows(self.fetch_data)
            else:
                # no previous save -> the file is new, emit the header first
                writer.writeheader()
                writer.writerows(self.fetch_data)

    def fetch_page(self):
        """Crawl every thread URL that has not been saved yet, then flush."""
        urls = []
        self.last_link = self.read_latest_save()
        with open(self.input_path, 'r', encoding='utf-8') as file:
            data = file.readlines()
            for obj in data:
                if obj != '\n':
                    obj = obj.split(',')
                    urls.append(obj[1])
        # Walk the URL list from newest (bottom of file) to oldest, stopping
        # once the last saved link is reached.  NOTE(review): the range never
        # visits urls[0]; presumably that row is a CSV header -- confirm.
        new_urls = []
        for url in range(len(urls) - 1, 0, -1):
            new_urls.append(urls[url])
        for url in new_urls:
            if url == self.last_link:
                break
            else:
                self.count += 1
                time.sleep(0.5)  # be polite to the server
                self.crawl_page(url)
        self.finished_crawl()

    def crawl_page(self, url):
        """Scrape one article page and prepend it to ``self.fetch_data``."""
        response = requests.get(url)
        soup = BeautifulSoup(response.text, 'lxml')
        header = soup.h1.text.strip()
        # FIX: renamed the local from ``time`` which shadowed the imported
        # ``time`` module; whitespace in the timestamp text is normalized.
        time_text = (soup.find('div', class_='entry-meta')).text
        time_text = ' '.join(time_text.split())
        entry_content = soup.find('div', class_='entry-content')
        try:
            category = entry_content.find_all('strong')[1].text
        except Exception:
            # FIX: narrowed from a bare ``except:``; a missing/short
            # <strong> list simply means "no category".
            category = None
        content_blog = entry_content.select('p')
        content = [(i.text).strip() for i in content_blog]
        try:
            image = (soup.find('div', class_='thumb').find('img'))['data-src']
        except Exception:
            image = None
        data = {
            'category': category,
            'header': header,
            'content': content,
            'link': url,
            'image': image,
            'time': time_text
        }
        # Prepend: pages are crawled newest-to-oldest, so inserting at the
        # front leaves fetch_data oldest-first and the newest article ends up
        # as the last row written -- matching read_latest_save().
        self.fetch_data.insert(0, data)
if __name__ == '__main__':
sure = sure_info()
sure.fetch_page() | 28.87037 | 83 | 0.534958 | import os
import csv
import time
import logging
import requests
import pandas as pd
from bs4 import BeautifulSoup
class sure_info(object):
path = os.getcwd()
path = os.path.dirname(path)
path = os.path.dirname(path)
input_path = os.path.join(path, 'result\\Sure\\sure_thread.csv')
save_path = os.path.join(path, 'result\\Sure\\sure_info.csv')
logging.basicConfig(level=logging.DEBUG)
def __init__(self):
self.fetch_data = []
self.current_page = 1
self.finish = False
self.last_link = ''
self.count = 0
def read_latest_save(self):
try:
data = pd.read_csv(self.save_path, encoding='utf-8')
last_link = data.iloc[-1]['link']
return last_link
except:
return ''
def finished_crawl(self):
logging.info(f'Crawled {self.count} pages')
with open(self.save_path, 'a', encoding='utf-8', newline='') as file:
fieldnames = ['category', 'header', 'content', 'link', 'image', 'time']
writer = csv.DictWriter(file, fieldnames=fieldnames)
if self.last_link != '':
writer.writerows(self.fetch_data)
else:
writer.writeheader()
writer.writerows(self.fetch_data)
def fetch_page(self):
urls = []
self.last_link = self.read_latest_save()
with open(self.input_path, 'r', encoding='utf-8') as file:
data = file.readlines()
for obj in data:
if obj != '\n':
obj = obj.split(',')
urls.append(obj[1])
new_urls = []
for url in range(len(urls) - 1, 0, -1):
new_urls.append(urls[url])
for url in new_urls:
if url == self.last_link:
break
else:
self.count += 1
time.sleep(0.5)
self.crawl_page(url)
self.finished_crawl()
def crawl_page(self, url):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')
header = soup.h1.text.strip()
time = (soup.find('div', class_='entry-meta')).text
time = ' '.join(time.split())
entry_content = soup.find('div', class_='entry-content')
try:
category = entry_content.find_all('strong')[1].text
except:
category = None
content_blog = entry_content.select('p')
content = [(i.text).strip() for i in content_blog]
try:
image = (soup.find('div', class_='thumb').find('img'))['data-src']
except:
image = None
data = {
'category': category,
'header': header,
'content': content,
'link': url,
'image': image,
'time': time
}
self.fetch_data.insert(0, data)
if __name__ == '__main__':
sure = sure_info()
sure.fetch_page() | true | true |
f7f4a760f8759bc38003574648f76467155a2480 | 2,659 | py | Python | AO3/JSON2URL.py | ecjoseph42/toastystats | bd8fba7601bd4a11759bec7c826406fca67563c2 | [
"MIT"
] | 27 | 2019-07-28T03:33:04.000Z | 2022-03-30T18:56:14.000Z | AO3/JSON2URL.py | ecjoseph42/toastystats | bd8fba7601bd4a11759bec7c826406fca67563c2 | [
"MIT"
] | null | null | null | AO3/JSON2URL.py | ecjoseph42/toastystats | bd8fba7601bd4a11759bec7c826406fca67563c2 | [
"MIT"
] | 6 | 2019-08-02T21:41:53.000Z | 2022-02-08T22:15:13.000Z | import json
import sys
import convert
# Python 2 script: reads a JSON file describing saved searches and prints one
# Archive of Our Own (AO3) "sort and filter" URL per search.
if len(sys.argv) < 2:
    sys.exit('Usage: %s JSON_file [-verbose]' % sys.argv[0])
verbose = False
# get cmd line args
if len(sys.argv) > 2:
    arg = sys.argv[2]
    if arg == "-verbose" or arg == "-v":
        verbose = True
filename = sys.argv[1]
if verbose:
    print "filename: ", filename
# load JSON from file
try:
    with open(filename) as f:
        j = json.load(f)
except:
    # NOTE(review): bare except -- any failure (missing file, bad JSON) exits
    sys.exit("could not load JSON file")
try:
    searches = j['searches']
except:
    sys.exit("No 'searches' field in file")
for s in searches:
    if verbose:
        print "search: ", s
    # ``params`` accumulates a human-readable summary of converted fields
    params = ''
    # fetch the category(ies); the bare except doubles as a
    # "this search has no 'category' field" test
    c = ''
    try:
        c = s['category']
        tmp = convert.convertToAO3(c, 'cat', verbose)
        c = tmp[0]
        params += tmp[1]
        params += ', '
        if verbose:
            print "category: ", c
            print "params ", params
    except:
        if verbose:
            print "no category in search ", s
    # fetch the warning(s)
    w = ''
    try:
        w = s['warning']
        tmp = convert.convertToAO3(w, 'warn', verbose)
        w = tmp[0]
        params += tmp[1]
        params += ', '
        if verbose:
            print "warning: ", w
            print "params ", params
    except:
        if verbose:
            print "no warning in search ", s
    # fetch the tag(s); note the tag_id query parameter is emitted even when
    # the search has no tag (it will simply be empty)
    t = '&tag_id='
    try:
        tag = s['tag']
        tmp = convert.convertToAO3(tag, 'tag', verbose)
        t += tmp[0]
        params += tmp[1]
        params += ', '
        if verbose:
            print "tag: ", t
            print "params ", params
    except:
        if verbose:
            print "no tag in search ", s
    # fetch the "search within results"
    swr = '&work_search%5Bquery%5D='
    try:
        wth = s['search within results']
        tmp = convert.convertToAO3(wth, 'within', verbose)
        swr += tmp[0]
        params += tmp[1]
        params += ', '
        if verbose:
            print "search within results: ", swr
            print "params ", params
    except:
        if verbose:
            print "no search within results in search ", s
    # assemble the URL (the %5B/%5D sequences are URL-encoded brackets)
    urlprefix = 'http://archiveofourown.org/works?utf8=%E2%9C%93&commit=Sort+and+Filter&work_search%5Bsort_column%5D=revised_at'
    urlprequery = '&work_search%5Bother_tag_names%5D='
    urlpretag = '&work_search%5Blanguage_id%5D=&work_search%5Bcomplete%5D=0'
    u = urlprefix + c + w + urlprequery + swr + urlpretag + t
    if verbose:
        print '***********'
    print u
if verbose:
    print '***********'
| 23.741071 | 128 | 0.522377 | import json
import sys
import convert
if len(sys.argv) < 2:
sys.exit('Usage: %s JSON_file [-verbose]' % sys.argv[0])
verbose = False
if len(sys.argv) > 2:
arg = sys.argv[2]
if arg == "-verbose" or arg == "-v":
verbose = True
filename = sys.argv[1]
if verbose:
print "filename: ", filename
try:
with open(filename) as f:
j = json.load(f)
except:
sys.exit("could not load JSON file")
try:
searches = j['searches']
except:
sys.exit("No 'searches' field in file")
for s in searches:
if verbose:
print "search: ", s
params = ''
c = ''
try:
c = s['category']
tmp = convert.convertToAO3(c, 'cat', verbose)
c = tmp[0]
params += tmp[1]
params += ', '
if verbose:
print "category: ", c
print "params ", params
except:
if verbose:
print "no category in search ", s
w = ''
try:
w = s['warning']
tmp = convert.convertToAO3(w, 'warn', verbose)
w = tmp[0]
params += tmp[1]
params += ', '
if verbose:
print "warning: ", w
print "params ", params
except:
if verbose:
print "no warning in search ", s
t = '&tag_id='
try:
tag = s['tag']
tmp = convert.convertToAO3(tag, 'tag', verbose)
t += tmp[0]
params += tmp[1]
params += ', '
if verbose:
print "tag: ", t
print "params ", params
except:
if verbose:
print "no tag in search ", s
swr = '&work_search%5Bquery%5D='
try:
wth = s['search within results']
tmp = convert.convertToAO3(wth, 'within', verbose)
swr += tmp[0]
params += tmp[1]
params += ', '
if verbose:
print "search within results: ", swr
print "params ", params
except:
if verbose:
print "no search within results in search ", s
urlprefix = 'http://archiveofourown.org/works?utf8=%E2%9C%93&commit=Sort+and+Filter&work_search%5Bsort_column%5D=revised_at'
urlprequery = '&work_search%5Bother_tag_names%5D='
urlpretag = '&work_search%5Blanguage_id%5D=&work_search%5Bcomplete%5D=0'
u = urlprefix + c + w + urlprequery + swr + urlpretag + t
if verbose:
print '***********'
print u
if verbose:
print '***********'
| false | true |
f7f4aa0c5c1ea0b37e0ad6e001f8b1078807bcb6 | 6,122 | py | Python | scripts/model.py | OleguerCanal/kaggle_digit-recognizer | 89268df3e13744faacec5bf18bdc5071abf094d4 | [
"MIT"
] | null | null | null | scripts/model.py | OleguerCanal/kaggle_digit-recognizer | 89268df3e13744faacec5bf18bdc5071abf094d4 | [
"MIT"
] | null | null | null | scripts/model.py | OleguerCanal/kaggle_digit-recognizer | 89268df3e13744faacec5bf18bdc5071abf094d4 | [
"MIT"
] | null | null | null | import datetime
import os
import pandas as pd
import numpy as np
from pathlib import Path
import sys
import time
import yaml
# Keras
from keras.models import model_from_json
from keras.optimizers import RMSprop, Adam
from keras.preprocessing.image import ImageDataGenerator
# Own imports TODO(oleguer): Fix this path problem
sys.path.append(str(Path(__file__).parent))
from architectures.simple_cnn import simple_cnn_classification
from architectures.model2 import model2
from data_processing.preprocessing import preprocess_data
from helpers.callbacks import TensorBoard, ReduceLROnPlateau, ModelCheckpoint, TelegramSummary
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
class Model():
    """Keras CNN wrapper for the Kaggle digit-recognizer task.

    All hyper-parameters and paths come from a YAML file.  ``train`` logs the
    architecture, the parameters, the best weights and TensorBoard summaries
    under a timestamped directory.
    """

    def __init__(self, param_yaml):
        self.__load_params(param_yaml)

    def __load_params(self, param_yaml):
        # FIX: use a context manager -- the original left the stream open.
        with open(param_yaml, 'r') as stream:
            self.params = yaml.load(stream, Loader = yaml.FullLoader)

    def recover_logged_model(self, weights_path):
        """Rebuild a logged model from architecture.json + a weights file.

        ``weights_path`` points at a weights file inside a directory created
        by train(); the architecture JSON is expected to live next to it.
        """
        weights_name = weights_path.split("/")[-1]
        full_model_path = weights_path.replace("/" + weights_name, "")
        # FIX: context manager instead of manual open/close
        with open(full_model_path + "/architecture.json", "r") as json_file:
            loaded_model_json = json_file.read()
        loaded_model = model_from_json(loaded_model_json)
        # load weights into new model
        loaded_model.load_weights(weights_path)
        print("Loaded model from disk")
        return loaded_model

    def __log_model(self, path):
        """Serialize the architecture and run parameters under ``path``."""
        # Make sure dir exists
        if not os.path.exists(path):
            os.makedirs(path)
        # Serialize model to JSON
        model_json = self.model.to_json()
        with open(path + "/architecture.json", "w") as json_file:
            json_file.write(model_json)
        # Save model params
        with open(path + "/params.yaml", 'w') as outfile:
            yaml.dump(self.params, outfile, default_flow_style=False)

    def get_submission(self, mod, test, csv_path = "../input/solution.csv"):
        """Write a Kaggle submission CSV (ImageId,Label) for ``test``.

        NOTE(review): the hard-coded ImageId range 1..28000 matches this
        competition's test-set size -- adjust if the test set changes.
        """
        results = mod.predict(test)
        results = np.argmax(results, axis = 1)  # class with the highest score
        results = pd.Series(results, name="Label")
        submission = pd.concat([pd.Series(range(1, 28001), name = "ImageId"), results], axis = 1)
        submission.to_csv(csv_path, index = False)

    def train(self):
        """Train the network described by the params and log everything."""
        # 1. Load data
        raw_train = pd.read_csv(self.params["data_path"])
        raw_train = raw_train.sample(frac = self.params["sample_data"])
        # 2. Process data
        x_train, y_train, x_val, y_val = preprocess_data(raw_train)
        del raw_train  # free the raw frame early
        # 3. Define optimizer (RMSprop unless "Adam" is requested) and model
        optimizer = RMSprop(
            lr = float(self.params["learning_rate"]),
            rho = float(self.params["rho"]),
            epsilon = float(self.params["epsilon"]),
            decay = float(self.params["decay"]))
        if str(self.params["optimizer"]) == "Adam":
            # BUG FIX: this previously assigned to a misspelled ``opimizer``
            # variable, so the Adam option was silently ignored.
            optimizer = Adam(float(self.params["learning_rate"]))
        # self.model = simple_cnn_classification(input_shape = x_train[0].shape) # alternative architecture
        self.model = model2(input_shape = x_train[0].shape) # Default: Start with random weights
        if self.params["train_from_saved_weights"]:
            self.model = self.recover_logged_model(self.params["saved_weights_path"])
        self.model.compile(
            optimizer = optimizer,
            loss = self.params["loss"],
            metrics = self.params["metrics"])
        # 4. Log model architecture + params under a timestamped directory
        time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S')
        save_path = str(self.params["model_logging_path"]) + "/" + str(time_stamp)
        self.__log_model(path = save_path)
        # 5. Data augmentation
        datagen_args = dict(rotation_range = 20,
                            width_shift_range = 0.1,
                            height_shift_range = 0.1,
                            shear_range = 0.1,
                            zoom_range = 0.1)
        datagen = ImageDataGenerator(**datagen_args)
        datagen.fit(x_train)
        # 6. Callbacks: best-weights checkpoint, LR schedule, TensorBoard,
        #    Telegram progress summary.
        weights_filepath = save_path + "/weights-{epoch:0f}-{val_acc:.4f}.hdf5"
        checkpoint = ModelCheckpoint( # Save the best weights seen so far
            filepath=weights_filepath,
            monitor='val_acc',
            verbose=1,
            save_best_only=True,
            mode='max')
        telegram_summary = TelegramSummary()
        log_dir = str(self.params["tensorboard_logging_path"]) + "/{}".format(time.time())
        tensorboard = TensorBoard(log_dir = log_dir)
        learning_rate_reduction = ReduceLROnPlateau(
            monitor = 'val_acc',
            patience = 5,
            verbose = 1,
            factor = 0.85, # multiply lr by 0.85 after ``patience`` stale epochs
            min_lr = 1e-10)
        callbacks = [checkpoint, learning_rate_reduction, tensorboard, telegram_summary]
        # 7. Fit Model
        self.model.summary()
        history = self.model.fit_generator(
            generator = datagen.flow(x_train, y_train, batch_size = self.params["batch_size"]),
            epochs = self.params["epochs"],
            validation_data = (x_val, y_val),
            verbose = 1,
            callbacks = callbacks,
            steps_per_epoch = x_train.shape[0] // self.params["batch_size"]) # // is floor division
        # TODO(oleguer): Log history?
        return

    def test(self, data):
        #TODO(oleguer): self.model.predict
        pass

    def analyze(self):
        pass
| 40.813333 | 118 | 0.582163 | import datetime
import os
import pandas as pd
import numpy as np
from pathlib import Path
import sys
import time
import yaml
from keras.models import model_from_json
from keras.optimizers import RMSprop, Adam
from keras.preprocessing.image import ImageDataGenerator
sys.path.append(str(Path(__file__).parent))
from architectures.simple_cnn import simple_cnn_classification
from architectures.model2 import model2
from data_processing.preprocessing import preprocess_data
from helpers.callbacks import TensorBoard, ReduceLROnPlateau, ModelCheckpoint, TelegramSummary
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
class Model():
def __init__(self, param_yaml):
self.__load_params(param_yaml)
def __load_params(self, param_yaml):
stream = open(param_yaml, 'r')
self.params = yaml.load(stream, Loader = yaml.FullLoader)
def recover_logged_model(self, weights_path):
weights_name = weights_path.split("/")[-1]
full_model_path = weights_path.replace("/" + weights_name, "")
json_file = open(full_model_path + "/architecture.json", "r")
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(weights_path)
print("Loaded model from disk")
return loaded_model
def __log_model(self, path):
if not os.path.exists(path):
os.makedirs(path)
model_json = self.model.to_json()
with open(path + "/architecture.json", "w") as json_file:
json_file.write(model_json)
with open(path + "/params.yaml", 'w') as outfile:
yaml.dump(self.params, outfile, default_flow_style=False)
def get_submission(self, mod, test, csv_path = "../input/solution.csv"):
results = mod.predict(test)
results = np.argmax(results, axis = 1)
results = pd.Series(results, name="Label")
submission = pd.concat([pd.Series(range(1, 28001), name = "ImageId"), results], axis = 1)
submission.to_csv(csv_path, index = False)
def train(self):
raw_train = pd.read_csv(self.params["data_path"])
raw_train = raw_train.sample(frac = self.params["sample_data"])
x_train, y_train, x_val, y_val = preprocess_data(raw_train)
del raw_train
optimizer = RMSprop(
lr = float(self.params["learning_rate"]),
rho = float(self.params["rho"]),
epsilon = float(self.params["epsilon"]),
decay = float(self.params["decay"]))
if str(self.params["optimizer"]) == "Adam":
opimizer = Adam(float(self.params["learning_rate"]))
hape = x_train[0].shape)
if self.params["train_from_saved_weights"]:
self.model = self.recover_logged_model(self.params["saved_weights_path"])
self.model.compile(
optimizer = optimizer,
loss = self.params["loss"],
metrics = self.params["metrics"])
time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S')
save_path = str(self.params["model_logging_path"]) + "/" + str(time_stamp)
self.__log_model(path = save_path)
datagen_args = dict(rotation_range = 20,
width_shift_range = 0.1,
height_shift_range = 0.1,
shear_range = 0.1,
zoom_range = 0.1)
datagen = ImageDataGenerator(**datagen_args)
datagen.fit(x_train)
weights_filepath = save_path + "/weights-{epoch:0f}-{val_acc:.4f}.hdf5"
checkpoint = ModelCheckpoint(
filepath=weights_filepath,
monitor='val_acc',
verbose=1,
save_best_only=True,
mode='max')
telegram_summary = TelegramSummary()
log_dir = str(self.params["tensorboard_logging_path"]) + "/{}".format(time.time())
tensorboard = TensorBoard(log_dir = log_dir)
learning_rate_reduction = ReduceLROnPlateau(
monitor = 'val_acc',
patience = 5,
verbose = 1,
factor = 0.85,
min_lr = 1e-10)
callbacks = [checkpoint, learning_rate_reduction, tensorboard, telegram_summary]
self.model.summary()
history = self.model.fit_generator(
generator = datagen.flow(x_train, y_train, batch_size = self.params["batch_size"]),
epochs = self.params["epochs"],
validation_data = (x_val, y_val),
verbose = 1,
callbacks = callbacks,
steps_per_epoch = x_train.shape[0] // self.params["batch_size"])
return
def test(self, data):
pass
def analyze(self):
pass
| true | true |
f7f4aa647f0869b96289b26885822b5e604b12af | 25,908 | py | Python | pyaig/aig.py | sterin/pyaig | e630c6188e03bf98504ea74b27bf1279ba6708a8 | [
"MIT"
] | 4 | 2020-09-13T04:03:25.000Z | 2021-09-27T05:05:23.000Z | pyaig/aig.py | sterin/pyaig | e630c6188e03bf98504ea74b27bf1279ba6708a8 | [
"MIT"
] | null | null | null | pyaig/aig.py | sterin/pyaig | e630c6188e03bf98504ea74b27bf1279ba6708a8 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Author: Baruch Sterin <sterin@berkeley.edu>
# Simple Python AIG package
from past.builtins import xrange
from future.utils import iteritems
import itertools
class _Node(object):
    """One vertex of an AIG: constant-0, primary input, latch, AND or buffer.

    Each node is a (type, left, right) triple; the meaning of ``left`` and
    ``right`` depends on the type (see __init__).
    """

    # Node types
    CONST0 = 0
    PI = 1
    LATCH = 2
    AND = 3
    BUFFER = 4

    # Latch initialization
    INIT_ZERO = 0
    INIT_ONE = 1
    INIT_NONDET = 2

    def __init__(self, node_type, left=0, right=0):
        # Field meaning per node type:
        #   AND    -> left/right are the fanin literals
        #   PI     -> left is the PI index
        #   LATCH  -> left is the latch index, right is (init, next-literal)
        #   BUFFER -> left is the buffer index, right is the input literal
        self._type = node_type
        self._left = left
        self._right = right

    # -- factories ---------------------------------------------------------

    @staticmethod
    def make_const0():
        return _Node(_Node.CONST0)

    @staticmethod
    def make_pi(pi_id):
        return _Node(_Node.PI, pi_id, 0)

    @staticmethod
    def make_latch(l_id, init, next=None):
        return _Node(_Node.LATCH, l_id, (init, next))

    @staticmethod
    def make_and(left, right):
        return _Node(_Node.AND, left, right)

    @staticmethod
    def make_buffer(buf_id, buf_in):
        return _Node(_Node.BUFFER, buf_id, buf_in)

    # -- type predicates ---------------------------------------------------

    def is_const0(self):
        return self._type == _Node.CONST0

    def is_pi(self):
        return self._type == _Node.PI

    def is_and(self):
        return self._type == _Node.AND

    def is_buffer(self):
        return self._type == _Node.BUFFER

    def is_latch(self):
        return self._type == _Node.LATCH

    def is_nonterminal(self):
        # only ANDs and buffers have combinational fanins
        return self.is_and() or self.is_buffer()

    # -- fanins ------------------------------------------------------------

    def get_fanins(self):
        """Combinational fanin literals (a latch's next-state is excluded)."""
        if self.is_and():
            return [self._left, self._right]
        if self.is_buffer():
            return [self._right]
        return []

    def get_seq_fanins(self):
        """Fanin literals, including a latch's next-state function."""
        if self.is_and():
            return [self._left, self._right]
        if self.is_buffer():
            return [self._right]
        if self.is_latch():
            return [self._right[1]]
        return []

    # -- AND gates ---------------------------------------------------------

    def get_left(self):
        assert self.is_and()
        return self._left

    def get_right(self):
        assert self.is_and()
        return self._right

    # -- buffers -----------------------------------------------------------

    def get_buf_id(self):
        # NOTE: intentionally no type assert, matching the original behavior
        return self._left

    def get_buf_in(self):
        assert self.is_buffer()
        return self._right

    def set_buf_in(self, f):
        assert self.is_buffer()
        self._right = f

    def convert_buf_to_pi(self, pi_id):
        """Re-type this buffer node, in place, as a PI with id ``pi_id``."""
        assert self.is_buffer()
        self._type = _Node.PI
        self._left = pi_id
        self._right = 0

    # -- PIs and latches ---------------------------------------------------

    def get_pi_id(self):
        assert self.is_pi()
        return self._left

    def get_latch_id(self):
        assert self.is_latch()
        return self._left

    def get_init(self):
        assert self.is_latch()
        return self._right[0]

    def get_next(self):
        assert self.is_latch()
        return self._right[1]

    def set_init(self, init):
        assert self.is_latch()
        self._right = (init, self._right[1])

    def set_next(self, f):
        assert self.is_latch()
        self._right = (self._right[0], f)

    def __repr__(self):
        names = {
            _Node.AND: "AND",
            _Node.BUFFER: "BUFFER",
            _Node.CONST0: "CONST0",
            _Node.LATCH: "LATCH",
            _Node.PI: "PI",
        }
        kind = names.get(self._type, "ERROR")
        return "<pyaig.aig._Node _type=%s, _left=%s, _right=%s>" % (
            kind, str(self._left), str(self._right))
class AIG(object):
    # map AIG nodes to AIG nodes, take negation into account
    class fmap(object):
        """Map from AIG literals to AIG literals.

        Only positive (non-negated) literals are stored as keys; looking up
        a negated literal returns the mapped value with the negation pushed
        onto it via ``negate_if_negated``.
        """
        def __init__(self, fs=[], negate_if_negated=None, zero=None):
            # negate_if_negated(g, f) must apply f's negation bit to g;
            # defaults to AIG.negate_if_negated (XOR of the LSB).
            # NOTE(review): the mutable default ``fs=[]`` is only iterated,
            # never mutated, so the shared-default pitfall does not apply.
            self.negate_if_negated = negate_if_negated if negate_if_negated else AIG.negate_if_negated
            # const-0 always maps to ``zero`` (defaults to const-0 itself)
            zero = AIG.get_const0() if zero is None else zero
            self.m = { AIG.get_const0():zero }
            if fs:
                self.update(fs)
        def __getitem__(self, f):
            # look up the positive literal, then re-apply f's negation
            return self.negate_if_negated( self.m[AIG.get_positive(f)], f )
        def __setitem__(self, f, g):
            self.m[ AIG.get_positive(f) ] = self.negate_if_negated(g, f)
        def __contains__(self, f):
            return AIG.get_positive(f) in self.m
        def __delitem__(self, f):
            del self.m[ AIG.get_positive(f) ]
        def iteritems(self):
            # iterate (positive literal, mapped literal) pairs
            return iteritems(self.m)
        def update(self, fs):
            self.m.update( (AIG.get_positive(f), self.negate_if_negated(g, f)) for f,g in fs )
class fset(object):
def __init__(self, fs=[]):
self.s = set( AIG.get_positive(f) for f in fs )
def __contains__(self, f):
return AIG.get_positive(f) in self.s
def __len__(self):
return len(self.s)
def __iter__(self):
return self.s.__iter__()
def add(self, f):
f = AIG.get_positive(f)
res = f in self.s
self.s.add(f)
return res
def remove(self, f):
return self.s.remove( AIG.get_positive(f) )
    # PO types
    OUTPUT = 0
    BAD_STATES = 1
    CONSTRAINT = 2
    JUSTICE = 3
    FAIRNESS = 4
    # Latch initialization (aliases of the _Node constants)
    INIT_ZERO = _Node.INIT_ZERO
    INIT_ONE = _Node.INIT_ONE
    INIT_NONDET = _Node.INIT_NONDET
    def __init__(self, name=None, flat_name = (lambda n: n) ):
        """Create an empty AIG containing only the constant-0 node.

        ``flat_name`` is a callable used to transform names (identity by
        default).
        """
        self._name = name
        self._strash = {}        # (AND, left, right) -> literal; structural hashing
        self._pis = []           # PI literals indexed by PI id
        self._latches = []       # latch literals indexed by latch id
        self._buffers = []       # buffer literals (-1 once converted to PI)
        self._pos = []           # (fanin literal, po_type) indexed by PO id
        self._justice = []       # one list of PO ids per justice property
        self._nodes = []         # id -> _Node; literal = id<<1 (|1 if negated)
        self._name_to_id = {}    # node-name bimap
        self._id_to_name = {}
        self._name_to_po = {}    # PO-name bimap
        self._po_to_name = {}
        self._flat_name = flat_name
        self._fanouts = {}
        # node 0 is the constant-0 node, so literal 0 = const0 and 1 = const1
        self._nodes.append( _Node.make_const0() )
    def deref(self, f):
        """Return the _Node for literal ``f``; bit 0 (the negation flag) is
        discarded, so ``f`` and ``~f`` dereference to the same node."""
        return self._nodes[ f>>1 ]
    def name(self):
        # name of the whole AIG (may be None)
        return self._name
# Create basic objects
@staticmethod
def get_const(c):
if c:
return AIG.get_const1()
return AIG.get_const0()
@staticmethod
def get_const0():
return 0
@staticmethod
def get_const1():
return 1
def create_pi(self, name=None):
pi_id = len(self._pis)
n = _Node.make_pi(pi_id)
fn = len(self._nodes)<<1
self._nodes.append(n)
self._pis.append( fn )
if name is not None:
self.set_name(fn, name)
return fn
def create_latch(self, name=None, init=INIT_ZERO, next=None):
l_id = len(self._latches)
n = _Node.make_latch(l_id, init, next)
fn = len(self._nodes)<<1
self._nodes.append(n)
self._latches.append( fn )
if name is not None:
self.set_name(fn, name)
return fn
def create_and(self, left, right):
if left<right:
left, right = right, left
if right==0:
return 0
if right==1:
return left
if left == right:
return right
if left == (right ^ 1):
return 0
key = (_Node.AND, left, right)
if key in self._strash:
return self._strash[key]
f = len(self._nodes)<<1
self._nodes.append( _Node.make_and(left, right) )
self._strash[key] = f
return f
def create_buffer(self, buf_in=0, name=None):
b_id = len(self._buffers)
f = len(self._nodes)<<1
self._nodes.append( _Node.make_buffer(b_id, buf_in) )
self._buffers.append( f )
if name is not None:
self.set_name(f, name)
return f
def convert_buf_to_pi(self, buf):
assert self.is_buffer(buf)
assert self.get_buf_in(buf) >= 0
n = self.deref(buf)
self._buffers[n.get_buf_id()] = -1
n.convert_buf_to_pi(len(self._pis))
self._pis.append(buf)
def create_po(self, f=0, name=None, po_type=OUTPUT ):
po_id = len(self._pos)
self._pos.append( (f, po_type) )
if name is not None:
self.set_po_name(po_id, name)
return po_id
def create_justice(self, po_ids):
po_ids = list(po_ids)
j_id = len(self._justice)
for po_id in po_ids:
assert self.get_po_type(po_id) == AIG.JUSTICE
self._justice.append( po_ids )
return j_id
def remove_justice(self):
for po_ids in self._justice:
for po_id in po_ids:
self.set_po_type(po_id, AIG.OUTPUT)
self._justice = []
# Names
def set_name(self, f, name):
assert not self.is_negated(f)
assert name not in self._name_to_id
assert f not in self._id_to_name
self._name_to_id[name] = f
self._id_to_name[f] = name
def get_id_by_name(self, name):
return self._name_to_id[name]
def has_name(self, f):
return f in self._id_to_name
def name_exists(self, n):
return n in self._name_to_id
def get_name_by_id(self, f):
return self._id_to_name[f]
def remove_name(self, f):
assert self.has_name(f)
name = self.get_name_by_id(f)
del self._id_to_name[f]
del self._name_to_id[name]
def iter_names(self):
return iteritems(self._id_to_name)
def fill_pi_names(self, replace=False, template="I_{}"):
if replace:
for pi in self.get_pis():
if self.has_name(pi):
self.remove_name(pi)
uid = 0
for pi in self.get_pis():
if not self.has_name(pi):
while True:
name = template.format(uid)
uid += 1
if not self.name_exists(name):
break
self.set_name(pi, name)
# PO names
def set_po_name(self, po, name):
assert 0 <= po < len(self._pos)
assert name not in self._name_to_po
assert po not in self._po_to_name
self._name_to_po[name] = po
self._po_to_name[po] = name
def get_po_by_name(self, name):
return self._name_to_po[name]
def po_has_name(self, po):
return po in self._po_to_name
def name_has_po(self, po):
return po in self._name_to_po
def remove_po_name(self, po):
assert self.po_has_name(po)
name = self.get_name_by_po(po)
del self._name_to_po[name]
del self._po_to_name[po]
def get_name_by_po(self, po):
return self._po_to_name[po]
def iter_po_names(self):
return ( (po_id, self.get_po_fanin(po_id), po_name) for po_id, po_name in iteritems(self._po_to_name) )
def fill_po_names(self, replace=False, template="O_{}"):
if replace:
self._name_to_po.clear()
self._po_to_name.clear()
po_names = set(name for _, _, name in self.iter_po_names())
uid = 0
for po_id, _, _ in self.get_pos():
if not self.po_has_name(po_id):
while True:
name = template.format(uid)
uid += 1
if name not in po_names:
break
self.set_po_name(po_id, name)
# Query IDs
@staticmethod
def get_id(f):
return f >> 1
def is_const0(self, f):
n = self.deref(f)
return n.is_const0()
def is_pi(self, f):
n = self.deref(f)
return n.is_pi()
def is_latch(self, f):
n = self.deref(f)
return n.is_latch()
def is_and(self, f):
n = self.deref(f)
return n.is_and()
def is_buffer(self, f):
n = self.deref(f)
return n.is_buffer()
# PIs
def get_pi_by_id(self, pi_id):
return self._pis[ pi_id ]
# Get/Set next for latches
def set_init(self, l, init):
assert not self.is_negated(l)
assert self.is_latch(l)
n = self.deref(l)
n.set_init(init)
def set_next(self, l, f):
assert not self.is_negated(l)
assert self.is_latch(l)
n = self.deref(l)
n.set_next(f)
def get_init(self, l):
assert not self.is_negated(l)
assert self.is_latch(l)
n = self.deref(l)
return n.get_init()
def get_next(self, l):
assert not self.is_negated(l)
assert self.is_latch(l)
n = self.deref(l)
return n.get_next()
# And gate
def get_and_fanins(self, f):
assert self.is_and(f)
n = self.deref(f)
return (n.get_left(), n.get_right())
def get_and_left(self, f):
assert self.is_and(f)
return self.deref(f).get_left()
def get_and_right(self, f):
assert self.is_and(f)
return self.deref(f).get_right()
# Buffer
def get_buf_in(self, b):
n = self.deref(b)
return n.get_buf_in()
def set_buf_in(self, b, f):
assert b>f
n = self.deref(b)
return n.set_buf_in(f)
def get_buf_id(self, b):
n = self.deref(b)
return n.get_buf_id()
def skip_buf(self, b):
while self.is_buffer(b):
b = AIG.negate_if_negated( self.get_buf_in(b), b )
return b
# Fanins
def get_fanins(self,f):
n = self.deref(f)
return n.get_fanins()
def get_positive_fanins(self,f):
n = self.deref(f)
return (self.get_positive(fi) for fi in n.get_fanins())
def get_positive_seq_fanins(self,f):
n = self.deref(f)
return (self.get_positive(fi) for fi in n.get_seq_fanins())
# PO fanins
def get_po_type(self, po):
assert 0 <= po < len(self._pos)
return self._pos[po][1]
def get_po_fanin(self, po):
assert 0 <= po < len(self._pos)
return self._pos[po][0]
def set_po_fanin(self, po, f):
assert 0 <= po < len(self._pos)
self._pos[po] = ( f, self._pos[po][1] )
def set_po_type(self, po, po_type):
assert 0 <= po < len(self._pos)
self._pos[po] = ( self._pos[po][0], po_type )
# Justice
def get_justice_pos(self, j_id):
assert 0 <= j_id < len(self._justice)
return ( po for po in self._justice[j_id] )
def set_justice_pos(self, j_id, po_ids):
assert 0 <= j_id < len(self._justice)
for po_id in po_ids:
assert self.get_po_type(po_id) == AIG.JUSTICE
self._justice[j_id] = po_ids
    # Negation
    #
    # Literals store the node index in the upper bits and the negation flag
    # in bit 0, so all of these are pure bit manipulations on the literal.
    @staticmethod
    def is_negated(f):
        # true iff the complement bit of literal ``f`` is set
        return (f&1) != 0
    @staticmethod
    def get_positive(f):
        # clear the complement bit, yielding the non-negated literal
        return (f & ~1)
    @staticmethod
    def negate(f):
        # toggle the complement bit
        return f ^ 1
    @staticmethod
    def negate_if(f, c):
        # negate ``f`` iff ``c`` is truthy
        if c:
            return f^1
        else:
            return f
    @staticmethod
    def positive_if(f, c):
        # return ``f`` unchanged if ``c`` is truthy, otherwise negated
        if c:
            return f
        else:
            return f^1
    @staticmethod
    def negate_if_negated(f, c):
        # transfer the complement bit of literal ``c`` onto ``f``
        return f ^ ( c & 1 )
    # Higher-level boolean operations, all expressed via AND + negation
    def create_nand(self, left, right):
        return self.negate( self.create_and(left,right) )
    def create_or(self, left, right):
        # De Morgan: a | b == ~(~a & ~b)
        return self.negate( self.create_and(self.negate(left), self.negate(right)))
    def create_nor(self, left, right):
        return self.negate( self.create_or(left, right))
    def create_xor(self, left, right):
        # a ^ b == (a & ~b) | (~a & b)
        return self.create_or(
            self.create_and( left, self.negate(right) ),
            self.create_and( self.negate(left), right )
        )
    def create_iff(self, left, right):
        # a <-> b == ~(a ^ b)
        return self.negate( self.create_xor(left, right) )
    def create_implies(self, left, right):
        # a -> b == ~a | b
        return self.create_or(self.negate(left), right)
    def create_ite(self, f_if, f_then, f_else):
        # 2:1 multiplexer: (if & then) | (~if & else)
        return self.create_or(
            self.create_and( f_if, f_then),
            self.create_and( self.negate(f_if), f_else)
        )
# Object numbers
def n_pis(self):
return len(self._pis)
def n_latches(self):
return len(self._latches)
def n_ands(self):
return self.n_nonterminals() - self.n_buffers()
def n_nonterminals(self):
return len(self._nodes) - 1 - self.n_latches() - self.n_pis()
def n_pos(self):
return len( self._pos )
def n_pos_by_type(self, type):
res = 0
for _ in self.get_pos_by_type(type):
res += 1
return res
def n_justice(self):
return len( self._justice )
def n_buffers(self):
return len( self._buffers )
# Object access as iterators (use list() to get a copy)
def construction_order(self):
return ( i<<1 for i in xrange(1, len(self._nodes) ) )
def construction_order_deref(self):
return ( (f, self.deref(f)) for f in self.construction_order() )
def get_pis(self):
return ( i<<1 for i, n in enumerate(self._nodes) if n.is_pi() )
def get_latches(self):
return ( l for l in self._latches )
def get_buffers(self):
return ( b for b in self._buffers if b>=0 )
def get_and_gates(self):
return ( i<<1 for i, n in enumerate(self._nodes) if n.is_and() )
def get_pos(self):
return ( (po_id, po_fanin, po_type) for po_id, (po_fanin, po_type) in enumerate(self._pos) )
def get_pos_by_type(self, type):
return ( (po_id, po_fanin, po_type) for po_id, po_fanin, po_type in self.get_pos() if po_type==type )
def get_po_fanins(self):
return ( po for _,po,_ in self.get_pos() )
def get_po_fanins_by_type(self, type):
return ( po for _,po,po_type in self.get_pos() if po_type==type)
def get_justice_properties(self):
return ( (i,po_ids) for i, po_ids in enumerate( self._justice ) )
def get_nonterminals(self):
return ( i<<1 for i,n in enumerate(self._nodes) if n.is_nonterminal() )
# Python special methods
def __len__(self):
return len(self._nodes)
    # return the fanin cone of 'roots', stop at 'stop' (combinational fanins
    # by default; pass a different ``fanins`` relation for other traversals)
    def get_cone(self, roots, stop=[], fanins=get_positive_fanins):
        """Return the sorted list of positive literals in the transitive
        fanin cone of ``roots``; the traversal does not descend into nodes
        listed in ``stop``.

        ``fanins`` is the class-level function object captured as default,
        hence the explicit ``fanins(self, cur)`` call below.
        """
        visited = set()
        dfs_stack = list(roots)
        while dfs_stack:
            cur = self.get_positive(dfs_stack.pop())
            if cur in visited or cur in stop:
                continue
            visited.add(cur)
            for fi in fanins(self, cur):
                if fi not in visited:
                    dfs_stack.append(fi)
        return sorted(visited)
    # return the sequential cone of roots
    def get_seq_cone(self, roots, stop=[]):
        """Like get_cone, but also follows latch next-state fanins."""
        return self.get_cone(roots, stop, fanins=AIG.get_positive_seq_fanins)
    def topological_sort(self, roots, stop=()):
        """ topologically sort the combinatorial cone of 'roots', stop at 'stop' """
        # Iterative post-order DFS generator: a literal is yielded only after
        # all of its (positive) fanins have been yielded, so fanins always
        # precede fanouts in the output.
        def fanins(f):
            # literals in `stop` are treated as cut-points with no fanins
            if f in stop:
                return []
            return [ fi for fi in self.get_positive_fanins(f) ]
        visited = AIG.fset()
        dfs_stack = []
        for root in roots:
            # fset.add returns True if the literal was already present
            if visited.add(root):
                continue
            dfs_stack.append( (root, fanins(root)) )
            while dfs_stack:
                cur, ds = dfs_stack[-1]
                if not ds:
                    # all fanins of `cur` emitted: emit `cur` itself (post-order)
                    dfs_stack.pop()
                    if cur is not None:
                        yield cur
                    continue
                d = ds.pop()
                if visited.add(d):
                    continue
                dfs_stack.append( (d,[fi for fi in fanins(d) if fi not in visited]) )
    def clean(self, pos=None, justice_pos=None):
        """ return a new AIG, containing only the cone of the POs, removing buffers while attempting to preserve names """
        aig = AIG()
        M = AIG.fmap()
        def visit(f, af):
            # record the image `af` of literal `f` in M, carrying the name of
            # `f` (if any) over to `af`, with a '~' prefix when `af` is negated
            if self.has_name(f):
                if AIG.is_negated(af):
                    aig.set_name( AIG.get_positive(af), "~%s"%self.get_name_by_id(f) )
                else:
                    aig.set_name( af, self.get_name_by_id(f) )
            M[f] = af
        # default to keeping all POs / all justice properties
        if pos is None:
            pos = range(len(self._pos))
        pos = set(pos)
        if justice_pos is None:
            justice_pos = range(len(self._justice))
        # justice properties reference POs: keep those POs too
        for j in justice_pos:
            pos.update(self._justice[j])
        cone = self.get_seq_cone( self.get_po_fanin(po_id) for po_id in pos )
        # rebuild the cone in topological order so AND fanins are mapped first
        for f in self.topological_sort(cone):
            n = self.deref(f)
            if n.is_pi():
                visit( f, aig.create_pi() )
            elif n.is_and():
                visit( f, aig.create_and( M[n.get_left()], M[n.get_right()] ) )
            elif n.is_latch():
                l = aig.create_latch(init=n.get_init())
                visit( f, l )
            elif n.is_buffer():
                # NOTE(review): buffers are asserted not to occur here; the
                # visit() call below is unreachable, and `M(...)` (call syntax
                # on an fmap) would raise TypeError anyway -- dead code.
                assert False
                visit( f, M( n.get_buf_in()) )
        # second pass: connect next-state functions of the kept latches
        for l in self.get_latches():
            if l in cone:
                aig.set_next(M[l], M[self.get_next(l)])
        po_map = {}
        for po_id in pos:
            po_f = self.get_po_fanin(po_id)
            po = aig.create_po( M[po_f], self.get_name_by_po(po_id) if self.po_has_name(po_id) else None, po_type=self.get_po_type(po_id) )
            po_map[po_id] = po
        # recreate justice properties in terms of the new PO ids
        for j in justice_pos:
            aig.create_justice([ po_map[j_po] for j_po in self._justice[j] ])
        return aig
    def compose(self, src, M, copy_pos=True):
        """ rebuild the AIG 'src' inside 'self', connecting the two AIGs using 'M' """
        # first pass: create an image in self for every node of src not already
        # mapped through M (construction order guarantees AND fanins are
        # mapped before the AND itself)
        for f in src.construction_order():
            if f in M:
                continue
            n = src.deref(f)
            if n.is_pi():
                M[f] = self.create_pi()
            elif n.is_and():
                M[f] = self.create_and( M[n.get_left()], M[n.get_right()] )
            elif n.is_latch():
                M[f] = self.create_latch(init=n.get_init())
            elif n.is_buffer():
                M[f] = self.create_buffer()
        # second pass: wire up buffer inputs and latch next-state functions,
        # which may reference nodes created later in the first pass
        for b in src.get_buffers():
            self.set_buf_in(M[b], M[src.get_buf_in(b)])
        for l in src.get_latches():
            self.set_next(M[l], M[src.get_next(l)])
        if copy_pos:
            for po_id, po_fanin, po_type in src.get_pos():
                self.create_po( M[po_fanin], po_type=po_type )
    def cutpoint(self, f):
        """Turn the named buffer `f` into a primary input (a cut-point)."""
        assert self.is_buffer(f)
        assert self.has_name(f)
        self.convert_buf_to_pi(f)
    def build_fanouts(self):
        """Populate self._fanouts: positive fanin literal -> set of fanout literals."""
        for f in self.construction_order():
            for g in self.get_positive_fanins(f):
                self._fanouts.setdefault(g, set()).add(f)
def get_fanouts(self, fs):
res = set()
for f in fs:
for fo in self._fanouts[f]:
res.add(fo)
return res
def conjunction( self, fs ):
res = self.get_const1()
for f in fs:
res = self.create_and( res, f )
return res
def balanced_conjunction( self, fs ):
N = len(fs)
if N < 2:
return self.conjunction(fs)
return self.create_and( self.balanced_conjunction(fs[:N/2]), self.balanced_conjunction(fs[N/2:]) )
def disjunction (self, fs):
res = self.get_const0()
for f in fs:
res = self.create_or( res, f )
return res
def balanced_disjunction( self, fs ):
N = len(fs)
if N < 2:
return self.disjunction(fs)
return self.create_or( self.balanced_disjunction(fs[:N/2]), self.balanced_disjunction(fs[N/2:]) )
def large_xor(self, fs):
res = self.get_const0()
for f in fs:
res = self.create_xor(res, f)
return res
def mux(self, select, args):
res = []
for col in zip(*args):
f = self.disjunction( self.create_and(s,c) for s,c in zip(select,col) )
res.append( f )
return res
def create_constraint(aig, f, name=None):
return aig.create_po(aig, f, name=name, po_type=AIG.CONSTRAINT)
def create_property(aig, f, name=None):
return aig.create_po(aig, AIG.negate(f), name=name, po_type=AIG.BAD_STATES)
def create_bad_states(aig, f, name=None):
return aig.create_po(aig, f, name=name, po_type=AIG.BAD_STATES)
| 26.116935 | 139 | 0.523776 |
from past.builtins import xrange
from future.utils import iteritems
import itertools
class _Node(object):
CONST0 = 0
PI = 1
LATCH = 2
AND = 3
BUFFER = 4
INIT_ZERO = 0
INIT_ONE = 1
INIT_NONDET = 2
def __init__(self, node_type, left=0, right=0):
self._type = node_type
self._left = left
self._right = right
@staticmethod
def make_const0():
return _Node(_Node.CONST0)
@staticmethod
def make_pi(pi_id):
return _Node( _Node.PI, pi_id, 0)
@staticmethod
def make_latch(l_id, init, next=None):
return _Node( _Node.LATCH, l_id, (init, next))
@staticmethod
def make_and(left, right):
return _Node(_Node.AND, left, right)
@staticmethod
def make_buffer(buf_id, buf_in):
return _Node(_Node.BUFFER, buf_id, buf_in)
def is_const0(self):
return self._type == _Node.CONST0
def is_pi(self):
return self._type == _Node.PI
def is_and(self):
return self._type == _Node.AND
def is_buffer(self):
return self._type == _Node.BUFFER
def is_latch(self):
return self._type == _Node.LATCH
def is_nonterminal(self):
return self._type in (_Node.AND,_Node.BUFFER)
def get_fanins(self):
if self._type == _Node.AND:
return [self._left, self._right]
elif self._type == _Node.BUFFER:
return [self._right]
else:
return []
def get_seq_fanins(self):
if self._type == _Node.AND:
return [self._left, self._right]
elif self._type == _Node.BUFFER:
return [self._right]
elif self._type == _Node.LATCH:
return [self._right[1]]
else:
return []
def get_left(self):
assert self.is_and()
return self._left
def get_right(self):
assert self.is_and()
return self._right
def get_buf_id(self):
return self._left
def get_buf_in(self):
assert self.is_buffer()
return self._right
def set_buf_in(self, f):
assert self.is_buffer()
self._right = f
def convert_buf_to_pi(self, pi_id):
assert self.is_buffer()
self._type = _Node.PI
self._left = pi_id
self._right = 0
def get_pi_id(self):
assert self.is_pi()
return self._left
def get_latch_id(self):
assert self.is_latch()
return self._left
def get_init(self):
assert self.is_latch()
return self._right[0]
def get_next(self):
assert self.is_latch()
return self._right[1]
def set_init(self, init):
assert self.is_latch()
self._right = (init, self._right[1])
def set_next(self, f):
assert self.is_latch()
self._right = (self._right[0], f)
def __repr__(self):
type = "ERROR"
if self._type==_Node.AND:
type = "AND"
elif self._type==_Node.BUFFER:
type = "BUFFER"
elif self._type==_Node.CONST0:
type = "CONST0"
elif self._type==_Node.LATCH:
type = "LATCH"
elif self._type==_Node.PI:
type = "PI"
return "<pyaig.aig._Node _type=%s, _left=%s, _right=%s>"%(type, str(self._left), str(self._right))
class AIG(object):
class fmap(object):
def __init__(self, fs=[], negate_if_negated=None, zero=None):
self.negate_if_negated = negate_if_negated if negate_if_negated else AIG.negate_if_negated
zero = AIG.get_const0() if zero is None else zero
self.m = { AIG.get_const0():zero }
if fs:
self.update(fs)
def __getitem__(self, f):
return self.negate_if_negated( self.m[AIG.get_positive(f)], f )
def __setitem__(self, f, g):
self.m[ AIG.get_positive(f) ] = self.negate_if_negated(g, f)
def __contains__(self, f):
return AIG.get_positive(f) in self.m
def __delitem__(self, f):
del self.m[ AIG.get_positive(f) ]
def iteritems(self):
return iteritems(self.m)
def update(self, fs):
self.m.update( (AIG.get_positive(f), self.negate_if_negated(g, f)) for f,g in fs )
class fset(object):
def __init__(self, fs=[]):
self.s = set( AIG.get_positive(f) for f in fs )
def __contains__(self, f):
return AIG.get_positive(f) in self.s
def __len__(self):
return len(self.s)
def __iter__(self):
return self.s.__iter__()
def add(self, f):
f = AIG.get_positive(f)
res = f in self.s
self.s.add(f)
return res
def remove(self, f):
return self.s.remove( AIG.get_positive(f) )
OUTPUT = 0
BAD_STATES = 1
CONSTRAINT = 2
JUSTICE = 3
FAIRNESS = 4
INIT_ZERO = _Node.INIT_ZERO
INIT_ONE = _Node.INIT_ONE
INIT_NONDET = _Node.INIT_NONDET
def __init__(self, name=None, flat_name = (lambda n: n) ):
self._name = name
self._strash = {}
self._pis = []
self._latches = []
self._buffers = []
self._pos = []
self._justice = []
self._nodes = []
self._name_to_id = {}
self._id_to_name = {}
self._name_to_po = {}
self._po_to_name = {}
self._flat_name = flat_name
self._fanouts = {}
self._nodes.append( _Node.make_const0() )
def deref(self, f):
return self._nodes[ f>>1 ]
def name(self):
return self._name
@staticmethod
def get_const(c):
if c:
return AIG.get_const1()
return AIG.get_const0()
@staticmethod
def get_const0():
return 0
@staticmethod
def get_const1():
return 1
def create_pi(self, name=None):
pi_id = len(self._pis)
n = _Node.make_pi(pi_id)
fn = len(self._nodes)<<1
self._nodes.append(n)
self._pis.append( fn )
if name is not None:
self.set_name(fn, name)
return fn
def create_latch(self, name=None, init=INIT_ZERO, next=None):
l_id = len(self._latches)
n = _Node.make_latch(l_id, init, next)
fn = len(self._nodes)<<1
self._nodes.append(n)
self._latches.append( fn )
if name is not None:
self.set_name(fn, name)
return fn
def create_and(self, left, right):
if left<right:
left, right = right, left
if right==0:
return 0
if right==1:
return left
if left == right:
return right
if left == (right ^ 1):
return 0
key = (_Node.AND, left, right)
if key in self._strash:
return self._strash[key]
f = len(self._nodes)<<1
self._nodes.append( _Node.make_and(left, right) )
self._strash[key] = f
return f
def create_buffer(self, buf_in=0, name=None):
b_id = len(self._buffers)
f = len(self._nodes)<<1
self._nodes.append( _Node.make_buffer(b_id, buf_in) )
self._buffers.append( f )
if name is not None:
self.set_name(f, name)
return f
def convert_buf_to_pi(self, buf):
assert self.is_buffer(buf)
assert self.get_buf_in(buf) >= 0
n = self.deref(buf)
self._buffers[n.get_buf_id()] = -1
n.convert_buf_to_pi(len(self._pis))
self._pis.append(buf)
def create_po(self, f=0, name=None, po_type=OUTPUT ):
po_id = len(self._pos)
self._pos.append( (f, po_type) )
if name is not None:
self.set_po_name(po_id, name)
return po_id
def create_justice(self, po_ids):
po_ids = list(po_ids)
j_id = len(self._justice)
for po_id in po_ids:
assert self.get_po_type(po_id) == AIG.JUSTICE
self._justice.append( po_ids )
return j_id
def remove_justice(self):
for po_ids in self._justice:
for po_id in po_ids:
self.set_po_type(po_id, AIG.OUTPUT)
self._justice = []
def set_name(self, f, name):
assert not self.is_negated(f)
assert name not in self._name_to_id
assert f not in self._id_to_name
self._name_to_id[name] = f
self._id_to_name[f] = name
def get_id_by_name(self, name):
return self._name_to_id[name]
def has_name(self, f):
return f in self._id_to_name
def name_exists(self, n):
return n in self._name_to_id
def get_name_by_id(self, f):
return self._id_to_name[f]
def remove_name(self, f):
assert self.has_name(f)
name = self.get_name_by_id(f)
del self._id_to_name[f]
del self._name_to_id[name]
def iter_names(self):
return iteritems(self._id_to_name)
def fill_pi_names(self, replace=False, template="I_{}"):
if replace:
for pi in self.get_pis():
if self.has_name(pi):
self.remove_name(pi)
uid = 0
for pi in self.get_pis():
if not self.has_name(pi):
while True:
name = template.format(uid)
uid += 1
if not self.name_exists(name):
break
self.set_name(pi, name)
def set_po_name(self, po, name):
assert 0 <= po < len(self._pos)
assert name not in self._name_to_po
assert po not in self._po_to_name
self._name_to_po[name] = po
self._po_to_name[po] = name
def get_po_by_name(self, name):
return self._name_to_po[name]
def po_has_name(self, po):
return po in self._po_to_name
def name_has_po(self, po):
return po in self._name_to_po
def remove_po_name(self, po):
assert self.po_has_name(po)
name = self.get_name_by_po(po)
del self._name_to_po[name]
del self._po_to_name[po]
def get_name_by_po(self, po):
return self._po_to_name[po]
def iter_po_names(self):
return ( (po_id, self.get_po_fanin(po_id), po_name) for po_id, po_name in iteritems(self._po_to_name) )
def fill_po_names(self, replace=False, template="O_{}"):
if replace:
self._name_to_po.clear()
self._po_to_name.clear()
po_names = set(name for _, _, name in self.iter_po_names())
uid = 0
for po_id, _, _ in self.get_pos():
if not self.po_has_name(po_id):
while True:
name = template.format(uid)
uid += 1
if name not in po_names:
break
self.set_po_name(po_id, name)
@staticmethod
def get_id(f):
return f >> 1
def is_const0(self, f):
n = self.deref(f)
return n.is_const0()
def is_pi(self, f):
n = self.deref(f)
return n.is_pi()
def is_latch(self, f):
n = self.deref(f)
return n.is_latch()
def is_and(self, f):
n = self.deref(f)
return n.is_and()
def is_buffer(self, f):
n = self.deref(f)
return n.is_buffer()
def get_pi_by_id(self, pi_id):
return self._pis[ pi_id ]
def set_init(self, l, init):
assert not self.is_negated(l)
assert self.is_latch(l)
n = self.deref(l)
n.set_init(init)
def set_next(self, l, f):
assert not self.is_negated(l)
assert self.is_latch(l)
n = self.deref(l)
n.set_next(f)
def get_init(self, l):
assert not self.is_negated(l)
assert self.is_latch(l)
n = self.deref(l)
return n.get_init()
def get_next(self, l):
assert not self.is_negated(l)
assert self.is_latch(l)
n = self.deref(l)
return n.get_next()
def get_and_fanins(self, f):
assert self.is_and(f)
n = self.deref(f)
return (n.get_left(), n.get_right())
def get_and_left(self, f):
assert self.is_and(f)
return self.deref(f).get_left()
def get_and_right(self, f):
assert self.is_and(f)
return self.deref(f).get_right()
def get_buf_in(self, b):
n = self.deref(b)
return n.get_buf_in()
def set_buf_in(self, b, f):
assert b>f
n = self.deref(b)
return n.set_buf_in(f)
def get_buf_id(self, b):
n = self.deref(b)
return n.get_buf_id()
def skip_buf(self, b):
while self.is_buffer(b):
b = AIG.negate_if_negated( self.get_buf_in(b), b )
return b
def get_fanins(self,f):
n = self.deref(f)
return n.get_fanins()
def get_positive_fanins(self,f):
n = self.deref(f)
return (self.get_positive(fi) for fi in n.get_fanins())
def get_positive_seq_fanins(self,f):
n = self.deref(f)
return (self.get_positive(fi) for fi in n.get_seq_fanins())
def get_po_type(self, po):
assert 0 <= po < len(self._pos)
return self._pos[po][1]
def get_po_fanin(self, po):
assert 0 <= po < len(self._pos)
return self._pos[po][0]
def set_po_fanin(self, po, f):
assert 0 <= po < len(self._pos)
self._pos[po] = ( f, self._pos[po][1] )
def set_po_type(self, po, po_type):
assert 0 <= po < len(self._pos)
self._pos[po] = ( self._pos[po][0], po_type )
def get_justice_pos(self, j_id):
assert 0 <= j_id < len(self._justice)
return ( po for po in self._justice[j_id] )
def set_justice_pos(self, j_id, po_ids):
assert 0 <= j_id < len(self._justice)
for po_id in po_ids:
assert self.get_po_type(po_id) == AIG.JUSTICE
self._justice[j_id] = po_ids
@staticmethod
def is_negated(f):
return (f&1) != 0
@staticmethod
def get_positive(f):
return (f & ~1)
@staticmethod
def negate(f):
return f ^ 1
@staticmethod
def negate_if(f, c):
if c:
return f^1
else:
return f
@staticmethod
def positive_if(f, c):
if c:
return f
else:
return f^1
@staticmethod
def negate_if_negated(f, c):
return f ^ ( c & 1 )
def create_nand(self, left, right):
return self.negate( self.create_and(left,right) )
def create_or(self, left, right):
return self.negate( self.create_and(self.negate(left), self.negate(right)))
def create_nor(self, left, right):
return self.negate( self.create_or(left, right))
def create_xor(self, left, right):
return self.create_or(
self.create_and( left, self.negate(right) ),
self.create_and( self.negate(left), right )
)
def create_iff(self, left, right):
return self.negate( self.create_xor(left, right) )
def create_implies(self, left, right):
return self.create_or(self.negate(left), right)
def create_ite(self, f_if, f_then, f_else):
return self.create_or(
self.create_and( f_if, f_then),
self.create_and( self.negate(f_if), f_else)
)
def n_pis(self):
return len(self._pis)
def n_latches(self):
return len(self._latches)
def n_ands(self):
return self.n_nonterminals() - self.n_buffers()
def n_nonterminals(self):
return len(self._nodes) - 1 - self.n_latches() - self.n_pis()
def n_pos(self):
return len( self._pos )
def n_pos_by_type(self, type):
res = 0
for _ in self.get_pos_by_type(type):
res += 1
return res
def n_justice(self):
return len( self._justice )
def n_buffers(self):
return len( self._buffers )
def construction_order(self):
return ( i<<1 for i in xrange(1, len(self._nodes) ) )
def construction_order_deref(self):
return ( (f, self.deref(f)) for f in self.construction_order() )
def get_pis(self):
return ( i<<1 for i, n in enumerate(self._nodes) if n.is_pi() )
def get_latches(self):
return ( l for l in self._latches )
def get_buffers(self):
return ( b for b in self._buffers if b>=0 )
def get_and_gates(self):
return ( i<<1 for i, n in enumerate(self._nodes) if n.is_and() )
def get_pos(self):
return ( (po_id, po_fanin, po_type) for po_id, (po_fanin, po_type) in enumerate(self._pos) )
def get_pos_by_type(self, type):
return ( (po_id, po_fanin, po_type) for po_id, po_fanin, po_type in self.get_pos() if po_type==type )
def get_po_fanins(self):
return ( po for _,po,_ in self.get_pos() )
def get_po_fanins_by_type(self, type):
return ( po for _,po,po_type in self.get_pos() if po_type==type)
def get_justice_properties(self):
return ( (i,po_ids) for i, po_ids in enumerate( self._justice ) )
def get_nonterminals(self):
return ( i<<1 for i,n in enumerate(self._nodes) if n.is_nonterminal() )
def __len__(self):
return len(self._nodes)
def get_cone(self, roots, stop=[], fanins=get_positive_fanins):
visited = set()
dfs_stack = list(roots)
while dfs_stack:
cur = self.get_positive(dfs_stack.pop())
if cur in visited or cur in stop:
continue
visited.add(cur)
for fi in fanins(self, cur):
if fi not in visited:
dfs_stack.append(fi)
return sorted(visited)
def get_seq_cone(self, roots, stop=[]):
return self.get_cone(roots, stop, fanins=AIG.get_positive_seq_fanins)
def topological_sort(self, roots, stop=()):
def fanins(f):
if f in stop:
return []
return [ fi for fi in self.get_positive_fanins(f) ]
visited = AIG.fset()
dfs_stack = []
for root in roots:
if visited.add(root):
continue
dfs_stack.append( (root, fanins(root)) )
while dfs_stack:
cur, ds = dfs_stack[-1]
if not ds:
dfs_stack.pop()
if cur is not None:
yield cur
continue
d = ds.pop()
if visited.add(d):
continue
dfs_stack.append( (d,[fi for fi in fanins(d) if fi not in visited]) )
def clean(self, pos=None, justice_pos=None):
aig = AIG()
M = AIG.fmap()
def visit(f, af):
if self.has_name(f):
if AIG.is_negated(af):
aig.set_name( AIG.get_positive(af), "~%s"%self.get_name_by_id(f) )
else:
aig.set_name( af, self.get_name_by_id(f) )
M[f] = af
if pos is None:
pos = range(len(self._pos))
pos = set(pos)
if justice_pos is None:
justice_pos = range(len(self._justice))
for j in justice_pos:
pos.update(self._justice[j])
cone = self.get_seq_cone( self.get_po_fanin(po_id) for po_id in pos )
for f in self.topological_sort(cone):
n = self.deref(f)
if n.is_pi():
visit( f, aig.create_pi() )
elif n.is_and():
visit( f, aig.create_and( M[n.get_left()], M[n.get_right()] ) )
elif n.is_latch():
l = aig.create_latch(init=n.get_init())
visit( f, l )
elif n.is_buffer():
assert False
visit( f, M( n.get_buf_in()) )
for l in self.get_latches():
if l in cone:
aig.set_next(M[l], M[self.get_next(l)])
po_map = {}
for po_id in pos:
po_f = self.get_po_fanin(po_id)
po = aig.create_po( M[po_f], self.get_name_by_po(po_id) if self.po_has_name(po_id) else None, po_type=self.get_po_type(po_id) )
po_map[po_id] = po
for j in justice_pos:
aig.create_justice([ po_map[j_po] for j_po in self._justice[j] ])
return aig
def compose(self, src, M, copy_pos=True):
for f in src.construction_order():
if f in M:
continue
n = src.deref(f)
if n.is_pi():
M[f] = self.create_pi()
elif n.is_and():
M[f] = self.create_and( M[n.get_left()], M[n.get_right()] )
elif n.is_latch():
M[f] = self.create_latch(init=n.get_init())
elif n.is_buffer():
M[f] = self.create_buffer()
for b in src.get_buffers():
self.set_buf_in(M[b], M[src.get_buf_in(b)])
for l in src.get_latches():
self.set_next(M[l], M[src.get_next(l)])
if copy_pos:
for po_id, po_fanin, po_type in src.get_pos():
self.create_po( M[po_fanin], po_type=po_type )
def cutpoint(self, f):
assert self.is_buffer(f)
assert self.has_name(f)
self.convert_buf_to_pi(f)
def build_fanouts(self):
for f in self.construction_order():
for g in self.get_positive_fanins(f):
self._fanouts.setdefault(g, set()).add(f)
def get_fanouts(self, fs):
res = set()
for f in fs:
for fo in self._fanouts[f]:
res.add(fo)
return res
def conjunction( self, fs ):
res = self.get_const1()
for f in fs:
res = self.create_and( res, f )
return res
def balanced_conjunction( self, fs ):
N = len(fs)
if N < 2:
return self.conjunction(fs)
return self.create_and( self.balanced_conjunction(fs[:N/2]), self.balanced_conjunction(fs[N/2:]) )
def disjunction (self, fs):
res = self.get_const0()
for f in fs:
res = self.create_or( res, f )
return res
def balanced_disjunction( self, fs ):
N = len(fs)
if N < 2:
return self.disjunction(fs)
return self.create_or( self.balanced_disjunction(fs[:N/2]), self.balanced_disjunction(fs[N/2:]) )
def large_xor(self, fs):
res = self.get_const0()
for f in fs:
res = self.create_xor(res, f)
return res
def mux(self, select, args):
res = []
for col in zip(*args):
f = self.disjunction( self.create_and(s,c) for s,c in zip(select,col) )
res.append( f )
return res
def create_constraint(aig, f, name=None):
return aig.create_po(aig, f, name=name, po_type=AIG.CONSTRAINT)
def create_property(aig, f, name=None):
return aig.create_po(aig, AIG.negate(f), name=name, po_type=AIG.BAD_STATES)
def create_bad_states(aig, f, name=None):
return aig.create_po(aig, f, name=name, po_type=AIG.BAD_STATES)
| true | true |
f7f4ad070b400d83a0f0d68b1019bf63521fe6a2 | 1,550 | py | Python | src/rtde/__init__.py | lucascimeca/Robotics_Palpation | 107b39f8ec464441e64e66905e718e5f1a79761e | [
"MIT"
] | 19 | 2018-07-24T22:44:22.000Z | 2022-03-26T09:37:08.000Z | src/rtde/__init__.py | lucascimeca/Robotics_Palpation | 107b39f8ec464441e64e66905e718e5f1a79761e | [
"MIT"
] | 4 | 2018-05-02T12:52:35.000Z | 2021-02-15T22:59:54.000Z | src/rtde/__init__.py | lucascimeca/Robotics_Palpation | 107b39f8ec464441e64e66905e718e5f1a79761e | [
"MIT"
] | 4 | 2018-01-22T11:06:28.000Z | 2020-03-17T08:37:24.000Z | # Copyright (c) 2016, Universal Robots A/S,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Universal Robots A/S nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL UNIVERSAL ROBOTS A/S BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 67.391304 | 81 | 0.775484 | true | true | |
f7f4ad2bf8d42c9435e9733de12933a3e894f180 | 2,352 | py | Python | dechorate/cadzow.py | Chutlhu/DechorateDB | 378eda37ed296f2823e3306238101343c5f4084a | [
"MIT"
] | 7 | 2021-06-01T10:57:58.000Z | 2022-03-30T03:17:16.000Z | dechorate/cadzow.py | Chutlhu/DechorateDB | 378eda37ed296f2823e3306238101343c5f4084a | [
"MIT"
] | 3 | 2021-06-25T14:48:40.000Z | 2022-02-10T05:36:30.000Z | dechorate/cadzow.py | Chutlhu/DechorateDB | 378eda37ed296f2823e3306238101343c5f4084a | [
"MIT"
] | null | null | null | import numpy as np
from dechorate.utils.dsp_utils import make_toepliz_as_in_mulan, reshape_toeplitz, enforce_toeplitz, build_frobenius_weights
def cadzow_denoise(A, n_spikes, thr_Cadzow=2e-5):
'''
Cadzow denoising method
from Condat implementation
'''
N, P = A.shape
K = n_spikes
# run Cadzow denoising
for _ in range(100):
# low-rank projection
u, s, vh = np.linalg.svd(A, full_matrices=False)
A = np.dot(u[:, :K] * s[:K], vh[:K, :])
print(s[:K], s[K])
# enforce Toeplitz structure
A = enforce_toeplitz(A)
if s[K] < thr_Cadzow:
break
A = reshape_toeplitz(A, K+1)
assert A.shape[1] == K+1
return A
def condat_denoise(A, n_spikes, thr_Cadzow=2e-5):
    '''
    Method from Condat
    the matrices have size D-L-1 x L. K <= L <= M required.
    '''
    # NOTE(review): `thr_Cadzow` is accepted but never used in this function;
    # the iteration always runs for the fixed `niter` rounds.
    N, L = A.shape # matrix have size D-L+1 x L
    D = N + L - 1
    K = n_spikes
    # parameters
    niter = 20 # number of iterations.
    μ = 0.1 # parameter. Must be in ]0,2[
    γ = 0.51*μ # parameter. Must be in ]μ/2,1[
    # initialization of the weighted matrix, w
    W = build_frobenius_weights(A)
    Tnoisy = A.copy()
    Tdensd = A.copy() # the noisy matrix is the initialization
    Tauxil = A.copy() # auxiliary matrix
    for _ in range(niter):
        # weighted proximal-style update before the rank-K projection
        U, s, Vh = np.linalg.svd(
            Tauxil + γ*(Tdensd-Tauxil) + μ*(Tnoisy-Tdensd)/W,
            full_matrices=False)
        # SVD truncation -> Tdenoised has rank K
        Tdensd = np.dot(U[:, :K] * s[:K], Vh[:K, :])
        print(s[:K], s[K])  # NOTE(review): debug print left in library code
        Tauxil = Tauxil-Tdensd+enforce_toeplitz(2*Tdensd-Tauxil)
    # at this point, Tdensd has rank K but is not exactly Toeplitz
    Tdensd = enforce_toeplitz(Tdensd)
    # we reshape the Toeplitz matrix Tdensd into a Toeplitz matrix with K+1 columns
    Tdensd = reshape_toeplitz(Tdensd, K+1)
    assert Tdensd.shape[1] == K+1
    return Tdensd
def amplitudes_from_locations(obs, taus, nfft, Fs):
    # according to Condat's paper (Condat2015cadzow)
    # observation are in the FFT domain
    # [-M, M] Fourier coefficient of the signal
    # NOTE(review): this function is broken as written and cannot run:
    #  - np.fft.fft(obs, nfft) returns a vector of length nfft, so the
    #    assert below (len(v) == 2*nfft+1) can never hold;
    #  - `tau`, `MM`, `tk` and `vobs` are undefined names (NameError);
    #    presumably `tk` should be `taus` and `vobs` should be `v`, with
    #    `MM` a column vector of Fourier indices -M..M -- confirm against
    #    the Condat reference implementation before fixing;
    #  - parameters `taus` and `Fs` are otherwise unused.
    assert len(obs) > nfft
    v = np.fft.fft(obs, nfft)
    assert len(v) == 2*nfft+1
    M = nfft
    U = np.exp(-1j*2*np.pi/tau*MM@tk)
    akest = np.real(np.linalg.lstsq(U, vobs)[0].T)
    return akest
| 29.4 | 123 | 0.608418 | import numpy as np
from dechorate.utils.dsp_utils import make_toepliz_as_in_mulan, reshape_toeplitz, enforce_toeplitz, build_frobenius_weights
def cadzow_denoise(A, n_spikes, thr_Cadzow=2e-5):
N, P = A.shape
K = n_spikes
for _ in range(100):
u, s, vh = np.linalg.svd(A, full_matrices=False)
A = np.dot(u[:, :K] * s[:K], vh[:K, :])
print(s[:K], s[K])
A = enforce_toeplitz(A)
if s[K] < thr_Cadzow:
break
A = reshape_toeplitz(A, K+1)
assert A.shape[1] == K+1
return A
def condat_denoise(A, n_spikes, thr_Cadzow=2e-5):
N, L = A.shape
D = N + L - 1
K = n_spikes
niter = 20
μ = 0.1
γ = 0.51*μ
W = build_frobenius_weights(A)
Tnoisy = A.copy()
Tdensd = A.copy()
Tauxil = A.copy()
for _ in range(niter):
U, s, Vh = np.linalg.svd(
Tauxil + γ*(Tdensd-Tauxil) + μ*(Tnoisy-Tdensd)/W,
full_matrices=False)
Tdensd = np.dot(U[:, :K] * s[:K], Vh[:K, :])
print(s[:K], s[K])
Tauxil = Tauxil-Tdensd+enforce_toeplitz(2*Tdensd-Tauxil)
Tdensd = enforce_toeplitz(Tdensd)
Tdensd = reshape_toeplitz(Tdensd, K+1)
assert Tdensd.shape[1] == K+1
return Tdensd
def amplitudes_from_locations(obs, taus, nfft, Fs):
# observation are in the FFT domain
# [-M, M] Fourier coefficient of the signal
assert len(obs) > nfft
v = np.fft.fft(obs, nfft)
assert len(v) == 2*nfft+1
M = nfft
U = np.exp(-1j*2*np.pi/tau*MM@tk)
akest = np.real(np.linalg.lstsq(U, vobs)[0].T)
return akest
| true | true |
f7f4b06c8a888dedad50f4c93f188939d156e66a | 181 | py | Python | corehq/sql_proxy_accessors/migrations/0008_get_case_types_for_domain.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/sql_proxy_accessors/migrations/0008_get_case_types_for_domain.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/sql_proxy_accessors/migrations/0008_get_case_types_for_domain.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | from django.db import migrations
class Migration(migrations.Migration):
    # No-op migration: declares its position after 0007 in the migration
    # graph but applies no schema operations.
    # NOTE(review): presumably an operation this migration once contained
    # was removed -- confirm against the repository history.
    dependencies = [
        ('sql_proxy_accessors', '0007_ledger_accessors'),
    ]
    operations = []
| 16.454545 | 57 | 0.679558 | from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sql_proxy_accessors', '0007_ledger_accessors'),
]
operations = []
| true | true |
f7f4b0765f743d8aef0ebc5645170adc7666e223 | 32,168 | py | Python | rasa/telemetry.py | kunci115/rasa | 41e3b227101e6ace3f85c2d99a7f48f4528a8b93 | [
"Apache-2.0"
] | 1 | 2021-08-02T03:42:30.000Z | 2021-08-02T03:42:30.000Z | rasa/telemetry.py | kunci115/rasa | 41e3b227101e6ace3f85c2d99a7f48f4528a8b93 | [
"Apache-2.0"
] | 42 | 2021-05-26T08:35:31.000Z | 2022-03-01T13:31:49.000Z | rasa/telemetry.py | kunci115/rasa | 41e3b227101e6ace3f85c2d99a7f48f4528a8b93 | [
"Apache-2.0"
] | null | null | null | import asyncio
from datetime import datetime
from functools import wraps
import hashlib
import json
import logging
import multiprocessing
import os
from pathlib import Path
import platform
import sys
import textwrap
import typing
from typing import Any, Callable, Dict, List, Optional, Text
import uuid
import async_generator
import requests
from terminaltables import SingleTable
import rasa
from rasa import model
from rasa.constants import (
CONFIG_FILE_TELEMETRY_KEY,
CONFIG_TELEMETRY_DATE,
CONFIG_TELEMETRY_ENABLED,
CONFIG_TELEMETRY_ID,
)
from rasa.shared.constants import DOCS_URL_TELEMETRY
from rasa.shared.exceptions import RasaException
import rasa.shared.utils.io
from rasa.utils import common as rasa_utils
import rasa.utils.io
if typing.TYPE_CHECKING:
from rasa.core.brokers.broker import EventBroker
from rasa.core.tracker_store import TrackerStore
from rasa.core.channels.channel import InputChannel
from rasa.core.agent import Agent
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.importers.importer import TrainingDataImporter
from rasa.core.utils import AvailableEndpoints
logger = logging.getLogger(__name__)
SEGMENT_ENDPOINT = "https://api.segment.io/v1/track"
SEGMENT_REQUEST_TIMEOUT = 5 # seconds
TELEMETRY_ENABLED_ENVIRONMENT_VARIABLE = "RASA_TELEMETRY_ENABLED"
TELEMETRY_DEBUG_ENVIRONMENT_VARIABLE = "RASA_TELEMETRY_DEBUG"
# the environment variable can be used for local development to set a test key
# e.g. `RASA_TELEMETRY_WRITE_KEY=12354 rasa train`
TELEMETRY_WRITE_KEY_ENVIRONMENT_VARIABLE = "RASA_TELEMETRY_WRITE_KEY"
EXCEPTION_WRITE_KEY_ENVIRONMENT_VARIABLE = "RASA_EXCEPTION_WRITE_KEY"
TELEMETRY_ID = "metrics_id"
TELEMETRY_ENABLED_BY_DEFAULT = True
# if one of these environment variables is set, we assume to be running in CI env
CI_ENVIRONMENT_TELL = [
"bamboo.buildKey",
"BUILD_ID",
"BUILD_NUMBER",
"BUILDKITE",
"CI",
"CIRCLECI",
"CONTINUOUS_INTEGRATION",
"GITHUB_ACTIONS",
"HUDSON_URL",
"JENKINS_URL",
"TEAMCITY_VERSION",
"TRAVIS",
]
# If updating or creating a new event, remember to update
# https://rasa.com/docs/rasa/telemetry
TRAINING_STARTED_EVENT = "Training Started"
TRAINING_COMPLETED_EVENT = "Training Completed"
TELEMETRY_DISABLED_EVENT = "Telemetry Disabled"
TELEMETRY_DATA_SPLIT_EVENT = "Training Data Split"
TELEMETRY_DATA_VALIDATED_EVENT = "Training Data Validated"
TELEMETRY_DATA_CONVERTED_EVENT = "Training Data Converted"
TELEMETRY_TRACKER_EXPORTED_EVENT = "Tracker Exported"
TELEMETRY_INTERACTIVE_LEARNING_STARTED_EVENT = "Interactive Learning Started"
TELEMETRY_SERVER_STARTED_EVENT = "Server Started"
TELEMETRY_PROJECT_CREATED_EVENT = "Project Created"
TELEMETRY_SHELL_STARTED_EVENT = "Shell Started"
TELEMETRY_RASA_X_LOCAL_STARTED_EVENT = "Rasa X Local Started"
TELEMETRY_VISUALIZATION_STARTED_EVENT = "Story Visualization Started"
TELEMETRY_TEST_CORE_EVENT = "Model Core Tested"
TELEMETRY_TEST_NLU_EVENT = "Model NLU Tested"
# used to calculate the context on the first call and cache it afterwards
TELEMETRY_CONTEXT = None
def print_telemetry_reporting_info() -> None:
    """Print a notice about anonymous telemetry reporting to stdout."""
    notice = textwrap.dedent(
        f"""
        Rasa Open Source reports anonymous usage telemetry to help improve the product
        for all its users.
        If you'd like to opt-out, you can use `rasa telemetry disable`.
        To learn more, check out {DOCS_URL_TELEMETRY}."""
    ).strip()
    print(SingleTable([[notice]]).table)
def _default_telemetry_configuration(is_enabled: bool) -> Dict[Text, Any]:
    """Build a fresh telemetry configuration with a newly generated random ID."""
    config = {
        CONFIG_TELEMETRY_ENABLED: is_enabled,
        CONFIG_TELEMETRY_ID: uuid.uuid4().hex,
        CONFIG_TELEMETRY_DATE: datetime.now(),
    }
    return config
def _write_default_telemetry_configuration(
    is_enabled: bool = TELEMETRY_ENABLED_BY_DEFAULT,
) -> bool:
    """Persist a default telemetry configuration to the user's global config.

    Prints the telemetry notice when telemetry is enabled, the write succeeded,
    and the user did not already configure telemetry via environment variable.

    Returns:
        `True` if the configuration was written successfully.
    """
    config = _default_telemetry_configuration(is_enabled)
    written = rasa_utils.write_global_config_value(CONFIG_FILE_TELEMETRY_KEY, config)

    # Do not show info if user has enabled/disabled telemetry via env var
    env_override = os.environ.get(TELEMETRY_ENABLED_ENVIRONMENT_VARIABLE)
    if env_override is None and is_enabled and written:
        print_telemetry_reporting_info()

    return written
def _is_telemetry_enabled_in_configuration() -> bool:
    """Read the telemetry setting from the user's global Rasa config in $HOME.

    A default configuration is created if none exists yet.

    Returns:
        `True`, if telemetry is enabled, `False` otherwise.
    """
    try:
        stored = rasa_utils.read_global_config_value(
            CONFIG_FILE_TELEMETRY_KEY, unavailable_ok=False
        )
        return stored[CONFIG_TELEMETRY_ENABLED]
    except ValueError as e:
        logger.debug(f"Could not read telemetry settings from configuration file: {e}")

        # No configuration present -- create one, enabling telemetry by default.
        written = _write_default_telemetry_configuration()
        # If the write failed, telemetry is reported as disabled.
        return TELEMETRY_ENABLED_BY_DEFAULT and written
def is_telemetry_enabled() -> bool:
    """Check if telemetry is enabled either in configuration or environment.

    Returns:
        `True`, if telemetry is enabled, `False` otherwise.
    """
    env_setting = os.environ.get(TELEMETRY_ENABLED_ENVIRONMENT_VARIABLE)
    if env_setting is not None:
        # The environment variable takes precedence over the config file.
        return env_setting.lower() == "true"

    try:
        stored = rasa_utils.read_global_config_value(
            CONFIG_FILE_TELEMETRY_KEY, unavailable_ok=False
        )
        return stored[CONFIG_TELEMETRY_ENABLED]
    except ValueError:
        return False
def initialize_telemetry() -> bool:
    """Read telemetry configuration from the user's Rasa config file in $HOME.

    Creates a default configuration if no configuration exists.

    Returns:
        `True`, if telemetry is enabled, `False` otherwise.
    """
    try:
        # Consult the configuration even when the environment variable is set,
        # so the config file (and with it a telemetry ID) is always created.
        enabled_in_config = _is_telemetry_enabled_in_configuration()

        env_setting = os.environ.get(TELEMETRY_ENABLED_ENVIRONMENT_VARIABLE)
        if env_setting is not None:
            return env_setting.lower() == "true"
        return enabled_in_config
    except Exception as e:  # skipcq:PYL-W0703
        logger.exception(
            f"Failed to initialize telemetry reporting: {e}."
            f"Telemetry reporting will be disabled."
        )
        return False
def ensure_telemetry_enabled(f: Callable[..., Any]) -> Callable[..., Any]:
    """Function decorator for telemetry functions that ensures telemetry is enabled.

    WARNING: does not work as a decorator for async generators.

    Args:
        f: function to call if telemetry is enabled

    Returns:
        Return wrapped function
    """
    # The decorator supports both coroutine functions and plain functions.
    if not asyncio.iscoroutinefunction(f):

        @wraps(f)
        def sync_wrapper(*args: Any, **kwargs: Any) -> Any:
            if not is_telemetry_enabled():
                return None
            return f(*args, **kwargs)

        return sync_wrapper

    @wraps(f)
    async def async_wrapper(*args: Any, **kwargs: Any) -> Any:
        if not is_telemetry_enabled():
            return None
        return await f(*args, **kwargs)

    return async_wrapper
def _fetch_write_key(tool: Text, environment_variable: Text) -> Optional[Text]:
    """Read the write key from a tool from our set of keys.

    Args:
        tool: name of the tool we want to fetch a key for
        environment_variable: name of the environment variable to set the key

    Returns:
        write key, if a key was present.
    """
    import pkg_resources
    from rasa import __name__ as name

    env_key = os.environ.get(environment_variable)
    if env_key:
        # An environment-provided key always wins over the key shipped
        # inside the package's `keys` file.
        return env_key

    keys_file = pkg_resources.resource_filename(name, "keys")
    # noinspection PyBroadException
    try:
        with open(keys_file) as f:
            return json.load(f).get(tool)
    except Exception:  # skipcq:PYL-W0703
        return None
def telemetry_write_key() -> Optional[Text]:
    """Read the Segment write key from the segment key text file.

    The key file is only bundled with wheel/sdist packaged versions of Rasa
    Open Source; source checkouts and CI builds therefore never report
    telemetry unless a key is supplied via environment variable.

    In local development, this should always return `None` to avoid logging telemetry.

    Returns:
        Segment write key, if the key file was present.
    """
    return _fetch_write_key("segment", TELEMETRY_WRITE_KEY_ENVIRONMENT_VARIABLE)
def sentry_write_key() -> Optional[Text]:
    """Read the Sentry write key from the sentry key text file.

    Returns:
        Sentry write key, if the key file was present.
    """
    return _fetch_write_key("sentry", EXCEPTION_WRITE_KEY_ENVIRONMENT_VARIABLE)
def _encode_base64(original: Text, encoding: Text = "utf-8") -> Text:
"""Encodes a string as a base64 string.
Args:
original: Text to be encoded.
encoding: Encoding used to convert text to binary.
Returns:
Encoded text.
"""
import base64
return base64.b64encode(original.encode(encoding)).decode(encoding)
def segment_request_header(write_key: Text) -> Dict[Text, Any]:
    """Use a segment write key to create authentication headers for the segment API.

    Args:
        write_key: Authentication key for segment.

    Returns:
        Authentication headers for segment.
    """
    # Segment uses HTTP basic auth with the write key as user and no password.
    basic_auth = _encode_base64(write_key + ":")
    return {
        "Authorization": f"Basic {basic_auth}",
        "Content-Type": "application/json",
    }
def segment_request_payload(
    distinct_id: Text,
    event_name: Text,
    properties: Dict[Text, Any],
    context: Dict[Text, Any],
) -> Dict[Text, Any]:
    """Compose a valid payload for the segment API.

    Args:
        distinct_id: Unique telemetry ID.
        event_name: Name of the event.
        properties: Values to report along the event.
        context: Context information about the event.

    Returns:
        Valid segment payload.
    """
    payload = {"userId": distinct_id, "event": event_name}
    payload["properties"] = properties
    payload["context"] = context
    return payload
def in_continuous_integration() -> bool:
    """Returns `True` if currently running inside a continuous integration context."""
    for tell in CI_ENVIRONMENT_TELL:
        if tell in os.environ:
            return True
    return False
def _is_telemetry_debug_enabled() -> bool:
    """Check if telemetry debug mode is enabled."""
    value = os.environ.get(TELEMETRY_DEBUG_ENVIRONMENT_VARIABLE, "false")
    return value.lower() == "true"
def print_telemetry_event(payload: Dict[Text, Any]) -> None:
    """Print a telemetry events payload to the commandline.

    Args:
        payload: payload of the event
    """
    formatted = json.dumps(payload, indent=2)
    print("Telemetry Event:")
    print(formatted)
def _send_event(
    distinct_id: Text,
    event_name: Text,
    properties: Dict[Text, Any],
    context: Dict[Text, Any],
) -> None:
    """Report the contents of an event to the /track Segment endpoint.

    Documentation: https://segment.com/docs/sources/server/http/

    Do not call this function from outside telemetry.py! This function does not
    check if telemetry is enabled or not.

    Args:
        distinct_id: Unique telemetry ID.
        event_name: Name of the event.
        properties: Values to report along the event.
        context: Context information about the event.
    """
    payload = segment_request_payload(distinct_id, event_name, properties, context)

    # In debug mode, events are printed locally instead of being sent to Segment.
    if _is_telemetry_debug_enabled():
        print_telemetry_event(payload)
        return

    write_key = telemetry_write_key()
    if not write_key:
        # If TELEMETRY_WRITE_KEY is empty or `None`, telemetry has not been
        # enabled for this build (e.g. because it is running from source)
        logger.debug("Skipping request to external service: telemetry key not set.")
        return

    headers = segment_request_header(write_key)

    resp = requests.post(
        SEGMENT_ENDPOINT, headers=headers, json=payload, timeout=SEGMENT_REQUEST_TIMEOUT
    )
    # handle different failure cases; failures are only logged at debug level
    # since telemetry must never break the user's workflow
    if resp.status_code != 200:
        logger.debug(
            f"Segment telemetry request returned a {resp.status_code} response. "
            f"Body: {resp.text}"
        )
    else:
        data = resp.json()
        if not data.get("success"):
            logger.debug(
                f"Segment telemetry request returned a failure. Response: {data}"
            )
def _hash_directory_path(path: Text) -> Optional[Text]:
"""Create a hash for the directory.
Returns:
hash of the directories path
"""
full_path = Path(path).absolute()
return hashlib.sha256(str(full_path).encode("utf-8")).hexdigest()
# noinspection PyBroadException
def _is_docker() -> bool:
"""Guess if we are running in docker environment.
Returns:
`True` if we are running inside docker, `False` otherwise.
"""
# first we try to use the env
try:
os.stat("/.dockerenv")
return True
except Exception: # skipcq:PYL-W0703
pass
# if that didn't work, try to use proc information
try:
return "docker" in rasa.shared.utils.io.read_file("/proc/self/cgroup", "utf8")
except Exception: # skipcq:PYL-W0703
return False
def with_default_context_fields(
    context: Optional[Dict[Text, Any]] = None,
) -> Dict[Text, Any]:
    """Return a new context dictionary that merges the default field values
    with the provided ones. Provided values override defaults on key clashes.

    Args:
        context: Context information about the event.

    Return:
        A new context.
    """
    merged = dict(_default_context_fields())
    merged.update(context or {})
    return merged
def _default_context_fields() -> Dict[Text, Any]:
    """Return a dictionary that contains the default context values.

    Return:
        A new context containing information about the runtime environment.
    """
    global TELEMETRY_CONTEXT

    if not TELEMETRY_CONTEXT:
        # Make sure to update the example in docs/docs/telemetry/telemetry.mdx
        # if you change / add context
        TELEMETRY_CONTEXT = {
            "os": {"name": platform.system(), "version": platform.release()},
            "ci": in_continuous_integration(),
            "project": model.project_fingerprint(),
            # only a SHA-256 hash of the cwd is stored, never the raw path
            "directory": _hash_directory_path(os.getcwd()),
            "python": sys.version.split(" ")[0],
            "rasa_open_source": rasa.__version__,
            "cpu": multiprocessing.cpu_count(),
            "docker": _is_docker(),
        }

    # avoid returning the cached dict --> caller could modify the dictionary...
    # usually we would use `lru_cache`, but that doesn't return a dict copy and
    # doesn't work on inner functions, so we need to roll our own caching...
    return TELEMETRY_CONTEXT.copy()
def _track(
    event_name: Text,
    properties: Optional[Dict[Text, Any]] = None,
    context: Optional[Dict[Text, Any]] = None,
) -> None:
    """Tracks a telemetry event.

    It is OK to use this function from outside telemetry.py, but note that it
    is recommended to create a new track_xyz() function for complex telemetry
    events, or events that are generated from many parts of the Rasa Open Source code.

    Args:
        event_name: Name of the event.
        properties: Dictionary containing the event's properties.
        context: Dictionary containing some context for this event.
    """
    try:
        metrics_id = get_telemetry_id()
        if not metrics_id:
            logger.debug("Will not report telemetry events as no ID was found.")
            return

        if not properties:
            properties = {}
        # Every event additionally carries the telemetry ID as a property.
        properties[TELEMETRY_ID] = metrics_id

        _send_event(
            metrics_id, event_name, properties, with_default_context_fields(context)
        )
    except Exception as e:  # skipcq:PYL-W0703
        # Telemetry must never break the user's workflow -- swallow and log.
        logger.debug(f"Skipping telemetry reporting: {e}")
def get_telemetry_id() -> Optional[Text]:
    """Return the unique telemetry identifier for this Rasa Open Source install.

    The identifier can be any string, but it should be a UUID.

    Returns:
        The identifier, if it is configured correctly.
    """
    try:
        stored = rasa_utils.read_global_config_value(CONFIG_FILE_TELEMETRY_KEY)
        telemetry_config = stored or {}
        return telemetry_config.get(CONFIG_TELEMETRY_ID)
    except Exception as e:  # skipcq:PYL-W0703
        logger.debug(f"Unable to retrieve telemetry ID: {e}")
        return None
def toggle_telemetry_reporting(is_enabled: bool) -> None:
    """Write to the configuration if telemetry tracking should be enabled or disabled.

    Args:
        is_enabled: `True` if the telemetry reporting should be enabled,
            `False` otherwise.
    """
    existing = rasa_utils.read_global_config_value(CONFIG_FILE_TELEMETRY_KEY)

    if not existing:
        # no configuration yet -- create a fresh one with a new telemetry ID
        existing = _default_telemetry_configuration(is_enabled)
    else:
        existing[CONFIG_TELEMETRY_ENABLED] = is_enabled

    rasa_utils.write_global_config_value(CONFIG_FILE_TELEMETRY_KEY, existing)
def strip_sensitive_data_from_sentry_event(
    event: Dict[Text, Any], _unused_hint: Optional[Dict[Text, Any]] = None
) -> Optional[Dict[Text, Any]]:
    """Remove any sensitive data from the event (e.g. path names).

    Args:
        event: event to be logged to sentry
        _unused_hint: some hinting information sent alongside of the event

    Returns:
        the event without any sensitive / PII data or `None` if the event should
        be discarded.
    """
    sdk_marker = f"rasa_sdk{os.path.sep}executor.py"

    for exception_value in event.get("exception", {}).get("values", []):
        for frame in exception_value.get("stacktrace", {}).get("frames", []):
            # removes any paths from stack traces (avoids e.g. sending
            # a users home directory name if package is installed there)
            frame["abs_path"] = ""
            filename = frame["filename"]

            if sdk_marker in filename:
                # this looks a lot like an exception in the SDK and hence custom code
                # no need for us to deal with that
                return None
            if "site-packages" in filename:
                # drop site-packages and following slash / backslash
                trimmed = filename.split("site-packages")[-1][1:]
                frame["filename"] = os.path.join("site-packages", trimmed)
            elif "dist-packages" in filename:
                # drop dist-packages and following slash / backslash
                trimmed = filename.split("dist-packages")[-1][1:]
                frame["filename"] = os.path.join("dist-packages", trimmed)
            elif os.path.isabs(filename):
                # if the file path is absolute, we'll drop the whole event as this is
                # very likely custom code. needs to happen after cleaning as
                # site-packages / dist-packages paths are also absolute, but fine.
                return None
    return event
@ensure_telemetry_enabled
def initialize_error_reporting() -> None:
    """Sets up automated error reporting.

    Exceptions are reported to sentry. We avoid sending any metadata (local
    variables, paths, ...) to make sure we don't compromise any data. Only the
    exception and its stacktrace is logged and only if the exception origins
    from the `rasa` package."""
    import sentry_sdk
    from sentry_sdk import configure_scope
    from sentry_sdk.integrations.atexit import AtexitIntegration
    from sentry_sdk.integrations.dedupe import DedupeIntegration
    from sentry_sdk.integrations.excepthook import ExcepthookIntegration

    # key for local testing can be found at
    # https://sentry.io/settings/rasahq/projects/rasa-open-source/install/python/
    # for local testing, set the key using `RASA_EXCEPTION_WRITE_KEY=key rasa <command>`
    key = sentry_write_key()

    if not key:
        # no key means this is a source build / dev environment -- do not report
        return

    telemetry_id = get_telemetry_id()

    # this is a very defensive configuration, avoiding as many integrations as
    # possible. it also submits very little data (exception with error message
    # and line numbers).
    sentry_sdk.init(
        f"https://{key}.ingest.sentry.io/2801673",
        before_send=strip_sensitive_data_from_sentry_event,
        integrations=[
            ExcepthookIntegration(),
            DedupeIntegration(),
            AtexitIntegration(lambda _, __: None),
        ],
        send_default_pii=False,  # activate PII filter
        server_name=telemetry_id or "UNKNOWN",
        ignore_errors=[
            # std lib errors
            KeyboardInterrupt,  # user hit the interrupt key (Ctrl+C)
            MemoryError,  # machine is running out of memory
            NotImplementedError,  # user is using a feature that is not implemented
            asyncio.CancelledError,  # an async operation has been cancelled by the user
            # expected Rasa errors
            RasaException,
        ],
        in_app_include=["rasa"],  # only submit errors in this package
        with_locals=False,  # don't submit local variables
        release=f"rasa-{rasa.__version__}",
        default_integrations=False,
        environment="development" if in_continuous_integration() else "production",
    )

    if not telemetry_id:
        return

    with configure_scope() as scope:
        # sentry added these more recently, just a protection in a case where a
        # user has installed an older version of sentry
        if hasattr(scope, "set_user"):
            scope.set_user({"id": telemetry_id})

        default_context = _default_context_fields()
        if hasattr(scope, "set_context"):
            if "os" in default_context:
                # os is a nested dict, hence we report it separately
                scope.set_context("Operating System", default_context.pop("os"))
            scope.set_context("Environment", default_context)
@async_generator.asynccontextmanager
async def track_model_training(
    training_data: "TrainingDataImporter", model_type: Text, is_finetuning: bool = False
) -> typing.AsyncGenerator[None, None]:
    """Track a model training started.

    WARNING: since this is a generator, it can't use the ensure telemetry
    decorator. We need to manually add these checks here. This can be
    fixed as soon as we drop python 3.6 support.

    Args:
        training_data: Training data used for the training.
        model_type: Specifies the type of training, should be either "rasa", "core"
            or "nlu".
        is_finetuning: `True` if the model is trained by finetuning another model.
    """
    if not initialize_telemetry():
        # telemetry reporting is disabled. we won't do any reporting
        yield  # runs the training
        return  # closes the async context

    config = await training_data.get_config()
    stories = await training_data.get_stories()
    nlu_data = await training_data.get_nlu_data()
    domain = await training_data.get_domain()
    count_conditional_responses = domain.count_conditional_response_variations()

    training_id = uuid.uuid4().hex

    # Make sure to update the example in docs/docs/telemetry/telemetry.mdx
    # if you change / add any properties
    _track(
        TRAINING_STARTED_EVENT,
        {
            "language": config.get("language"),
            "training_id": training_id,
            "type": model_type,
            "pipeline": config.get("pipeline"),
            "policies": config.get("policies"),
            "num_intent_examples": len(nlu_data.intent_examples),
            "num_entity_examples": len(nlu_data.entity_examples),
            "num_actions": len(domain.action_names_or_texts),
            # Old nomenclature from when 'responses' were still called
            # 'templates' in the domain
            "num_templates": len(domain.responses),
            "num_conditional_response_variations": count_conditional_responses,
            "num_slots": len(domain.slots),
            "num_forms": len(domain.forms),
            "num_intents": len(domain.intents),
            "num_entities": len(domain.entities),
            "num_story_steps": len(stories.story_steps),
            "num_lookup_tables": len(nlu_data.lookup_tables),
            "num_synonyms": len(nlu_data.entity_synonyms),
            "num_regexes": len(nlu_data.regex_features),
            "is_finetuning": is_finetuning,
        },
    )
    # measure wall-clock time between entering and leaving the context --
    # the `yield` is where the actual training runs
    start = datetime.now()
    yield
    runtime = datetime.now() - start

    _track(
        TRAINING_COMPLETED_EVENT,
        {
            "training_id": training_id,
            "type": model_type,
            "runtime": int(runtime.total_seconds()),
        },
    )
@ensure_telemetry_enabled
def track_telemetry_disabled() -> None:
    """Track when a user disables telemetry."""
    _track(event_name=TELEMETRY_DISABLED_EVENT)
@ensure_telemetry_enabled
def track_data_split(fraction: float, data_type: Text) -> None:
    """Track when a user splits data.

    Args:
        fraction: How much data goes into train and how much goes into test
        data_type: Is this core, nlu or nlg data
    """
    event_properties = {"fraction": fraction, "type": data_type}
    _track(TELEMETRY_DATA_SPLIT_EVENT, event_properties)
@ensure_telemetry_enabled
def track_validate_files(validation_success: bool) -> None:
    """Track when a user validates data files.

    Args:
        validation_success: Whether the validation was successful
    """
    event_properties = {"validation_success": validation_success}
    _track(TELEMETRY_DATA_VALIDATED_EVENT, event_properties)
@ensure_telemetry_enabled
def track_data_convert(output_format: Text, data_type: Text) -> None:
    """Track when a user converts data.

    Args:
        output_format: Target format for the converter
        data_type: Is this core, nlu or nlg data
    """
    event_properties = {"output_format": output_format, "type": data_type}
    _track(TELEMETRY_DATA_CONVERTED_EVENT, event_properties)
@ensure_telemetry_enabled
def track_tracker_export(
    number_of_exported_events: int,
    tracker_store: "TrackerStore",
    event_broker: "EventBroker",
) -> None:
    """Track when a user exports trackers.

    Args:
        number_of_exported_events: Number of events that got exported
        tracker_store: Store used to retrieve the events from
        event_broker: Broker the events are getting published towards
    """
    # Only the class names of store / broker are reported, never their config.
    event_properties = {
        "number_of_exported_events": number_of_exported_events,
        "tracker_store": type(tracker_store).__name__,
        "event_broker": type(event_broker).__name__,
    }
    _track(TELEMETRY_TRACKER_EXPORTED_EVENT, event_properties)
@ensure_telemetry_enabled
def track_interactive_learning_start(
    skip_visualization: bool, save_in_e2e: bool
) -> None:
    """Track when a user starts an interactive learning session.

    Args:
        skip_visualization: Is visualization skipped in this session
        save_in_e2e: Is e2e used in this session
    """
    event_properties = {
        "skip_visualization": skip_visualization,
        "save_in_e2e": save_in_e2e,
    }
    _track(TELEMETRY_INTERACTIVE_LEARNING_STARTED_EVENT, event_properties)
@ensure_telemetry_enabled
def track_server_start(
    input_channels: List["InputChannel"],
    endpoints: Optional["AvailableEndpoints"],
    model_directory: Optional[Text],
    number_of_workers: int,
    is_api_enabled: bool,
) -> None:
    """Track when a user starts a rasa server.

    Args:
        input_channels: Used input channels
        endpoints: Endpoint configuration for the server
        model_directory: directory of the running model
        number_of_workers: number of used Sanic workers
        is_api_enabled: whether the rasa API server is enabled
    """
    from rasa.core.utils import AvailableEndpoints

    def project_fingerprint_from_model(
        _model_directory: Optional[Text],
    ) -> Optional[Text]:
        """Get project fingerprint from an app's loaded model."""
        if _model_directory:
            # best effort: any failure while unpacking simply yields no fingerprint
            try:
                with model.get_model(_model_directory) as unpacked_model:
                    fingerprint = model.fingerprint_from_path(unpacked_model)
                    return fingerprint.get(model.FINGERPRINT_PROJECT)
            except Exception:
                return None
        return None

    if not endpoints:
        endpoints = AvailableEndpoints()

    # only the *types* of the configured endpoints are reported, never
    # addresses or credentials
    _track(
        TELEMETRY_SERVER_STARTED_EVENT,
        {
            "input_channels": [i.name() for i in input_channels],
            "api_enabled": is_api_enabled,
            "number_of_workers": number_of_workers,
            "endpoints_nlg": endpoints.nlg.type if endpoints.nlg else None,
            "endpoints_nlu": endpoints.nlu.type if endpoints.nlu else None,
            "endpoints_action_server": endpoints.action.type
            if endpoints.action
            else None,
            "endpoints_model_server": endpoints.model.type if endpoints.model else None,
            "endpoints_tracker_store": endpoints.tracker_store.type
            if endpoints.tracker_store
            else None,
            "endpoints_lock_store": endpoints.lock_store.type
            if endpoints.lock_store
            else None,
            "endpoints_event_broker": endpoints.event_broker.type
            if endpoints.event_broker
            else None,
            "project": project_fingerprint_from_model(model_directory),
        },
    )
@ensure_telemetry_enabled
def track_project_init(path: Text) -> None:
    """Track when a user creates a project using rasa init.

    Args:
        path: Location of the project
    """
    # Only a hash of the directory path is reported, never the path itself.
    hashed_path = _hash_directory_path(path)
    _track(TELEMETRY_PROJECT_CREATED_EVENT, {"init_directory": hashed_path})
@ensure_telemetry_enabled
def track_shell_started(model_type: Text) -> None:
    """Track when a user starts a bot using rasa shell.

    Args:
        model_type: Type of the model, core / nlu or rasa.
    """
    _track(TELEMETRY_SHELL_STARTED_EVENT, {"type": model_type})
@ensure_telemetry_enabled
def track_rasa_x_local() -> None:
    """Track when a user runs Rasa X in local mode."""
    _track(event_name=TELEMETRY_RASA_X_LOCAL_STARTED_EVENT)
@ensure_telemetry_enabled
def track_visualization() -> None:
    """Track when a user runs the visualization."""
    _track(event_name=TELEMETRY_VISUALIZATION_STARTED_EVENT)
@ensure_telemetry_enabled
def track_core_model_test(num_story_steps: int, e2e: bool, agent: "Agent") -> None:
    """Track when a user tests a core model.

    Args:
        num_story_steps: Number of test stories used for the comparison
        e2e: indicator if tests running in end to end mode
        agent: Agent of the model getting tested
    """
    fingerprint = model.fingerprint_from_path(agent.model_directory or "")
    event_properties = {
        "project": fingerprint.get(model.FINGERPRINT_PROJECT),
        "end_to_end": e2e,
        "num_story_steps": num_story_steps,
    }
    _track(TELEMETRY_TEST_CORE_EVENT, event_properties)
@ensure_telemetry_enabled
def track_nlu_model_test(test_data: "TrainingData") -> None:
    """Track when a user tests an nlu model.

    Args:
        test_data: Data used for testing
    """
    event_properties = {
        "num_intent_examples": len(test_data.intent_examples),
        "num_entity_examples": len(test_data.entity_examples),
        "num_lookup_tables": len(test_data.lookup_tables),
        "num_synonyms": len(test_data.entity_synonyms),
        "num_regexes": len(test_data.regex_features),
    }
    _track(TELEMETRY_TEST_NLU_EVENT, event_properties)
| 33.68377 | 88 | 0.681578 | import asyncio
from datetime import datetime
from functools import wraps
import hashlib
import json
import logging
import multiprocessing
import os
from pathlib import Path
import platform
import sys
import textwrap
import typing
from typing import Any, Callable, Dict, List, Optional, Text
import uuid
import async_generator
import requests
from terminaltables import SingleTable
import rasa
from rasa import model
from rasa.constants import (
CONFIG_FILE_TELEMETRY_KEY,
CONFIG_TELEMETRY_DATE,
CONFIG_TELEMETRY_ENABLED,
CONFIG_TELEMETRY_ID,
)
from rasa.shared.constants import DOCS_URL_TELEMETRY
from rasa.shared.exceptions import RasaException
import rasa.shared.utils.io
from rasa.utils import common as rasa_utils
import rasa.utils.io
if typing.TYPE_CHECKING:
from rasa.core.brokers.broker import EventBroker
from rasa.core.tracker_store import TrackerStore
from rasa.core.channels.channel import InputChannel
from rasa.core.agent import Agent
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.importers.importer import TrainingDataImporter
from rasa.core.utils import AvailableEndpoints
logger = logging.getLogger(__name__)
SEGMENT_ENDPOINT = "https://api.segment.io/v1/track"
SEGMENT_REQUEST_TIMEOUT = 5
TELEMETRY_ENABLED_ENVIRONMENT_VARIABLE = "RASA_TELEMETRY_ENABLED"
TELEMETRY_DEBUG_ENVIRONMENT_VARIABLE = "RASA_TELEMETRY_DEBUG"
TELEMETRY_WRITE_KEY_ENVIRONMENT_VARIABLE = "RASA_TELEMETRY_WRITE_KEY"
EXCEPTION_WRITE_KEY_ENVIRONMENT_VARIABLE = "RASA_EXCEPTION_WRITE_KEY"
TELEMETRY_ID = "metrics_id"
TELEMETRY_ENABLED_BY_DEFAULT = True
CI_ENVIRONMENT_TELL = [
"bamboo.buildKey",
"BUILD_ID",
"BUILD_NUMBER",
"BUILDKITE",
"CI",
"CIRCLECI",
"CONTINUOUS_INTEGRATION",
"GITHUB_ACTIONS",
"HUDSON_URL",
"JENKINS_URL",
"TEAMCITY_VERSION",
"TRAVIS",
]
TRAINING_STARTED_EVENT = "Training Started"
TRAINING_COMPLETED_EVENT = "Training Completed"
TELEMETRY_DISABLED_EVENT = "Telemetry Disabled"
TELEMETRY_DATA_SPLIT_EVENT = "Training Data Split"
TELEMETRY_DATA_VALIDATED_EVENT = "Training Data Validated"
TELEMETRY_DATA_CONVERTED_EVENT = "Training Data Converted"
TELEMETRY_TRACKER_EXPORTED_EVENT = "Tracker Exported"
TELEMETRY_INTERACTIVE_LEARNING_STARTED_EVENT = "Interactive Learning Started"
TELEMETRY_SERVER_STARTED_EVENT = "Server Started"
TELEMETRY_PROJECT_CREATED_EVENT = "Project Created"
TELEMETRY_SHELL_STARTED_EVENT = "Shell Started"
TELEMETRY_RASA_X_LOCAL_STARTED_EVENT = "Rasa X Local Started"
TELEMETRY_VISUALIZATION_STARTED_EVENT = "Story Visualization Started"
TELEMETRY_TEST_CORE_EVENT = "Model Core Tested"
TELEMETRY_TEST_NLU_EVENT = "Model NLU Tested"
TELEMETRY_CONTEXT = None
def print_telemetry_reporting_info() -> None:
message = textwrap.dedent(
f"""
Rasa Open Source reports anonymous usage telemetry to help improve the product
for all its users.
If you'd like to opt-out, you can use `rasa telemetry disable`.
To learn more, check out {DOCS_URL_TELEMETRY}."""
).strip()
table = SingleTable([[message]])
print(table.table)
def _default_telemetry_configuration(is_enabled: bool) -> Dict[Text, Any]:
return {
CONFIG_TELEMETRY_ENABLED: is_enabled,
CONFIG_TELEMETRY_ID: uuid.uuid4().hex,
CONFIG_TELEMETRY_DATE: datetime.now(),
}
def _write_default_telemetry_configuration(
is_enabled: bool = TELEMETRY_ENABLED_BY_DEFAULT,
) -> bool:
new_config = _default_telemetry_configuration(is_enabled)
success = rasa_utils.write_global_config_value(
CONFIG_FILE_TELEMETRY_KEY, new_config
)
# Do not show info if user has enabled/disabled telemetry via env var
telemetry_environ = os.environ.get(TELEMETRY_ENABLED_ENVIRONMENT_VARIABLE)
if is_enabled and success and telemetry_environ is None:
print_telemetry_reporting_info()
return success
def _is_telemetry_enabled_in_configuration() -> bool:
try:
stored_config = rasa_utils.read_global_config_value(
CONFIG_FILE_TELEMETRY_KEY, unavailable_ok=False
)
return stored_config[CONFIG_TELEMETRY_ENABLED]
except ValueError as e:
logger.debug(f"Could not read telemetry settings from configuration file: {e}")
# seems like there is no config, we'll create one and enable telemetry
success = _write_default_telemetry_configuration()
return TELEMETRY_ENABLED_BY_DEFAULT and success
def is_telemetry_enabled() -> bool:
telemetry_environ = os.environ.get(TELEMETRY_ENABLED_ENVIRONMENT_VARIABLE)
if telemetry_environ is not None:
return telemetry_environ.lower() == "true"
try:
return rasa_utils.read_global_config_value(
CONFIG_FILE_TELEMETRY_KEY, unavailable_ok=False
)[CONFIG_TELEMETRY_ENABLED]
except ValueError:
return False
def initialize_telemetry() -> bool:
try:
is_enabled_in_configuration = _is_telemetry_enabled_in_configuration()
telemetry_environ = os.environ.get(TELEMETRY_ENABLED_ENVIRONMENT_VARIABLE)
if telemetry_environ is None:
return is_enabled_in_configuration
return telemetry_environ.lower() == "true"
except Exception as e:
logger.exception(
f"Failed to initialize telemetry reporting: {e}."
f"Telemetry reporting will be disabled."
)
return False
def ensure_telemetry_enabled(f: Callable[..., Any]) -> Callable[..., Any]:
if asyncio.iscoroutinefunction(f):
@wraps(f)
async def decorated_coroutine(*args: Any, **kwargs: Any) -> Any:
if is_telemetry_enabled():
return await f(*args, **kwargs)
return None
return decorated_coroutine
@wraps(f)
def decorated(*args: Any, **kwargs: Any) -> Any:
if is_telemetry_enabled():
return f(*args, **kwargs)
return None
return decorated
def _fetch_write_key(tool: Text, environment_variable: Text) -> Optional[Text]:
import pkg_resources
from rasa import __name__ as name
if os.environ.get(environment_variable):
return os.environ.get(environment_variable)
write_key_path = pkg_resources.resource_filename(name, "keys")
try:
with open(write_key_path) as f:
return json.load(f).get(tool)
except Exception:
return None
def telemetry_write_key() -> Optional[Text]:
return _fetch_write_key("segment", TELEMETRY_WRITE_KEY_ENVIRONMENT_VARIABLE)
def sentry_write_key() -> Optional[Text]:
return _fetch_write_key("sentry", EXCEPTION_WRITE_KEY_ENVIRONMENT_VARIABLE)
def _encode_base64(original: Text, encoding: Text = "utf-8") -> Text:
import base64
return base64.b64encode(original.encode(encoding)).decode(encoding)
def segment_request_header(write_key: Text) -> Dict[Text, Any]:
return {
"Authorization": "Basic {}".format(_encode_base64(write_key + ":")),
"Content-Type": "application/json",
}
def segment_request_payload(
distinct_id: Text,
event_name: Text,
properties: Dict[Text, Any],
context: Dict[Text, Any],
) -> Dict[Text, Any]:
return {
"userId": distinct_id,
"event": event_name,
"properties": properties,
"context": context,
}
def in_continuous_integration() -> bool:
return any(env in os.environ for env in CI_ENVIRONMENT_TELL)
def _is_telemetry_debug_enabled() -> bool:
return (
os.environ.get(TELEMETRY_DEBUG_ENVIRONMENT_VARIABLE, "false").lower() == "true"
)
def print_telemetry_event(payload: Dict[Text, Any]) -> None:
print("Telemetry Event:")
print(json.dumps(payload, indent=2))
def _send_event(
distinct_id: Text,
event_name: Text,
properties: Dict[Text, Any],
context: Dict[Text, Any],
) -> None:
payload = segment_request_payload(distinct_id, event_name, properties, context)
if _is_telemetry_debug_enabled():
print_telemetry_event(payload)
return
write_key = telemetry_write_key()
if not write_key:
logger.debug("Skipping request to external service: telemetry key not set.")
return
headers = segment_request_header(write_key)
resp = requests.post(
SEGMENT_ENDPOINT, headers=headers, json=payload, timeout=SEGMENT_REQUEST_TIMEOUT
)
if resp.status_code != 200:
logger.debug(
f"Segment telemetry request returned a {resp.status_code} response. "
f"Body: {resp.text}"
)
else:
data = resp.json()
if not data.get("success"):
logger.debug(
f"Segment telemetry request returned a failure. Response: {data}"
)
def _hash_directory_path(path: Text) -> Optional[Text]:
full_path = Path(path).absolute()
return hashlib.sha256(str(full_path).encode("utf-8")).hexdigest()
def _is_docker() -> bool:
    """Guess whether the current process is running inside a Docker container."""
    # Cheapest tell: docker mounts a /.dockerenv marker file.
    try:
        os.stat("/.dockerenv")
        return True
    except Exception:
        pass

    # Fall back to inspecting the cgroup this process belongs to.
    try:
        cgroup_content = rasa.shared.utils.io.read_file("/proc/self/cgroup", "utf8")
        return "docker" in cgroup_content
    except Exception:  # skipcq:PYL-W0703
        return False
def with_default_context_fields(
    context: Optional[Dict[Text, Any]] = None,
) -> Dict[Text, Any]:
    """Merge ``context`` on top of the default context fields.

    Values in ``context`` take precedence over the defaults.
    """
    merged = _default_context_fields()
    merged.update(context or {})
    return merged
def _default_context_fields() -> Dict[Text, Any]:
    """Return a copy of the default context fields attached to telemetry events.

    The context is built once per process and cached in the module-level
    ``TELEMETRY_CONTEXT`` global; subsequent calls reuse the cached values.
    """
    global TELEMETRY_CONTEXT
    if not TELEMETRY_CONTEXT:
        # Make sure to update the example in docs/docs/telemetry/telemetry.mdx
        # if you change / add context
        TELEMETRY_CONTEXT = {
            "os": {"name": platform.system(), "version": platform.release()},
            "ci": in_continuous_integration(),
            "project": model.project_fingerprint(),
            "directory": _hash_directory_path(os.getcwd()),
            "python": sys.version.split(" ")[0],
            "rasa_open_source": rasa.__version__,
            "cpu": multiprocessing.cpu_count(),
            "docker": _is_docker(),
        }
    # avoid returning the cached dict --> caller could modify the dictionary...
    # usually we would use `lru_cache`, but that doesn't return a dict copy and
    return TELEMETRY_CONTEXT.copy()
def _track(
    event_name: Text,
    properties: Optional[Dict[Text, Any]] = None,
    context: Optional[Dict[Text, Any]] = None,
) -> None:
    """Report a telemetry event, never letting a failure escape to the caller.

    Args:
        event_name: Name of the event to report.
        properties: Event-specific payload (the telemetry id is added to it).
        context: Extra context merged with the default context fields.
    """
    try:
        telemetry_id = get_telemetry_id()
        if not telemetry_id:
            logger.debug("Will not report telemetry events as no ID was found.")
            return

        props = properties or {}
        props[TELEMETRY_ID] = telemetry_id
        _send_event(
            telemetry_id, event_name, props, with_default_context_fields(context)
        )
    except Exception as e:  # skipcq:PYL-W0703
        # telemetry must never break the product - log and move on
        logger.debug(f"Skipping telemetry reporting: {e}")
def get_telemetry_id() -> Optional[Text]:
    """Return the anonymous telemetry id from the global config, or ``None``."""
    try:
        stored = rasa_utils.read_global_config_value(CONFIG_FILE_TELEMETRY_KEY)
        return (stored or {}).get(CONFIG_TELEMETRY_ID)
    except Exception as e:  # skipcq:PYL-W0703
        logger.debug(f"Unable to retrieve telemetry ID: {e}")
        return None
def toggle_telemetry_reporting(is_enabled: bool) -> None:
    """Persist the telemetry enabled/disabled flag in the global Rasa config.

    Args:
        is_enabled: ``True`` to switch telemetry reporting on, ``False`` to
            switch it off.
    """
    existing = rasa_utils.read_global_config_value(CONFIG_FILE_TELEMETRY_KEY)
    if not existing:
        existing = _default_telemetry_configuration(is_enabled)
    else:
        existing[CONFIG_TELEMETRY_ENABLED] = is_enabled
    rasa_utils.write_global_config_value(CONFIG_FILE_TELEMETRY_KEY, existing)
def strip_sensitive_data_from_sentry_event(
    event: Dict[Text, Any], _unused_hint: Optional[Dict[Text, Any]] = None
) -> Optional[Dict[Text, Any]]:
    """Scrub file-system paths from a Sentry event before it is sent.

    Stack-trace frames may leak local directory names (e.g. a user's home
    directory if the package is installed there), so absolute paths are
    cleared and package paths are reduced to their ``site-packages`` /
    ``dist-packages`` relative part.

    Args:
        event: The Sentry event payload.
        _unused_hint: Ignored; part of the ``before_send`` callback signature.

    Returns:
        The sanitised event, or ``None`` when the event should be dropped
        entirely (SDK/custom-code exceptions, or unidentifiable absolute paths).
    """
    sdk_executor_marker = f"rasa_sdk{os.path.sep}executor.py"

    for exception_value in event.get("exception", {}).get("values", []):
        for frame in exception_value.get("stacktrace", {}).get("frames", []):
            # Never ship the absolute path of a frame.
            frame["abs_path"] = ""
            filename = frame["filename"]

            if sdk_executor_marker in filename:
                # Looks like an exception raised inside the SDK, i.e. custom
                # user code - not ours to report.
                return None

            if "site-packages" in filename:
                # Keep only the part below site-packages (drop leading slash).
                tail = filename.split("site-packages")[-1][1:]
                frame["filename"] = os.path.join("site-packages", tail)
            elif "dist-packages" in filename:
                # Keep only the part below dist-packages (drop leading slash).
                tail = filename.split("dist-packages")[-1][1:]
                frame["filename"] = os.path.join("dist-packages", tail)
            elif os.path.isabs(filename):
                # An absolute path outside any package directory could contain
                # anything - drop the whole event rather than risk a leak.
                return None

    return event
@ensure_telemetry_enabled
def initialize_error_reporting() -> None:
    """Configure anonymised Sentry exception reporting.

    Only runs when telemetry is enabled (see decorator). Events are scrubbed
    of file-system paths via ``strip_sensitive_data_from_sentry_event`` and
    no PII or local variables are sent.
    """
    import sentry_sdk
    from sentry_sdk import configure_scope
    from sentry_sdk.integrations.atexit import AtexitIntegration
    from sentry_sdk.integrations.dedupe import DedupeIntegration
    from sentry_sdk.integrations.excepthook import ExcepthookIntegration

    key = sentry_write_key()
    if not key:
        # without a write key there is nowhere to report to
        return
    telemetry_id = get_telemetry_id()
    sentry_sdk.init(
        f"https://{key}.ingest.sentry.io/2801673",
        before_send=strip_sensitive_data_from_sentry_event,
        integrations=[
            ExcepthookIntegration(),
            DedupeIntegration(),
            # no-op callback suppresses Sentry's default atexit behaviour
            AtexitIntegration(lambda _, __: None),
        ],
        send_default_pii=False,  # do not attach personally identifiable info
        server_name=telemetry_id or "UNKNOWN",
        ignore_errors=[
            # user interrupts / expected errors that are not actionable for us
            KeyboardInterrupt,
            MemoryError,
            NotImplementedError,
            asyncio.CancelledError,
            RasaException,
        ],
        in_app_include=["rasa"],
        with_locals=False,  # do not submit local variables with stack traces
        release=f"rasa-{rasa.__version__}",
        default_integrations=False,
        environment="development" if in_continuous_integration() else "production",
    )
    if not telemetry_id:
        return
    with configure_scope() as scope:
        # sentry added these more recently, just a protection in a case where a
        # user has installed an older version of sentry
        if hasattr(scope, "set_user"):
            scope.set_user({"id": telemetry_id})
        default_context = _default_context_fields()
        if hasattr(scope, "set_context"):
            if "os" in default_context:
                # os is a nested dict, hence we report it separately
                scope.set_context("Operating System", default_context.pop("os"))
            scope.set_context("Environment", default_context)
@async_generator.asynccontextmanager
async def track_model_training(
    training_data: "TrainingDataImporter", model_type: Text, is_finetuning: bool = False
) -> typing.AsyncGenerator[None, None]:
    """Track a model training as a started / completed event pair.

    Async context manager: reports ``TRAINING_STARTED_EVENT`` (with summary
    statistics of the training data) before the wrapped block runs, and
    ``TRAINING_COMPLETED_EVENT`` (with the measured runtime) afterwards.

    Args:
        training_data: Importer the config, stories, NLU data and domain are
            read from.
        model_type: Type of model that is trained.
        is_finetuning: ``True`` if an existing model is being fine-tuned.
    """
    if not initialize_telemetry():
        # telemetry reporting is disabled. we won't do any reporting
        yield
        return
    config = await training_data.get_config()
    stories = await training_data.get_stories()
    nlu_data = await training_data.get_nlu_data()
    domain = await training_data.get_domain()
    count_conditional_responses = domain.count_conditional_response_variations()
    # one id ties the started/completed events of a single training together
    training_id = uuid.uuid4().hex
    _track(
        TRAINING_STARTED_EVENT,
        {
            "language": config.get("language"),
            "training_id": training_id,
            "type": model_type,
            "pipeline": config.get("pipeline"),
            "policies": config.get("policies"),
            "num_intent_examples": len(nlu_data.intent_examples),
            "num_entity_examples": len(nlu_data.entity_examples),
            "num_actions": len(domain.action_names_or_texts),
            "num_templates": len(domain.responses),
            "num_conditional_response_variations": count_conditional_responses,
            "num_slots": len(domain.slots),
            "num_forms": len(domain.forms),
            "num_intents": len(domain.intents),
            "num_entities": len(domain.entities),
            "num_story_steps": len(stories.story_steps),
            "num_lookup_tables": len(nlu_data.lookup_tables),
            "num_synonyms": len(nlu_data.entity_synonyms),
            "num_regexes": len(nlu_data.regex_features),
            "is_finetuning": is_finetuning,
        },
    )
    start = datetime.now()
    # the actual training runs while the context manager is suspended here
    yield
    runtime = datetime.now() - start
    _track(
        TRAINING_COMPLETED_EVENT,
        {
            "training_id": training_id,
            "type": model_type,
            "runtime": int(runtime.total_seconds()),
        },
    )
@ensure_telemetry_enabled
def track_telemetry_disabled() -> None:
    """Track the moment a user switches telemetry reporting off."""
    _track(TELEMETRY_DISABLED_EVENT)
@ensure_telemetry_enabled
def track_data_split(fraction: float, data_type: Text) -> None:
    """Track a train/test data split.

    Args:
        fraction: Fraction of the data assigned to the training set.
        data_type: Kind of data that was split.
    """
    event_properties = {"fraction": fraction, "type": data_type}
    _track(TELEMETRY_DATA_SPLIT_EVENT, event_properties)
@ensure_telemetry_enabled
def track_validate_files(validation_success: bool) -> None:
    """Track a data-validation run and whether it succeeded."""
    event_properties = {"validation_success": validation_success}
    _track(TELEMETRY_DATA_VALIDATED_EVENT, event_properties)
@ensure_telemetry_enabled
def track_data_convert(output_format: Text, data_type: Text) -> None:
    """Track a training-data format conversion.

    Args:
        output_format: Format the data was converted to.
        data_type: Kind of data that was converted.
    """
    event_properties = {"output_format": output_format, "type": data_type}
    _track(TELEMETRY_DATA_CONVERTED_EVENT, event_properties)
@ensure_telemetry_enabled
def track_tracker_export(
    number_of_exported_events: int,
    tracker_store: "TrackerStore",
    event_broker: "EventBroker",
) -> None:
    """Track an export of conversation tracker events.

    Args:
        number_of_exported_events: How many events were exported.
        tracker_store: Store the events were read from (only its type is reported).
        event_broker: Broker the events were published to (only its type is reported).
    """
    store_name = type(tracker_store).__name__
    broker_name = type(event_broker).__name__
    _track(
        TELEMETRY_TRACKER_EXPORTED_EVENT,
        {
            "number_of_exported_events": number_of_exported_events,
            "tracker_store": store_name,
            "event_broker": broker_name,
        },
    )
@ensure_telemetry_enabled
def track_interactive_learning_start(
    skip_visualization: bool, save_in_e2e: bool
) -> None:
    """Track the start of an interactive learning session.

    Args:
        skip_visualization: Whether story visualization was skipped.
        save_in_e2e: Whether stories are saved in end-to-end format.
    """
    event_properties = {
        "skip_visualization": skip_visualization,
        "save_in_e2e": save_in_e2e,
    }
    _track(TELEMETRY_INTERACTIVE_LEARNING_STARTED_EVENT, event_properties)
@ensure_telemetry_enabled
def track_server_start(
    input_channels: List["InputChannel"],
    endpoints: Optional["AvailableEndpoints"],
    model_directory: Optional[Text],
    number_of_workers: int,
    is_api_enabled: bool,
) -> None:
    """Track the start of the Rasa server.

    Args:
        input_channels: Channels the server listens on (only their names are reported).
        endpoints: Configured service endpoints; ``None`` means defaults.
        model_directory: Directory of the served model, used only to derive the
            project fingerprint.
        number_of_workers: Number of server workers.
        is_api_enabled: Whether the HTTP API is enabled.
    """
    from rasa.core.utils import AvailableEndpoints

    def project_fingerprint_from_model(
        _model_directory: Optional[Text],
    ) -> Optional[Text]:
        """Read the project fingerprint from the model, or ``None`` on any error."""
        if _model_directory:
            try:
                with model.get_model(_model_directory) as unpacked_model:
                    fingerprint = model.fingerprint_from_path(unpacked_model)
                    return fingerprint.get(model.FINGERPRINT_PROJECT)
            except Exception:
                # fingerprint is best-effort; missing/broken models are ignored
                return None
        return None

    if not endpoints:
        endpoints = AvailableEndpoints()
    # only endpoint *types* are reported, never their addresses or credentials
    _track(
        TELEMETRY_SERVER_STARTED_EVENT,
        {
            "input_channels": [i.name() for i in input_channels],
            "api_enabled": is_api_enabled,
            "number_of_workers": number_of_workers,
            "endpoints_nlg": endpoints.nlg.type if endpoints.nlg else None,
            "endpoints_nlu": endpoints.nlu.type if endpoints.nlu else None,
            "endpoints_action_server": endpoints.action.type
            if endpoints.action
            else None,
            "endpoints_model_server": endpoints.model.type if endpoints.model else None,
            "endpoints_tracker_store": endpoints.tracker_store.type
            if endpoints.tracker_store
            else None,
            "endpoints_lock_store": endpoints.lock_store.type
            if endpoints.lock_store
            else None,
            "endpoints_event_broker": endpoints.event_broker.type
            if endpoints.event_broker
            else None,
            "project": project_fingerprint_from_model(model_directory),
        },
    )
@ensure_telemetry_enabled
def track_project_init(path: Text) -> None:
    """Track the creation of a new project.

    Args:
        path: Directory the project was created in; only a hash is reported.
    """
    hashed_path = _hash_directory_path(path)
    _track(TELEMETRY_PROJECT_CREATED_EVENT, {"init_directory": hashed_path})
@ensure_telemetry_enabled
def track_shell_started(model_type: Text) -> None:
    """Track the start of a shell session for the given model type."""
    event_properties = {"type": model_type}
    _track(TELEMETRY_SHELL_STARTED_EVENT, event_properties)
@ensure_telemetry_enabled
def track_rasa_x_local() -> None:
    """Track a local start of Rasa X."""
    _track(TELEMETRY_RASA_X_LOCAL_STARTED_EVENT)
@ensure_telemetry_enabled
def track_visualization() -> None:
    """Track a story-visualization run."""
    _track(TELEMETRY_VISUALIZATION_STARTED_EVENT)
@ensure_telemetry_enabled
def track_core_model_test(num_story_steps: int, e2e: bool, agent: "Agent") -> None:
    """Track a core model test run.

    Args:
        num_story_steps: Number of test story steps used.
        e2e: Whether the test ran end to end.
        agent: Agent under test; used only for the project fingerprint.
    """
    fingerprint = model.fingerprint_from_path(agent.model_directory or "")
    event_properties = {
        "project": fingerprint.get(model.FINGERPRINT_PROJECT),
        "end_to_end": e2e,
        "num_story_steps": num_story_steps,
    }
    _track(TELEMETRY_TEST_CORE_EVENT, event_properties)
@ensure_telemetry_enabled
def track_nlu_model_test(test_data: "TrainingData") -> None:
    """Track an NLU model test run.

    Args:
        test_data: NLU data the model was tested on; only counts are reported.
    """
    data_statistics = {
        "num_intent_examples": len(test_data.intent_examples),
        "num_entity_examples": len(test_data.entity_examples),
        "num_lookup_tables": len(test_data.lookup_tables),
        "num_synonyms": len(test_data.entity_synonyms),
        "num_regexes": len(test_data.regex_features),
    }
    _track(TELEMETRY_TEST_NLU_EVENT, data_statistics)
| true | true |
f7f4b07d9bb172bca5a017090812d865965668dd | 32,713 | py | Python | xrootdSiteMover.py | virthead/COMPASS-multijob-pilot | beac49ec432d24382d4d23aacfe6c9674a59e118 | [
"Apache-2.0"
] | null | null | null | xrootdSiteMover.py | virthead/COMPASS-multijob-pilot | beac49ec432d24382d4d23aacfe6c9674a59e118 | [
"Apache-2.0"
] | null | null | null | xrootdSiteMover.py | virthead/COMPASS-multijob-pilot | beac49ec432d24382d4d23aacfe6c9674a59e118 | [
"Apache-2.0"
] | null | null | null | # xrootdSiteMover.py
""" Site mover used at e.g. UTA, SLACXRD """
import os
import shutil
import commands
import urllib
from time import time
import SiteMover
from re import compile, findall
from futil import *
from PilotErrors import PilotErrors
from pUtil import tolog, readpar, verifySetupCommand, getSiteInformation
from config import config_sm
from FileStateClient import updateFileState
from timed_command import timed_command
# Module-level shortcuts into the site-mover configuration (config_sm).
PERMISSIONS_DIR = config_sm.PERMISSIONS_DIR
PERMISSIONS_FILE = config_sm.PERMISSIONS_FILE
# Checksum command defaults to md5 (see config_sm.COMMAND_MD5).
CMD_CHECKSUM = config_sm.COMMAND_MD5
ARCH_DEFAULT = config_sm.ARCH_DEFAULT
class xrootdSiteMover(SiteMover.SiteMover):
    """
    Site mover for xrootd-based storage (used at e.g. UTA, SLACXRD).

    File movers move files between a SE (of different kind) and a local directory
    where all posix operations have to be supported and fast access is supposed:
      get_data: SE -> local
      put_data: local -> SE
      check_space: available space in SE
    This mover assumes the SE is locally accessible for all the WNs and that
    commands like cp, mkdir and checksum tools work on files in the SE
    (e.g. an NFS-exported file system). Transfers use xcp when available,
    falling back to plain cp.
    """
    __childDict = {}
    # identifier / preferred copy command for this mover
    copyCommand = "xcp"
    # preferred checksum algorithm for this mover
    checksum_command = "adler32"
    permissions_DIR = PERMISSIONS_DIR
    permissions_FILE = PERMISSIONS_FILE
    arch_type = ARCH_DEFAULT
    # overall transfer timeout in seconds (5 hours)
    timeout = 5*3600
def __init__(self, setup_path='', *args, **kwrds):
""" default init """
self._setup = setup_path
tolog("Init is using _setup: %s" % (self._setup))
def get_timeout(self):
return self.timeout
def getID(self):
""" returnd SM ID, the copy command used for it """
return self.copyCommand
def getSetup(self):
""" returns the setup string (pacman setup os setup script) for the copy command """
return self._setup
def getCopytool(self, setup):
""" determine which copy command to use """
cmd = "which xcp"
cpt = "cp"
try:
rs = commands.getoutput("%s which xcp" % (setup))
except Exception, e:
tolog("!!WARNING!!2999!! Failed the copy command test: %s" % str(e))
else:
if rs.find("no xcp") >= 0:
cpt = "cp"
else:
cpt = "xcp"
tolog("Will use %s to transfer file" % (cpt))
return cpt
def get_data(self, gpfn, lfn, path, fsize=0, fchecksum=0, guid=0, **pdict):
"""
Moves a DS file the local SE (where was put from DDM) to the working directory.
Performs the copy and, for systems supporting it, checks size and md5sum correctness
gpfn: full source URL (e.g. method://[host[:port]/full-dir-path/filename - a SRM URL is OK)
path: destination absolute path (in a local file system)
returns the status of the transfer. In case of failure it should remove the partially copied destination
"""
# The local file is assumed to have a relative path that is the same of the relative path in the 'gpfn'
# loc_... are the variables used to access the file in the locally exported file system
error = PilotErrors()
pilotErrorDiag = ""
# try to get the direct reading control variable (False for direct reading mode; file should not be copied)
useCT = pdict.get('usect', True)
jobId = pdict.get('jobId', '')
dsname = pdict.get('dsname', '')
workDir = pdict.get('workDir', '')
prodDBlockToken = pdict.get('access', '')
# get the DQ2 tracing report
report = self.getStubTracingReport(pdict['report'], 'xrootd', lfn, guid)
if self._setup:
_setup_str = "source %s; " % self._setup
else:
_setup_str = ''
ec, pilotErrorDiag = verifySetupCommand(error, _setup_str)
if ec != 0:
self.prepareReport('RFCP_FAIL', report)
return ec, pilotErrorDiag
tolog("xrootdSiteMover get_data using setup: %s" % (_setup_str))
# remove any host and SFN info from PFN path
src_loc_pfn = self.extractPathFromPFN(gpfn)
src_loc_filename = lfn
# source vars: gpfn, loc_pfn, loc_host, loc_dirname, loc_filename
# dest vars: path
if fchecksum != 0 and fchecksum != "":
csumtype = self.getChecksumType(fchecksum)
else:
csumtype = "default"
# protect against bad pfn's
src_loc_pfn = src_loc_pfn.replace('///','/')
src_loc_pfn = src_loc_pfn.replace('//xrootd/','/xrootd/')
# should the root file be copied or read directly by athena?
directIn, useFileStager = self.getTransferModes()
if directIn:
if useCT:
directIn = False
tolog("Direct access mode is switched off (file will be transferred with the copy tool)")
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", type="input")
else:
rootFile = self.isRootFile(src_loc_pfn, setup=_setup_str)
if prodDBlockToken == 'local' or not rootFile:
directIn = False
tolog("Direct access mode has been switched off for this file (will be transferred with the copy tool)")
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", type="input")
elif rootFile:
tolog("Found root file: %s (will not be transferred in direct reading mode)" % (src_loc_pfn))
report['relativeStart'] = None
report['transferStart'] = None
self.prepareReport('IS_ROOT', report)
if useFileStager:
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="file_stager", type="input")
else:
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="remote_io", type="input")
return error.ERR_DIRECTIOFILE, pilotErrorDiag
else:
tolog("Normal file transfer")
else:
tolog("No direct access mode")
ec = 0
if fsize == 0 or fchecksum == 0:
ec, pilotErrorDiag, fsize, fchecksum = self.getLocalFileInfo(src_loc_pfn, csumtype=csumtype)
if ec != 0:
self.prepareReport('GET_LOCAL_FILE_INFO_FAIL', report)
return ec, pilotErrorDiag
dest_file = os.path.join(path, src_loc_filename)
report['relativeStart'] = time()
# determine which copy command to use
cpt = self.getCopytool(_setup_str)
report['transferStart'] = time()
cmd = "%s %s %s %s" % (_setup_str, cpt, src_loc_pfn, dest_file)
#PN
# if ".lib." in src_loc_pfn:
# cmd = "%s %s %s %s" % (_setup_str, cpt, src_loc_pfn, dest_file)
# else:
# cmd = "%s %sXXX %s %s" % (_setup_str, cpt, src_loc_pfn, dest_file)
tolog("Executing command: %s" % (cmd))
# execute
timeout = 3600
try:
rc, telapsed, cout, cerr = timed_command(cmd, timeout)
except Exception, e:
self.__pilotErrorDiag = 'timed_command() threw an exception: %s' % str(e)
tolog("!!WARNING!!1111!! %s" % (pilotErrorDiag))
rc = 1
rs = str(e)
telapsed = timeout
else:
# improve output parsing, keep stderr and stdout separate
rs = cout + cerr
tolog("Elapsed time: %d" % (telapsed))
if rc != 0:
tolog("!!WARNING!!2990!! Command failed: %s" % (cmd))
pilotErrorDiag = "Error copying the file: %d, %s" % (rc, rs)
tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
# remove the local file before any get retry is attempted
_status = self.removeLocal(dest_file)
if not _status:
tolog("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
# did the copy command time out?
if is_timeout(rc):
pilotErrorDiag = "xcp get was timed out after %d seconds" % (telapsed)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
self.prepareReport('GET_TIMEOUT', report)
return error.ERR_GETTIMEOUT, pilotErrorDiag
self.prepareReport('CMD_FAIL', report)
return error.ERR_STAGEINFAILED, pilotErrorDiag
report['validateStart'] = time()
# get remote file size and checksum
ec, pilotErrorDiag, dstfsize, dstfchecksum = self.getLocalFileInfo(dest_file, csumtype=csumtype)
tolog("File info: %d, %s, %s" % (ec, dstfsize, dstfchecksum))
if ec != 0:
self.prepareReport('LOCAL_FILE_INFO_FAIL', report)
# remove the local file before any get retry is attempted
_status = self.removeLocal(dest_file)
if not _status:
tolog("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
return ec, pilotErrorDiag
# compare remote and local file checksum
if dstfchecksum != fchecksum and not self.isDummyChecksum(fchecksum):
pilotErrorDiag = "Remote and local checksums (of type %s) do not match for %s (%s != %s)" %\
(csumtype, os.path.basename(gpfn), dstfchecksum, fchecksum)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
# remove the local file before any get retry is attempted
_status = self.removeLocal(dest_file)
if not _status:
tolog("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
if csumtype == "adler32":
self.prepareReport('AD_MISMATCH', report)
return error.ERR_GETADMISMATCH, pilotErrorDiag
else:
self.prepareReport('MD5_MISMATCH', report)
return error.ERR_GETMD5MISMATCH, pilotErrorDiag
# compare remote and local file size
if dstfsize != fsize:
pilotErrorDiag = "Remote and local file sizes do not match for %s (%s != %s)" %\
(os.path.basename(gpfn), str(dstfsize), str(fsize))
tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
self.prepareReport('FS_MISMATCH', report)
# remove the local file before any get retry is attempted
_status = self.removeLocal(dest_file)
if not _status:
tolog("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
return error.ERR_GETWRONGSIZE, pilotErrorDiag
updateFileState(lfn, workDir, jobId, mode="file_state", state="transferred", type="input")
self.prepareReport('DONE', report)
return 0, pilotErrorDiag
def put_data(self, source, destination, fsize=0, fchecksum=0, **pdict):
""" Moves the file from the current local directory to a storage element
source: full path of the file in local directory
destination: destination SE, method://[hostname[:port]]/full-dir-path/ (NB: no file name)
Assumes that the SE is locally mounted and its local path is the same as the remote path
if both fsize and fchecksum (for the source) are given and !=0 these are assumed without reevaluating them
returns: exitcode, gpfn,fsize, fchecksum
"""
error = PilotErrors()
# Get input parameters from pdict
lfn = pdict.get('lfn', '')
guid = pdict.get('guid', '')
token = pdict.get('token', '')
scope = pdict.get('scope', '')
jobId = pdict.get('jobId', '')
workDir = pdict.get('workDir', '')
dsname = pdict.get('dsname', '')
analyJob = pdict.get('analyJob', False)
extradirs = pdict.get('extradirs', '')
experiment = pdict.get('experiment', '')
prodSourceLabel = pdict.get('prodSourceLabel', '')
# get the site information object
si = getSiteInformation(experiment)
if prodSourceLabel == 'ddm' and analyJob:
tolog("Treating PanDA Mover job as a production job during stage-out")
analyJob = False
# get the DQ2 tracing report
report = self.getStubTracingReport(pdict['report'], 'xrootd', lfn, guid)
if self._setup:
_setup_str = "source %s; " % self._setup
else:
_setup_str = ''
ec, pilotErrorDiag = verifySetupCommand(error, _setup_str)
if ec != 0:
self.prepareReport('RFCP_FAIL', report)
return self.put_data_retfail(ec, pilotErrorDiag)
report['relativeStart'] = time()
ec = 0
if fsize == 0 or fchecksum == 0:
if not self.useExternalAdler32():
# Can not use external adler32 command for remote file since the command is
# not available (defaulting to md5sum for put operation)
tolog("Command not found: adler32.sh (will switch to md5sum for local file checksum)")
csumtype = "default"
else:
csumtype = "adler32"
ec, pilotErrorDiag, fsize, fchecksum = self.getLocalFileInfo(source, csumtype=csumtype)
if ec != 0:
self.prepareReport('LOCAL_FILE_INFO_FAIL', report)
return self.put_data_retfail(ec, pilotErrorDiag)
# now that the file size is known, add it to the tracing report
report['filesize'] = fsize
tolog("File destination: %s" % (destination))
dst_se = destination
# srm://dcsrm.usatlas.bnl.gov:8443/srm/managerv1?SFN=/pnfs/usatlas.bnl.gov/
if( dst_se.find('SFN') != -1 ):
s = dst_se.split('SFN=')
dst_loc_se = s[1]
dst_prefix = s[0] + 'SFN='
else:
_sentries = dst_se.split('/', 3)
# 'method://host:port' is it always a ftp server? can it be srm? something else?
dst_serv = _sentries[0] + '//' + _sentries[2]
# dst_host = _sentries[2] # host and port
dst_loc_se = '/'+ _sentries[3]
dst_prefix = dst_serv
# use bare destination when it starts with root://
if destination.startswith('root://'):
dst_loc_se = destination
dst_prefix = ''
# report['dataset'] = dsname
# May be be a comma list but take first always
# (Remember that se can be a list where the first is used for output but any can be used for input)
se = readpar('se').split(",")[0]
_dummytoken, se = self.extractSE(se)
tolog("Using SE: %s" % (se))
filename = os.path.basename(source)
ec, pilotErrorDiag, tracer_error, dst_gpfn, lfcdir, surl = si.getProperPaths(error, analyJob, token, prodSourceLabel, dsname, filename, scope=scope, sitemover=self) # quick workaround
if ec != 0:
self.prepareReport(tracer_error, report)
return self.put_data_retfail(ec, pilotErrorDiag)
# are we transfering to a space token?
if token != None and token != "":
# Special case for GROUPDISK (do not remove dst: bit before this stage, needed in several places)
if "dst:" in token:
token = token[len('dst:'):]
tolog("Dropped dst: part of space token descriptor; token=%s" % (token))
token = "ATLASGROUPDISK"
tolog("Space token descriptor reset to: %s" % (token))
# get the proper destination
#destination = self.getDestination(analyJob, token)
#if destination == '':
# pilotErrorDiag = "put_data destination path in SE not defined"
# tolog('!!WARNING!!2990!! %s' % (pilotErrorDiag))
# self.prepareReport('SE_DEST_PATH_UNDEF', report)
# return self.put_data_retfail(error.ERR_STAGEOUTFAILED, pilotErrorDiag)
#tolog("Going to store job output at destination: %s" % (destination))
# add the space token to the destination string
#dst_loc_sedir = os.path.join(destination, os.path.join(extradirs, dsname))
#dst_loc_pfn = os.path.join(dst_loc_sedir, filename)
#dst_loc_pfn += "?oss.cgroup=%s" % (token)
dst_loc_pfn = dst_gpfn + "?oss.cgroup=%s" % (token)
#else:
#dst_loc_sedir = os.path.join(dst_loc_se, os.path.join(extradirs, dsname))
#dst_loc_pfn = os.path.join(dst_loc_sedir, filename)
dst_loc_pfn = dst_gpfn
dst_gpfn = dst_prefix + dst_loc_pfn
tolog("Final destination path: %s" % (dst_loc_pfn))
tolog("dst_gpfn: %s" % (dst_gpfn))
# get the DQ2 site name from ToA
try:
_dq2SiteName = self.getDQ2SiteName(surl=dst_gpfn)
except Exception, e:
tolog("Warning: Failed to get the DQ2 site name: %s (can not add this info to tracing report)" % str(e))
else:
report['localSite'], report['remoteSite'] = (_dq2SiteName, _dq2SiteName)
tolog("DQ2 site name: %s" % (_dq2SiteName))
# determine which copy command to use
cpt = self.getCopytool(_setup_str)
cmd = "%s %s %s %s" % (_setup_str, cpt, source, dst_loc_pfn)
# cmd = "%sXXX %s %s %s" % (_setup_str, cpt, source, dst_loc_pfn)
#PN
# if ".log." in dst_loc_pfn:
# cmd = "%s %s %s %s" % (_setup_str, cpt, source, dst_loc_pfn)
# else:
# cmd = "%sXXX %s %s %s" % (_setup_str, cpt, source, dst_loc_pfn)
tolog("Executing command: %s" % (cmd))
report['transferStart'] = time()
# execute
timeout = 3600
try:
rc, telapsed, cout, cerr = timed_command(cmd, timeout)
except Exception, e:
self.__pilotErrorDiag = 'timed_command() threw an exception: %s' % str(e)
tolog("!!WARNING!!1111!! %s" % (pilotErrorDiag))
rc = 1
rs = str(e)
telapsed = timeout
else:
# improve output parsing, keep stderr and stdout separate
rs = cout + cerr
tolog("Elapsed time: %d" % (telapsed))
# ready with the space token descriptor, remove it from the path if present
if "?oss.cgroup=" in dst_loc_pfn:
dst_loc_pfn = dst_loc_pfn[:dst_loc_pfn.find("?oss.cgroup=")]
dst_gpfn = dst_gpfn[:dst_gpfn.find("?oss.cgroup=")]
tolog("Removed space token part from dst_loc_pfn (not needed anymore): %s" % (dst_loc_pfn))
if rc != 0:
tolog("!!WARNING!!2990!! Command failed: %s" % (cmd))
pilotErrorDiag = "Error copying the file: %d, %s" % (rc, rs)
tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
# did the copy command time out?
if is_timeout(rc):
pilotErrorDiag = "xcp get was timed out after %d seconds" % (telapsed)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
self.prepareReport('PUT_TIMEOUT', report)
return self.put_data_retfail(error.ERR_PUTTIMEOUT, pilotErrorDiag, surl=dst_gpfn)
self.prepareReport('COPY_ERROR', report)
return self.put_data_retfail(error.ERR_STAGEOUTFAILED, pilotErrorDiag, surl=dst_gpfn)
report['validateStart'] = time()
# get the checksum type (md5sum or adler32)
if fchecksum != 0 and fchecksum != "":
csumtype = self.getChecksumType(fchecksum)
else:
csumtype = "default"
if csumtype == "adler32" and not self.useExternalAdler32():
# Can not use external adler32 command for remote file since the command is
# not available (defaulting to md5sum for put operation)
tolog("Command not found: adler32.sh (will switch to md5sum for remote file checksum)")
csumtype = "default"
# get remote file size and checksum
ec, pilotErrorDiag, dstfsize, dstfchecksum = self.getLocalFileInfo(dst_loc_pfn, csumtype=csumtype)
tolog("File info: %d, %s, %s" % (ec, dstfsize, dstfchecksum))
if ec != 0:
self.prepareReport('LOCAL_FILE_INFO_FAIL', report)
return self.put_data_retfail(ec, pilotErrorDiag, surl=dst_gpfn)
# compare remote and local file checksum
if dstfchecksum != fchecksum:
pilotErrorDiag = "Remote and local checksums (of type %s) do not match for %s (%s != %s)" %\
(csumtype, os.path.basename(dst_gpfn), dstfchecksum, fchecksum)
tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
if csumtype == "adler32":
self.prepareReport('AD_MISMATCH', report)
return self.put_data_retfail(error.ERR_PUTADMISMATCH, pilotErrorDiag, surl=dst_gpfn)
else:
self.prepareReport('MD5_MISMATCH', report)
return self.put_data_retfail(error.ERR_PUTMD5MISMATCH, pilotErrorDiag, surl=dst_gpfn)
# compare remote and local file size
if dstfsize != fsize:
pilotErrorDiag = "Remote and local file sizes do not match for %s (%s != %s)" %\
(os.path.basename(dst_gpfn), str(dstfsize), str(fsize))
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
self.prepareReport('FS_MISMATCH', report)
return self.put_data_retfail(error.ERR_PUTWRONGSIZE, pilotErrorDiag, surl=dst_gpfn)
self.prepareReport('DONE', report)
return 0, pilotErrorDiag, dst_gpfn, fsize, fchecksum, ARCH_DEFAULT
def check_space(self, ub):
"""
Checking space availability:
1. check DQ space URL
2. get storage path and check local space availability
"""
# http://bandicoot.uits.indiana.edu:8000/dq2/space/free
# http://bandicoot.uits.indiana.edu:8000/dq2/space/total
# http://bandicoot.uits.indiana.edu:8000/dq2/space/default
if ub == "" or ub == "None" or ub == None:
tolog("Using alternative check space function since URL method can not be applied (URL not set)")
retn = self._check_space(ub)
else:
try:
f = urllib.urlopen(ub + '/space/free')
ret = f.read()
retn = int(ret)
if retn == 0:
tolog(ub + '/space/free returned 0 space available, returning 999995')
retn = 999995
except:
tolog("Using alternative check space function since URL method failed")
retn = self._check_space(ub)
return retn
    def _check_space(self, ub):
        """Checking space of a local directory.

        When no URL is given, the storage path comes from the seprodpath/sepath
        queue parameters and is checked with check_space_df(); otherwise the
        default storage is fetched from <ub>/storages/default and a df command
        is run on it. Returns the available space, or a negative value on error.
        """
        # "source setup.sh"
        if self._setup:
            _setup_str = "source %s; " % self._setup
        else:
            _setup_str = ''
        fail = 0
        ret = ''
        if ub == "" or ub == "None" or ub == None:
            # seprodpath can have a complex structure in case of space tokens
            # although currently not supported in this site mover, prepare the code anyway
            # (use the first list item only)
            dst_loc_se = self.getDirList(readpar('seprodpath'))[0]
            if dst_loc_se == "":
                dst_loc_se = readpar('sepath')
            if dst_loc_se == "":
                tolog("WARNING: Can not perform alternative space check since sepath is not set")
                return -1
            else:
                tolog("Attempting to use df for checking SE space: %s" % (dst_loc_se))
                return self.check_space_df(dst_loc_se)
        else:
            try:
                f = urllib.urlopen(ub + '/storages/default')
            except Exception, e:
                tolog('!!WARNING!!2999!! Fetching default storage failed!')
                return -1
            else:
                ret = f.read()
        # a valid storage reply contains a URL (i.e. a '//')
        if ret.find('//') == -1:
            tolog('!!WARNING!!2999!! Fetching default storage failed!')
            fail = -1
        else:
            dst_se = ret.strip()
            # srm://dcsrm.usatlas.bnl.gov:8443/srm/managerv1?SFN=/pnOAfs/usatlas.bnl.gov/
            if (dst_se.find('SFN') != -1):
                s = dst_se.split('SFN=')
                dst_loc_se = s[1]
                #dst_prefix = s[0]
            else:
                _sentries = dst_se.split('/', 3)
                # 'method://host:port' is it always a ftp server? can it be srm? something else?
                dst_loc_se = '/'+ _sentries[3]
            # Run df to check space availability
            s, o = commands.getstatusoutput('%s df %s' % (_setup_str, dst_loc_se))
            if s != 0:
                check_syserr(s, o)
                tolog("!!WARNING!!2999!! Error in running df: %s" % o)
                fail = -1
            else:
                # parse Wei's df script (extract the space info)
                # expected line format: "XROOTD <total> <used> <available> ..."
                df_split = o.split("\n")[1]
                p = r"XROOTD[ ]+\d+[ ]+\d+[ ]+(\S+)[ ]+"
                pattern = compile(p)
                available = findall(pattern, df_split)
                try:
                    available_space = available[0]
                except:
                    # unparsable df output: fall back to a large default
                    available_space = 999999999
        if fail != 0:
            return fail
        else:
            return available_space
def getLocalFileInfo(self, fname, csumtype="default"):
""" returns exit code (0 if OK), file size and checksum """
error = PilotErrors()
pilotErrorDiag = ""
if self._setup:
_setup_str = "source %s; " % self._setup
else:
_setup_str = ''
tolog("getLocalFileInfo using setup: %s" % (_setup_str))
# get the file size
fsize = str(self.getRemoteFileSize(fname))
if fsize == "0":
pilotErrorDiag = "Encountered zero file size for file %s" % (fname)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
return error.ERR_ZEROFILESIZE, pilotErrorDiag, 0, 0
# pilotErrorDiag = "Could not get file size for file: %s" % (fname)
# tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
# return error.ERR_FAILEDSIZELOCAL, pilotErrorDiag, 0, 0
# get the checksum
if csumtype == "adler32":
if not self.useExternalAdler32():
tolog("External adler32.sh command not found, using built-in function")
fchecksum = self.adler32(fname)
else:
_CMD_CHECKSUM = "adler32.sh"
cmd = '%s %s %s' % (_setup_str, _CMD_CHECKSUM, fname)
tolog("Executing command: %s" % (cmd))
s, o = commands.getstatusoutput(cmd)
if s != 0:
o = o.replace('\n', ' ')
check_syserr(s, o)
pilotErrorDiag = "Error running checksum command (%s): %s" % (_CMD_CHECKSUM, o)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
# try to continue
# confirm output
_fchecksum_prel, pilotErrorDiag = self.parseAdler32(o, fname)
if _fchecksum_prel == "":
return error.ERR_FAILEDADLOCAL, pilotErrorDiag, fsize, 0
fchecksum = _fchecksum_prel.split()[0]
if fchecksum == '00000001': # "%08x" % 1L
pilotErrorDiag = "Adler32 failed (returned %s)" % (fchecksum)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
return error.ERR_FAILEDADLOCAL, pilotErrorDiag, fsize, 0
tolog("Using checksum: %s" % (fchecksum))
else:
cmd = '%s which %s' % (_setup_str, CMD_CHECKSUM)
tolog("Executing command: %s" % (cmd))
s, o = commands.getstatusoutput(cmd)
tolog("cmd output: %s" % o)
cmd = '%s %s %s' % (_setup_str, CMD_CHECKSUM, fname)
tolog("Executing command: %s" % (cmd))
s, o = commands.getstatusoutput(cmd)
if s != 0:
o = o.replace('\n', ' ')
check_syserr(s, o)
pilotErrorDiag = "Error running checksum command (%s): %s" % (CMD_CHECKSUM, o)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
return error.ERR_FAILEDMD5LOCAL, pilotErrorDiag, fsize, 0
fchecksum = o.split()[0]
return 0, pilotErrorDiag, fsize, fchecksum
def useExternalAdler32(self):
""" check if the local adler32 command is available, if not md5sum will be used """
status = True
if self._setup:
_setup_str = "source %s; " % self._setup
else:
_setup_str = ''
cmd = "%s which adler32.sh" % (_setup_str)
tolog("Executing command: %s" % (cmd))
s, o = commands.getstatusoutput(cmd)
if s != 0:
tolog("!!WARNING!!2999!! s=%d, o=%s" % (s, o))
# Command not found: adler32.sh (will default to use md5sum for checksums
status = False
return status
def parseAdler32(self, output, fname):
""" parse the adler32.sh output in case there was an AFS hickup """
# error in the output has the form:
# ERROR: some message. <checksum> <file name>
# This function should return "<checksum> <file name>"
# In case of problems, the function will return an empty string and the error diag
_output = ""
pilotErrorDiag = ""
tolog("Parsing adler32 output: %s" % (output))
try:
_output_prel = output.split(" ")
except Exception, e:
pilotErrorDiag = "Exception caught in parseAdler32: %s, %s" % (output, str(e))
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
else:
if len(_output_prel) >= 2:
_adler32 = _output_prel[-2]
_filename = _output_prel[-1]
# make sure that _adler32 and _filename make sense
if len(_output_prel) > 2:
tolog("!!WARNING!!2999!! parseAdler32 found garbled output: %s" % (output))
# try to interpret output
if len(_adler32) != 8 or not _adler32.isalnum():
pilotErrorDiag = "parseAdler32: Wrong format of interpreted adler32: %s" % (_adler32)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
elif _filename != fname:
pilotErrorDiag = "parseAdler32: File names do not match: %s ne %s" % (_filename, fname)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
else:
# put back confirmed values in _output
_output = _adler32 + " " + _filename
tolog('Interpreted output ok: \"%s\"' % (_output))
else:
pilotErrorDiag = "parseAdler32 could not interpret output: %s" % (output)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
return _output, pilotErrorDiag
def getRemoteFileSize(self, fname):
""" return the file size of the remote file """
if self._setup:
_setup_str = "source %s; " % self._setup
else:
_setup_str = ''
size = 0
cmd = "%s stat %s" % (_setup_str, fname)
tolog("Executing command: %s" % (cmd))
stat = commands.getoutput(cmd)
# get the second line in the stat output which contains the size
try:
stat_split = stat.split("\n")[1]
except Exception, e:
tolog("!!WARNING!!2999!! Failed to execute commands:")
tolog(".stat: %s" % (stat))
tolog(".stat_split: %s" % (str(e)))
size = 0
else:
# reg ex search pattern
pattern = compile(r"Size:[ ]+(\d+)")
# try to find the size in the stat output
fsize = findall(pattern, stat_split)
try:
size = fsize[0]
except:
tolog("!!WARNING!!2999!! stat command did not return file size")
size = 0
return size
def getMover(cls, *args, **kwrds):
"""
Creates and provides exactly one instance for each required subclass of SiteMover.
Implements the Singleton pattern
"""
cl_name = cls.__name__
if not issubclass(cls, SiteMover):
log.error("Wrong Factory invocation, %s is not subclass of SiteMover" % cl_name)
else:
return cls(*args, **kwrds)
getMover = classmethod(getMover)
| 42.595052 | 191 | 0.571027 |
""" Site mover used at e.g. UTA, SLACXRD """
import os
import shutil
import commands
import urllib
from time import time
import SiteMover
from re import compile, findall
from futil import *
from PilotErrors import PilotErrors
from pUtil import tolog, readpar, verifySetupCommand, getSiteInformation
from config import config_sm
from FileStateClient import updateFileState
from timed_command import timed_command
PERMISSIONS_DIR = config_sm.PERMISSIONS_DIR
PERMISSIONS_FILE = config_sm.PERMISSIONS_FILE
CMD_CHECKSUM = config_sm.COMMAND_MD5
ARCH_DEFAULT = config_sm.ARCH_DEFAULT
class xrootdSiteMover(SiteMover.SiteMover):
"""
File movers move files between a SE (of different kind) and a local directory
where all posix operations have to be supported and fast access is supposed
get_data: SE->local
put_data: local->SE
check_space: available space in SE
This is the Default SiteMover, the SE has to be locally accessible for all the WNs
and all commands like cp, mkdir, md5checksum have to be available on files in the SE
E.g. NFS exported file system
"""
__childDict = {}
copyCommand = "xcp"
checksum_command = "adler32"
permissions_DIR = PERMISSIONS_DIR
permissions_FILE = PERMISSIONS_FILE
arch_type = ARCH_DEFAULT
timeout = 5*3600
def __init__(self, setup_path='', *args, **kwrds):
""" default init """
self._setup = setup_path
tolog("Init is using _setup: %s" % (self._setup))
def get_timeout(self):
return self.timeout
def getID(self):
""" returnd SM ID, the copy command used for it """
return self.copyCommand
def getSetup(self):
""" returns the setup string (pacman setup os setup script) for the copy command """
return self._setup
def getCopytool(self, setup):
""" determine which copy command to use """
cmd = "which xcp"
cpt = "cp"
try:
rs = commands.getoutput("%s which xcp" % (setup))
except Exception, e:
tolog("!!WARNING!!2999!! Failed the copy command test: %s" % str(e))
else:
if rs.find("no xcp") >= 0:
cpt = "cp"
else:
cpt = "xcp"
tolog("Will use %s to transfer file" % (cpt))
return cpt
def get_data(self, gpfn, lfn, path, fsize=0, fchecksum=0, guid=0, **pdict):
"""
Moves a DS file the local SE (where was put from DDM) to the working directory.
Performs the copy and, for systems supporting it, checks size and md5sum correctness
gpfn: full source URL (e.g. method://[host[:port]/full-dir-path/filename - a SRM URL is OK)
path: destination absolute path (in a local file system)
returns the status of the transfer. In case of failure it should remove the partially copied destination
"""
error = PilotErrors()
pilotErrorDiag = ""
useCT = pdict.get('usect', True)
jobId = pdict.get('jobId', '')
dsname = pdict.get('dsname', '')
workDir = pdict.get('workDir', '')
prodDBlockToken = pdict.get('access', '')
report = self.getStubTracingReport(pdict['report'], 'xrootd', lfn, guid)
if self._setup:
_setup_str = "source %s; " % self._setup
else:
_setup_str = ''
ec, pilotErrorDiag = verifySetupCommand(error, _setup_str)
if ec != 0:
self.prepareReport('RFCP_FAIL', report)
return ec, pilotErrorDiag
tolog("xrootdSiteMover get_data using setup: %s" % (_setup_str))
src_loc_pfn = self.extractPathFromPFN(gpfn)
src_loc_filename = lfn
if fchecksum != 0 and fchecksum != "":
csumtype = self.getChecksumType(fchecksum)
else:
csumtype = "default"
src_loc_pfn = src_loc_pfn.replace('///','/')
src_loc_pfn = src_loc_pfn.replace('//xrootd/','/xrootd/')
# should the root file be copied or read directly by athena?
directIn, useFileStager = self.getTransferModes()
if directIn:
if useCT:
directIn = False
tolog("Direct access mode is switched off (file will be transferred with the copy tool)")
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", type="input")
else:
rootFile = self.isRootFile(src_loc_pfn, setup=_setup_str)
if prodDBlockToken == 'local' or not rootFile:
directIn = False
tolog("Direct access mode has been switched off for this file (will be transferred with the copy tool)")
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", type="input")
elif rootFile:
tolog("Found root file: %s (will not be transferred in direct reading mode)" % (src_loc_pfn))
report['relativeStart'] = None
report['transferStart'] = None
self.prepareReport('IS_ROOT', report)
if useFileStager:
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="file_stager", type="input")
else:
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="remote_io", type="input")
return error.ERR_DIRECTIOFILE, pilotErrorDiag
else:
tolog("Normal file transfer")
else:
tolog("No direct access mode")
ec = 0
if fsize == 0 or fchecksum == 0:
ec, pilotErrorDiag, fsize, fchecksum = self.getLocalFileInfo(src_loc_pfn, csumtype=csumtype)
if ec != 0:
self.prepareReport('GET_LOCAL_FILE_INFO_FAIL', report)
return ec, pilotErrorDiag
dest_file = os.path.join(path, src_loc_filename)
report['relativeStart'] = time()
# determine which copy command to use
cpt = self.getCopytool(_setup_str)
report['transferStart'] = time()
cmd = "%s %s %s %s" % (_setup_str, cpt, src_loc_pfn, dest_file)
#PN
# if ".lib." in src_loc_pfn:
# cmd = "%s %s %s %s" % (_setup_str, cpt, src_loc_pfn, dest_file)
# else:
# cmd = "%s %sXXX %s %s" % (_setup_str, cpt, src_loc_pfn, dest_file)
tolog("Executing command: %s" % (cmd))
# execute
timeout = 3600
try:
rc, telapsed, cout, cerr = timed_command(cmd, timeout)
except Exception, e:
self.__pilotErrorDiag = 'timed_command() threw an exception: %s' % str(e)
tolog("!!WARNING!!1111!! %s" % (pilotErrorDiag))
rc = 1
rs = str(e)
telapsed = timeout
else:
# improve output parsing, keep stderr and stdout separate
rs = cout + cerr
tolog("Elapsed time: %d" % (telapsed))
if rc != 0:
tolog("!!WARNING!!2990!! Command failed: %s" % (cmd))
pilotErrorDiag = "Error copying the file: %d, %s" % (rc, rs)
tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
# remove the local file before any get retry is attempted
_status = self.removeLocal(dest_file)
if not _status:
tolog("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
# did the copy command time out?
if is_timeout(rc):
pilotErrorDiag = "xcp get was timed out after %d seconds" % (telapsed)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
self.prepareReport('GET_TIMEOUT', report)
return error.ERR_GETTIMEOUT, pilotErrorDiag
self.prepareReport('CMD_FAIL', report)
return error.ERR_STAGEINFAILED, pilotErrorDiag
report['validateStart'] = time()
# get remote file size and checksum
ec, pilotErrorDiag, dstfsize, dstfchecksum = self.getLocalFileInfo(dest_file, csumtype=csumtype)
tolog("File info: %d, %s, %s" % (ec, dstfsize, dstfchecksum))
if ec != 0:
self.prepareReport('LOCAL_FILE_INFO_FAIL', report)
# remove the local file before any get retry is attempted
_status = self.removeLocal(dest_file)
if not _status:
tolog("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
return ec, pilotErrorDiag
# compare remote and local file checksum
if dstfchecksum != fchecksum and not self.isDummyChecksum(fchecksum):
pilotErrorDiag = "Remote and local checksums (of type %s) do not match for %s (%s != %s)" %\
(csumtype, os.path.basename(gpfn), dstfchecksum, fchecksum)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
# remove the local file before any get retry is attempted
_status = self.removeLocal(dest_file)
if not _status:
tolog("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
if csumtype == "adler32":
self.prepareReport('AD_MISMATCH', report)
return error.ERR_GETADMISMATCH, pilotErrorDiag
else:
self.prepareReport('MD5_MISMATCH', report)
return error.ERR_GETMD5MISMATCH, pilotErrorDiag
# compare remote and local file size
if dstfsize != fsize:
pilotErrorDiag = "Remote and local file sizes do not match for %s (%s != %s)" %\
(os.path.basename(gpfn), str(dstfsize), str(fsize))
tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
self.prepareReport('FS_MISMATCH', report)
# remove the local file before any get retry is attempted
_status = self.removeLocal(dest_file)
if not _status:
tolog("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
return error.ERR_GETWRONGSIZE, pilotErrorDiag
updateFileState(lfn, workDir, jobId, mode="file_state", state="transferred", type="input")
self.prepareReport('DONE', report)
return 0, pilotErrorDiag
def put_data(self, source, destination, fsize=0, fchecksum=0, **pdict):
""" Moves the file from the current local directory to a storage element
source: full path of the file in local directory
destination: destination SE, method://[hostname[:port]]/full-dir-path/ (NB: no file name)
Assumes that the SE is locally mounted and its local path is the same as the remote path
if both fsize and fchecksum (for the source) are given and !=0 these are assumed without reevaluating them
returns: exitcode, gpfn,fsize, fchecksum
"""
error = PilotErrors()
# Get input parameters from pdict
lfn = pdict.get('lfn', '')
guid = pdict.get('guid', '')
token = pdict.get('token', '')
scope = pdict.get('scope', '')
jobId = pdict.get('jobId', '')
workDir = pdict.get('workDir', '')
dsname = pdict.get('dsname', '')
analyJob = pdict.get('analyJob', False)
extradirs = pdict.get('extradirs', '')
experiment = pdict.get('experiment', '')
prodSourceLabel = pdict.get('prodSourceLabel', '')
# get the site information object
si = getSiteInformation(experiment)
if prodSourceLabel == 'ddm' and analyJob:
tolog("Treating PanDA Mover job as a production job during stage-out")
analyJob = False
# get the DQ2 tracing report
report = self.getStubTracingReport(pdict['report'], 'xrootd', lfn, guid)
if self._setup:
_setup_str = "source %s; " % self._setup
else:
_setup_str = ''
ec, pilotErrorDiag = verifySetupCommand(error, _setup_str)
if ec != 0:
self.prepareReport('RFCP_FAIL', report)
return self.put_data_retfail(ec, pilotErrorDiag)
report['relativeStart'] = time()
ec = 0
if fsize == 0 or fchecksum == 0:
if not self.useExternalAdler32():
# Can not use external adler32 command for remote file since the command is
# not available (defaulting to md5sum for put operation)
tolog("Command not found: adler32.sh (will switch to md5sum for local file checksum)")
csumtype = "default"
else:
csumtype = "adler32"
ec, pilotErrorDiag, fsize, fchecksum = self.getLocalFileInfo(source, csumtype=csumtype)
if ec != 0:
self.prepareReport('LOCAL_FILE_INFO_FAIL', report)
return self.put_data_retfail(ec, pilotErrorDiag)
# now that the file size is known, add it to the tracing report
report['filesize'] = fsize
tolog("File destination: %s" % (destination))
dst_se = destination
# srm://dcsrm.usatlas.bnl.gov:8443/srm/managerv1?SFN=/pnfs/usatlas.bnl.gov/
if( dst_se.find('SFN') != -1 ):
s = dst_se.split('SFN=')
dst_loc_se = s[1]
dst_prefix = s[0] + 'SFN='
else:
_sentries = dst_se.split('/', 3)
# 'method://host:port' is it always a ftp server? can it be srm? something else?
dst_serv = _sentries[0] + '//' + _sentries[2]
# dst_host = _sentries[2] # host and port
dst_loc_se = '/'+ _sentries[3]
dst_prefix = dst_serv
# use bare destination when it starts with root://
if destination.startswith('root://'):
dst_loc_se = destination
dst_prefix = ''
# report['dataset'] = dsname
# May be be a comma list but take first always
# (Remember that se can be a list where the first is used for output but any can be used for input)
se = readpar('se').split(",")[0]
_dummytoken, se = self.extractSE(se)
tolog("Using SE: %s" % (se))
filename = os.path.basename(source)
ec, pilotErrorDiag, tracer_error, dst_gpfn, lfcdir, surl = si.getProperPaths(error, analyJob, token, prodSourceLabel, dsname, filename, scope=scope, sitemover=self) # quick workaround
if ec != 0:
self.prepareReport(tracer_error, report)
return self.put_data_retfail(ec, pilotErrorDiag)
# are we transfering to a space token?
if token != None and token != "":
# Special case for GROUPDISK (do not remove dst: bit before this stage, needed in several places)
if "dst:" in token:
token = token[len('dst:'):]
tolog("Dropped dst: part of space token descriptor; token=%s" % (token))
token = "ATLASGROUPDISK"
tolog("Space token descriptor reset to: %s" % (token))
# get the proper destination
#destination = self.getDestination(analyJob, token)
#if destination == '':
# pilotErrorDiag = "put_data destination path in SE not defined"
# tolog('!!WARNING!!2990!! %s' % (pilotErrorDiag))
# self.prepareReport('SE_DEST_PATH_UNDEF', report)
# return self.put_data_retfail(error.ERR_STAGEOUTFAILED, pilotErrorDiag)
#tolog("Going to store job output at destination: %s" % (destination))
# add the space token to the destination string
#dst_loc_sedir = os.path.join(destination, os.path.join(extradirs, dsname))
#dst_loc_pfn = os.path.join(dst_loc_sedir, filename)
#dst_loc_pfn += "?oss.cgroup=%s" % (token)
dst_loc_pfn = dst_gpfn + "?oss.cgroup=%s" % (token)
#else:
#dst_loc_sedir = os.path.join(dst_loc_se, os.path.join(extradirs, dsname))
#dst_loc_pfn = os.path.join(dst_loc_sedir, filename)
dst_loc_pfn = dst_gpfn
dst_gpfn = dst_prefix + dst_loc_pfn
tolog("Final destination path: %s" % (dst_loc_pfn))
tolog("dst_gpfn: %s" % (dst_gpfn))
# get the DQ2 site name from ToA
try:
_dq2SiteName = self.getDQ2SiteName(surl=dst_gpfn)
except Exception, e:
tolog("Warning: Failed to get the DQ2 site name: %s (can not add this info to tracing report)" % str(e))
else:
report['localSite'], report['remoteSite'] = (_dq2SiteName, _dq2SiteName)
tolog("DQ2 site name: %s" % (_dq2SiteName))
# determine which copy command to use
cpt = self.getCopytool(_setup_str)
cmd = "%s %s %s %s" % (_setup_str, cpt, source, dst_loc_pfn)
# cmd = "%sXXX %s %s %s" % (_setup_str, cpt, source, dst_loc_pfn)
#PN
# if ".log." in dst_loc_pfn:
# cmd = "%s %s %s %s" % (_setup_str, cpt, source, dst_loc_pfn)
# else:
# cmd = "%sXXX %s %s %s" % (_setup_str, cpt, source, dst_loc_pfn)
tolog("Executing command: %s" % (cmd))
report['transferStart'] = time()
# execute
timeout = 3600
try:
rc, telapsed, cout, cerr = timed_command(cmd, timeout)
except Exception, e:
self.__pilotErrorDiag = 'timed_command() threw an exception: %s' % str(e)
tolog("!!WARNING!!1111!! %s" % (pilotErrorDiag))
rc = 1
rs = str(e)
telapsed = timeout
else:
# improve output parsing, keep stderr and stdout separate
rs = cout + cerr
tolog("Elapsed time: %d" % (telapsed))
# ready with the space token descriptor, remove it from the path if present
if "?oss.cgroup=" in dst_loc_pfn:
dst_loc_pfn = dst_loc_pfn[:dst_loc_pfn.find("?oss.cgroup=")]
dst_gpfn = dst_gpfn[:dst_gpfn.find("?oss.cgroup=")]
tolog("Removed space token part from dst_loc_pfn (not needed anymore): %s" % (dst_loc_pfn))
if rc != 0:
tolog("!!WARNING!!2990!! Command failed: %s" % (cmd))
pilotErrorDiag = "Error copying the file: %d, %s" % (rc, rs)
tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
# did the copy command time out?
if is_timeout(rc):
pilotErrorDiag = "xcp get was timed out after %d seconds" % (telapsed)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
self.prepareReport('PUT_TIMEOUT', report)
return self.put_data_retfail(error.ERR_PUTTIMEOUT, pilotErrorDiag, surl=dst_gpfn)
self.prepareReport('COPY_ERROR', report)
return self.put_data_retfail(error.ERR_STAGEOUTFAILED, pilotErrorDiag, surl=dst_gpfn)
report['validateStart'] = time()
# get the checksum type (md5sum or adler32)
if fchecksum != 0 and fchecksum != "":
csumtype = self.getChecksumType(fchecksum)
else:
csumtype = "default"
if csumtype == "adler32" and not self.useExternalAdler32():
# Can not use external adler32 command for remote file since the command is
# not available (defaulting to md5sum for put operation)
tolog("Command not found: adler32.sh (will switch to md5sum for remote file checksum)")
csumtype = "default"
# get remote file size and checksum
ec, pilotErrorDiag, dstfsize, dstfchecksum = self.getLocalFileInfo(dst_loc_pfn, csumtype=csumtype)
tolog("File info: %d, %s, %s" % (ec, dstfsize, dstfchecksum))
if ec != 0:
self.prepareReport('LOCAL_FILE_INFO_FAIL', report)
return self.put_data_retfail(ec, pilotErrorDiag, surl=dst_gpfn)
# compare remote and local file checksum
if dstfchecksum != fchecksum:
pilotErrorDiag = "Remote and local checksums (of type %s) do not match for %s (%s != %s)" %\
(csumtype, os.path.basename(dst_gpfn), dstfchecksum, fchecksum)
tolog('!!WARNING!!2999!! %s' % (pilotErrorDiag))
if csumtype == "adler32":
self.prepareReport('AD_MISMATCH', report)
return self.put_data_retfail(error.ERR_PUTADMISMATCH, pilotErrorDiag, surl=dst_gpfn)
else:
self.prepareReport('MD5_MISMATCH', report)
return self.put_data_retfail(error.ERR_PUTMD5MISMATCH, pilotErrorDiag, surl=dst_gpfn)
# compare remote and local file size
if dstfsize != fsize:
pilotErrorDiag = "Remote and local file sizes do not match for %s (%s != %s)" %\
(os.path.basename(dst_gpfn), str(dstfsize), str(fsize))
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
self.prepareReport('FS_MISMATCH', report)
return self.put_data_retfail(error.ERR_PUTWRONGSIZE, pilotErrorDiag, surl=dst_gpfn)
self.prepareReport('DONE', report)
return 0, pilotErrorDiag, dst_gpfn, fsize, fchecksum, ARCH_DEFAULT
def check_space(self, ub):
"""
Checking space availability:
1. check DQ space URL
2. get storage path and check local space availability
"""
# http://bandicoot.uits.indiana.edu:8000/dq2/space/free
# http://bandicoot.uits.indiana.edu:8000/dq2/space/total
# http://bandicoot.uits.indiana.edu:8000/dq2/space/default
if ub == "" or ub == "None" or ub == None:
tolog("Using alternative check space function since URL method can not be applied (URL not set)")
retn = self._check_space(ub)
else:
try:
f = urllib.urlopen(ub + '/space/free')
ret = f.read()
retn = int(ret)
if retn == 0:
tolog(ub + '/space/free returned 0 space available, returning 999995')
retn = 999995
except:
tolog("Using alternative check space function since URL method failed")
retn = self._check_space(ub)
return retn
def _check_space(self, ub):
"""Checking space of a local directory"""
# "source setup.sh"
if self._setup:
_setup_str = "source %s; " % self._setup
else:
_setup_str = ''
fail = 0
ret = ''
if ub == "" or ub == "None" or ub == None:
# seprodpath can have a complex structure in case of space tokens
# although currently not supported in this site mover, prepare the code anyway
# (use the first list item only)
dst_loc_se = self.getDirList(readpar('seprodpath'))[0]
if dst_loc_se == "":
dst_loc_se = readpar('sepath')
if dst_loc_se == "":
tolog("WARNING: Can not perform alternative space check since sepath is not set")
return -1
else:
tolog("Attempting to use df for checking SE space: %s" % (dst_loc_se))
return self.check_space_df(dst_loc_se)
else:
try:
f = urllib.urlopen(ub + '/storages/default')
except Exception, e:
tolog('!!WARNING!!2999!! Fetching default storage failed!')
return -1
else:
ret = f.read()
if ret.find('//') == -1:
tolog('!!WARNING!!2999!! Fetching default storage failed!')
fail = -1
else:
dst_se = ret.strip()
# srm://dcsrm.usatlas.bnl.gov:8443/srm/managerv1?SFN=/pnOAfs/usatlas.bnl.gov/
if (dst_se.find('SFN') != -1):
s = dst_se.split('SFN=')
dst_loc_se = s[1]
#dst_prefix = s[0]
else:
_sentries = dst_se.split('/', 3)
# 'method://host:port' is it always a ftp server? can it be srm? something else?
dst_loc_se = '/'+ _sentries[3]
# Run df to check space availability
s, o = commands.getstatusoutput('%s df %s' % (_setup_str, dst_loc_se))
if s != 0:
check_syserr(s, o)
tolog("!!WARNING!!2999!! Error in running df: %s" % o)
fail = -1
else:
# parse Wei's df script (extract the space info)
df_split = o.split("\n")[1]
p = r"XROOTD[ ]+\d+[ ]+\d+[ ]+(\S+)[ ]+"
pattern = compile(p)
available = findall(pattern, df_split)
try:
available_space = available[0]
except:
available_space = 999999999
if fail != 0:
return fail
else:
return available_space
def getLocalFileInfo(self, fname, csumtype="default"):
""" returns exit code (0 if OK), file size and checksum """
error = PilotErrors()
pilotErrorDiag = ""
if self._setup:
_setup_str = "source %s; " % self._setup
else:
_setup_str = ''
tolog("getLocalFileInfo using setup: %s" % (_setup_str))
fsize = str(self.getRemoteFileSize(fname))
if fsize == "0":
pilotErrorDiag = "Encountered zero file size for file %s" % (fname)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
return error.ERR_ZEROFILESIZE, pilotErrorDiag, 0, 0
if csumtype == "adler32":
if not self.useExternalAdler32():
tolog("External adler32.sh command not found, using built-in function")
fchecksum = self.adler32(fname)
else:
_CMD_CHECKSUM = "adler32.sh"
cmd = '%s %s %s' % (_setup_str, _CMD_CHECKSUM, fname)
tolog("Executing command: %s" % (cmd))
s, o = commands.getstatusoutput(cmd)
if s != 0:
o = o.replace('\n', ' ')
check_syserr(s, o)
pilotErrorDiag = "Error running checksum command (%s): %s" % (_CMD_CHECKSUM, o)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
_fchecksum_prel, pilotErrorDiag = self.parseAdler32(o, fname)
if _fchecksum_prel == "":
return error.ERR_FAILEDADLOCAL, pilotErrorDiag, fsize, 0
fchecksum = _fchecksum_prel.split()[0]
if fchecksum == '00000001':
pilotErrorDiag = "Adler32 failed (returned %s)" % (fchecksum)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
return error.ERR_FAILEDADLOCAL, pilotErrorDiag, fsize, 0
tolog("Using checksum: %s" % (fchecksum))
else:
cmd = '%s which %s' % (_setup_str, CMD_CHECKSUM)
tolog("Executing command: %s" % (cmd))
s, o = commands.getstatusoutput(cmd)
tolog("cmd output: %s" % o)
cmd = '%s %s %s' % (_setup_str, CMD_CHECKSUM, fname)
tolog("Executing command: %s" % (cmd))
s, o = commands.getstatusoutput(cmd)
if s != 0:
o = o.replace('\n', ' ')
check_syserr(s, o)
pilotErrorDiag = "Error running checksum command (%s): %s" % (CMD_CHECKSUM, o)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
return error.ERR_FAILEDMD5LOCAL, pilotErrorDiag, fsize, 0
fchecksum = o.split()[0]
return 0, pilotErrorDiag, fsize, fchecksum
def useExternalAdler32(self):
""" check if the local adler32 command is available, if not md5sum will be used """
status = True
if self._setup:
_setup_str = "source %s; " % self._setup
else:
_setup_str = ''
cmd = "%s which adler32.sh" % (_setup_str)
tolog("Executing command: %s" % (cmd))
s, o = commands.getstatusoutput(cmd)
if s != 0:
tolog("!!WARNING!!2999!! s=%d, o=%s" % (s, o))
status = False
return status
def parseAdler32(self, output, fname):
""" parse the adler32.sh output in case there was an AFS hickup """
_output = ""
pilotErrorDiag = ""
tolog("Parsing adler32 output: %s" % (output))
try:
_output_prel = output.split(" ")
except Exception, e:
pilotErrorDiag = "Exception caught in parseAdler32: %s, %s" % (output, str(e))
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
else:
if len(_output_prel) >= 2:
_adler32 = _output_prel[-2]
_filename = _output_prel[-1]
if len(_output_prel) > 2:
tolog("!!WARNING!!2999!! parseAdler32 found garbled output: %s" % (output))
if len(_adler32) != 8 or not _adler32.isalnum():
pilotErrorDiag = "parseAdler32: Wrong format of interpreted adler32: %s" % (_adler32)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
elif _filename != fname:
pilotErrorDiag = "parseAdler32: File names do not match: %s ne %s" % (_filename, fname)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
else:
_output = _adler32 + " " + _filename
tolog('Interpreted output ok: \"%s\"' % (_output))
else:
pilotErrorDiag = "parseAdler32 could not interpret output: %s" % (output)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
return _output, pilotErrorDiag
def getRemoteFileSize(self, fname):
""" return the file size of the remote file """
if self._setup:
_setup_str = "source %s; " % self._setup
else:
_setup_str = ''
size = 0
cmd = "%s stat %s" % (_setup_str, fname)
tolog("Executing command: %s" % (cmd))
stat = commands.getoutput(cmd)
try:
stat_split = stat.split("\n")[1]
except Exception, e:
tolog("!!WARNING!!2999!! Failed to execute commands:")
tolog(".stat: %s" % (stat))
tolog(".stat_split: %s" % (str(e)))
size = 0
else:
pattern = compile(r"Size:[ ]+(\d+)")
fsize = findall(pattern, stat_split)
try:
size = fsize[0]
except:
tolog("!!WARNING!!2999!! stat command did not return file size")
size = 0
return size
def getMover(cls, *args, **kwrds):
"""
Creates and provides exactly one instance for each required subclass of SiteMover.
Implements the Singleton pattern
"""
cl_name = cls.__name__
if not issubclass(cls, SiteMover):
log.error("Wrong Factory invocation, %s is not subclass of SiteMover" % cl_name)
else:
return cls(*args, **kwrds)
getMover = classmethod(getMover)
| false | true |
f7f4b0ced5af07dc040d5ff95aae1c26c66241f5 | 10,604 | py | Python | tests/models/validators/v3_0_0/jsd_f831d9ed2beb5c2b967aa10db8c22046.py | CiscoISE/ciscoisesdk | 860b0fc7cc15d0c2a39c64608195a7ab3d5f4885 | [
"MIT"
] | 36 | 2021-05-18T16:24:19.000Z | 2022-03-05T13:44:41.000Z | tests/models/validators/v3_0_0/jsd_f831d9ed2beb5c2b967aa10db8c22046.py | CiscoISE/ciscoisesdk | 860b0fc7cc15d0c2a39c64608195a7ab3d5f4885 | [
"MIT"
] | 15 | 2021-06-08T19:03:37.000Z | 2022-02-25T14:47:33.000Z | tests/models/validators/v3_0_0/jsd_f831d9ed2beb5c2b967aa10db8c22046.py | CiscoISE/ciscoisesdk | 860b0fc7cc15d0c2a39c64608195a7ab3d5f4885 | [
"MIT"
] | 6 | 2021-06-10T09:32:01.000Z | 2022-01-12T08:34:39.000Z | # -*- coding: utf-8 -*-
"""Identity Services Engine getDeviceAdminAuthorizationRules data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from ciscoisesdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorF831D9Ed2Beb5C2B967AA10Db8C22046(object):
    """getDeviceAdminAuthorizationRules request schema definition."""
    def __init__(self):
        super(JSONSchemaValidatorF831D9Ed2Beb5C2B967AA10Db8C22046, self).__init__()
        # Compile the JSON Schema (draft-04) once at construction time; the
        # trailing ``.replace`` strips the 16-space source-code indentation from
        # every line of the triple-quoted literal before it is parsed by
        # ``json.loads`` (JSON itself ignores the remaining whitespace).
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "$schema": "http://json-schema.org/draft-04/schema#",
                "properties": {
                  "response": {
                    "items": {
                      "properties": {
                        "commands": {
                          "items": {
                            "type": "string"
                          },
                          "type": "array"
                        },
                        "link": {
                          "properties": {
                            "href": {
                              "type": "string"
                            },
                            "rel": {
                              "enum": [
                                "next",
                                "previous",
                                "self",
                                "status"
                              ],
                              "type": "string"
                            },
                            "type": {
                              "type": "string"
                            }
                          },
                          "required": [
                            "href"
                          ],
                          "type": "object"
                        },
                        "profile": {
                          "type": "string"
                        },
                        "rule": {
                          "properties": {
                            "condition": {
                              "properties": {
                                "attributeId": {
                                  "type": "string"
                                },
                                "attributeName": {
                                  "type": "string"
                                },
                                "attributeValue": {
                                  "type": "string"
                                },
                                "children": {
                                  "items": {
                                    "properties": {
                                      "conditionType": {
                                        "enum": [
                                          "ConditionReference",
                                          "ConditionAttributes",
                                          "LibraryConditionAttributes",
                                          "ConditionAndBlock",
                                          "LibraryConditionAndBlock",
                                          "ConditionOrBlock",
                                          "LibraryConditionOrBlock",
                                          "TimeAndDateCondition"
                                        ],
                                        "type": "string"
                                      },
                                      "isNegate": {
                                        "default": false,
                                        "type": "boolean"
                                      },
                                      "link": {
                                        "properties": {
                                          "href": {
                                            "type": "string"
                                          },
                                          "rel": {
                                            "enum": [
                                              "next",
                                              "previous",
                                              "self",
                                              "status"
                                            ],
                                            "type": "string"
                                          },
                                          "type": {
                                            "type": "string"
                                          }
                                        },
                                        "type": "object"
                                      }
                                    },
                                    "type": "object"
                                  },
                                  "minItems": 2,
                                  "type": "array"
                                },
                                "conditionType": {
                                  "enum": [
                                    "ConditionReference",
                                    "ConditionAttributes",
                                    "LibraryConditionAttributes",
                                    "ConditionAndBlock",
                                    "LibraryConditionAndBlock",
                                    "ConditionOrBlock",
                                    "LibraryConditionOrBlock",
                                    "TimeAndDateCondition"
                                  ],
                                  "type": "string"
                                },
                                "datesRange": {
                                  "properties": {
                                    "endDate": {
                                      "maxLength": 10,
                                      "minLength": 10,
                                      "type": "string"
                                    },
                                    "startDate": {
                                      "maxLength": 10,
                                      "minLength": 10,
                                      "type": "string"
                                    }
                                  },
                                  "type": "object"
                                },
                                "datesRangeException": {
                                  "properties": {
                                    "endDate": {
                                      "maxLength": 10,
                                      "minLength": 10,
                                      "type": "string"
                                    },
                                    "startDate": {
                                      "maxLength": 10,
                                      "minLength": 10,
                                      "type": "string"
                                    }
                                  },
                                  "type": "object"
                                },
                                "description":
                                {
                                  "default": "",
                                  "type": "string"
                                },
                                "dictionaryName": {
                                  "type": "string"
                                },
                                "dictionaryValue": {
                                  "type": "string"
                                },
                                "hoursRange": {
                                  "properties": {
                                    "endTime": {
                                      "type": "string"
                                    },
                                    "startTime": {
                                      "type": "string"
                                    }
                                  },
                                  "type": "object"
                                },
                                "hoursRangeException": {
                                  "properties": {
                                    "endTime": {
                                      "type": "string"
                                    },
                                    "startTime": {
                                      "type": "string"
                                    }
                                  },
                                  "type": "object"
                                },
                                "id": {
                                  "type": "string"
                                },
                                "isNegate": {
                                  "default": false,
                                  "type": "boolean"
                                },
                                "link": {
                                  "properties": {
                                    "href": {
                                      "type": "string"
                                    },
                                    "rel": {
                                      "enum": [
                                        "next",
                                        "previous",
                                        "self",
                                        "status"
                                      ],
                                      "type": "string"
                                    },
                                    "type": {
                                      "type": "string"
                                    }
                                  },
                                  "type": "object"
                                },
                                "name": {
                                  "type": "string"
                                },
                                "operator": {
                                  "enum": [
                                    "equals",
                                    "notEquals",
                                    "contains",
                                    "notContains",
                                    "matches",
                                    "in",
                                    "notIn",
                                    "startsWith",
                                    "notStartsWith",
                                    "endsWith",
                                    "notEndsWith",
                                    "greaterThan",
                                    "lessThan",
                                    "greaterOrEquals",
                                    "lessOrEquals",
                                    "ipGreaterThan",
                                    "ipLessThan",
                                    "ipEquals",
                                    "ipNotEquals"
                                  ],
                                  "type": "string"
                                },
                                "weekDays": {
                                  "items": {
                                    "enum": [
                                      "Sunday",
                                      "Monday",
                                      "Tuesday",
                                      "Wednesday",
                                      "Thursday",
                                      "Friday",
                                      "Saturday"
                                    ],
                                    "type": "string"
                                  },
                                  "minItems": 1,
                                  "type": "array"
                                },
                                "weekDaysException": {
                                  "items": {
                                    "enum": [
                                      "Sunday",
                                      "Monday",
                                      "Tuesday",
                                      "Wednesday",
                                      "Thursday",
                                      "Friday",
                                      "Saturday"
                                    ],
                                    "type": "string"
                                  },
                                  "type": "array"
                                }
                              },
                              "type": "object"
                            },
                            "default": {
                              "default": false,
                              "type": "boolean"
                            },
                            "hitCounts": {
                              "type": "integer"
                            },
                            "id": {
                              "type": "string"
                            },
                            "name": {
                              "type": "string"
                            },
                            "rank": {
                              "type": "integer"
                            },
                            "state": {
                              "default": "enabled",
                              "enum": [
                                "enabled",
                                "disabled",
                                "monitor"
                              ],
                              "type": "string"
                            }
                          },
                          "required": [
                            "name"
                          ],
                          "type": "object"
                        }
                      },
                      "required": [
                        "rule"
                      ],
                      "type": "object"
                    },
                    "type": "array"
                  },
                  "version": {
                    "type": "string"
                  }
                },
                "required": [
                  "response",
                  "version"
                ],
                "type": "object"
            }'''.replace("\n" + ' ' * 16, '')
        ))
    def validate(self, request):
        """Validate *request* against the compiled schema.

        Raises
        ------
        MalformedRequest
            If *request* violates the schema; the fastjsonschema error
            message is embedded in the exception text.
        """
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            # Re-raise schema violations as the SDK's own exception type so
            # callers only need to catch MalformedRequest.
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| 29.131868 | 83 | 0.342512 |
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from ciscoisesdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorF831D9Ed2Beb5C2B967AA10Db8C22046(object):
def __init__(self):
super(JSONSchemaValidatorF831D9Ed2Beb5C2B967AA10Db8C22046, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"response": {
"items": {
"properties": {
"commands": {
"items": {
"type": "string"
},
"type": "array"
},
"link": {
"properties": {
"href": {
"type": "string"
},
"rel": {
"enum": [
"next",
"previous",
"self",
"status"
],
"type": "string"
},
"type": {
"type": "string"
}
},
"required": [
"href"
],
"type": "object"
},
"profile": {
"type": "string"
},
"rule": {
"properties": {
"condition": {
"properties": {
"attributeId": {
"type": "string"
},
"attributeName": {
"type": "string"
},
"attributeValue": {
"type": "string"
},
"children": {
"items": {
"properties": {
"conditionType": {
"enum": [
"ConditionReference",
"ConditionAttributes",
"LibraryConditionAttributes",
"ConditionAndBlock",
"LibraryConditionAndBlock",
"ConditionOrBlock",
"LibraryConditionOrBlock",
"TimeAndDateCondition"
],
"type": "string"
},
"isNegate": {
"default": false,
"type": "boolean"
},
"link": {
"properties": {
"href": {
"type": "string"
},
"rel": {
"enum": [
"next",
"previous",
"self",
"status"
],
"type": "string"
},
"type": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"minItems": 2,
"type": "array"
},
"conditionType": {
"enum": [
"ConditionReference",
"ConditionAttributes",
"LibraryConditionAttributes",
"ConditionAndBlock",
"LibraryConditionAndBlock",
"ConditionOrBlock",
"LibraryConditionOrBlock",
"TimeAndDateCondition"
],
"type": "string"
},
"datesRange": {
"properties": {
"endDate": {
"maxLength": 10,
"minLength": 10,
"type": "string"
},
"startDate": {
"maxLength": 10,
"minLength": 10,
"type": "string"
}
},
"type": "object"
},
"datesRangeException": {
"properties": {
"endDate": {
"maxLength": 10,
"minLength": 10,
"type": "string"
},
"startDate": {
"maxLength": 10,
"minLength": 10,
"type": "string"
}
},
"type": "object"
},
"description":
{
"default": "",
"type": "string"
},
"dictionaryName": {
"type": "string"
},
"dictionaryValue": {
"type": "string"
},
"hoursRange": {
"properties": {
"endTime": {
"type": "string"
},
"startTime": {
"type": "string"
}
},
"type": "object"
},
"hoursRangeException": {
"properties": {
"endTime": {
"type": "string"
},
"startTime": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"isNegate": {
"default": false,
"type": "boolean"
},
"link": {
"properties": {
"href": {
"type": "string"
},
"rel": {
"enum": [
"next",
"previous",
"self",
"status"
],
"type": "string"
},
"type": {
"type": "string"
}
},
"type": "object"
},
"name": {
"type": "string"
},
"operator": {
"enum": [
"equals",
"notEquals",
"contains",
"notContains",
"matches",
"in",
"notIn",
"startsWith",
"notStartsWith",
"endsWith",
"notEndsWith",
"greaterThan",
"lessThan",
"greaterOrEquals",
"lessOrEquals",
"ipGreaterThan",
"ipLessThan",
"ipEquals",
"ipNotEquals"
],
"type": "string"
},
"weekDays": {
"items": {
"enum": [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
],
"type": "string"
},
"minItems": 1,
"type": "array"
},
"weekDaysException": {
"items": {
"enum": [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
],
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"default": {
"default": false,
"type": "boolean"
},
"hitCounts": {
"type": "integer"
},
"id": {
"type": "string"
},
"name": {
"type": "string"
},
"rank": {
"type": "integer"
},
"state": {
"default": "enabled",
"enum": [
"enabled",
"disabled",
"monitor"
],
"type": "string"
}
},
"required": [
"name"
],
"type": "object"
}
},
"required": [
"rule"
],
"type": "object"
},
"type": "array"
},
"version": {
"type": "string"
}
},
"required": [
"response",
"version"
],
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
| true | true |
f7f4b2a0f35f2a6006db97542c3632c23998b2d6 | 731 | py | Python | src/peltak/extra/gitflow/commands/__init__.py | novopl/peltak | 7c8ac44f994d923091a534870960fdae1e15e95e | [
"Apache-2.0"
] | 6 | 2015-09-10T13:20:34.000Z | 2021-02-15T08:10:27.000Z | src/peltak/extra/gitflow/commands/__init__.py | novopl/peltak | 7c8ac44f994d923091a534870960fdae1e15e95e | [
"Apache-2.0"
] | 41 | 2015-09-09T12:44:55.000Z | 2021-06-01T23:25:56.000Z | src/peltak/extra/gitflow/commands/__init__.py | novopl/peltak | 7c8ac44f994d923091a534870960fdae1e15e95e | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-2020 Mateusz Klos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
#################################
``peltak [feature|task|release]``
#################################
peltak git-flow commands CLI definition.
"""
| 33.227273 | 74 | 0.686731 | true | true | |
f7f4b2a26f0964a8abf23111472ace6f079c8584 | 65,624 | py | Python | class_mbdata.py | lilianschuster/PyGEM | c805d09960f937fe6e35cdd1587f9089d4bec6b8 | [
"MIT"
] | null | null | null | class_mbdata.py | lilianschuster/PyGEM | c805d09960f937fe6e35cdd1587f9089d4bec6b8 | [
"MIT"
] | null | null | null | class_mbdata.py | lilianschuster/PyGEM | c805d09960f937fe6e35cdd1587f9089d4bec6b8 | [
"MIT"
] | null | null | null | """class of mass balance data and functions associated with manipulating the dataset to be in the proper format"""
# External libraries
import pandas as pd
import numpy as np
import calendar
import collections
import datetime
# Local libraries
import pygem_input as input
import pygemfxns_modelsetup as modelsetup
class MBData():
"""
Mass balance data properties and functions used to automatically retrieve data for calibration.
Attributes
----------
name : str
name of mass balance dataset.
ds_fp : str
file path
"""
def __init__(self,
name='wgms_d',
):
"""
Add variable name and specific properties associated with each variable.
"""
# Source of climate data
self.name = name
# Set parameters for ERA-Interim and CMIP5 netcdf files
if self.name == 'shean':
self.ds_fp = input.shean_fp
self.ds_fn = input.shean_fn
self.rgi_glacno_cn = input.shean_rgi_glacno_cn
self.mb_mwea_cn = input.shean_mb_cn
self.mb_mwea_err_cn = input.shean_mb_err_cn
self.t1_cn = input.shean_time1_cn
self.t2_cn = input.shean_time2_cn
self.area_cn = input.shean_area_cn
elif self.name == 'berthier':
self.ds_fp = input.berthier_fp
self.ds_fn = input.berthier_fn
self.rgi_glacno_cn = input.berthier_rgi_glacno_cn
self.mb_mwea_cn = input.berthier_mb_cn
self.mb_mwea_err_cn = input.berthier_mb_err_cn
self.t1_cn = input.berthier_time1_cn
self.t2_cn = input.berthier_time2_cn
self.area_cn = input.berthier_area_cn
elif self.name == 'braun':
self.ds_fp = input.braun_fp
self.ds_fn = input.braun_fn
self.rgi_glacno_cn = input.braun_rgi_glacno_cn
self.mb_mwea_cn = input.braun_mb_cn
self.mb_mwea_err_cn = input.braun_mb_err_cn
self.t1_cn = input.braun_time1_cn
self.t2_cn = input.braun_time2_cn
self.area_cn = input.braun_area_cn
elif self.name == 'mcnabb':
self.ds_fp = input.mcnabb_fp
self.ds_fn = input.mcnabb_fn
self.rgi_glacno_cn = input.mcnabb_rgiid_cn
self.mb_mwea_cn = input.mcnabb_mb_cn
self.mb_mwea_err_cn = input.mcnabb_mb_err_cn
self.t1_cn = input.mcnabb_time1_cn
self.t2_cn = input.mcnabb_time2_cn
self.area_cn = input.mcnabb_area_cn
elif self.name == 'larsen':
self.ds_fp = input.larsen_fp
self.ds_fn = input.larsen_fn
self.rgi_glacno_cn = input.larsen_rgiid_cn
self.mb_mwea_cn = input.larsen_mb_cn
self.mb_mwea_err_cn = input.larsen_mb_err_cn
self.t1_cn = input.larsen_time1_cn
self.t2_cn = input.larsen_time2_cn
self.area_cn = input.larsen_area_cn
elif self.name == 'brun':
self.data_fp = input.brun_fp
elif self.name == 'mauer':
self.ds_fp = input.mauer_fp
self.ds_fn = input.mauer_fn
self.rgi_glacno_cn = input.mauer_rgi_glacno_cn
self.mb_mwea_cn = input.mauer_mb_cn
self.mb_mwea_err_cn = input.mauer_mb_err_cn
self.t1_cn = input.mauer_time1_cn
self.t2_cn = input.mauer_time2_cn
elif self.name == 'wgms_d':
self.ds_fp = input.wgms_fp
self.ds_fn = input.wgms_d_fn_preprocessed
self.rgi_glacno_cn = input.wgms_rgi_glacno_cn
self.thickness_chg_cn = input.wgms_d_thickness_chg_cn
self.thickness_chg_err_cn = input.wgms_d_thickness_chg_err_cn
self.volume_chg_cn = input.wgms_d_volume_chg_cn
self.volume_chg_err_cn = input.wgms_d_volume_chg_err_cn
self.z1_cn = input.wgms_d_z1_cn
self.z2_cn = input.wgms_d_z2_cn
self.obs_type_cn = input.wgms_obs_type_cn
elif self.name == 'wgms_ee':
self.ds_fp = input.wgms_fp
self.ds_fn = input.wgms_ee_fn_preprocessed
self.rgi_glacno_cn = input.wgms_rgi_glacno_cn
self.mb_mwe_cn = input.wgms_ee_mb_cn
self.mb_mwe_err_cn = input.wgms_ee_mb_err_cn
self.t1_cn = input.wgms_ee_t1_cn
self.period_cn = input.wgms_ee_period_cn
self.z1_cn = input.wgms_ee_z1_cn
self.z2_cn = input.wgms_ee_z2_cn
self.obs_type_cn = input.wgms_obs_type_cn
elif self.name == 'cogley':
self.ds_fp = input.cogley_fp
self.ds_fn = input.cogley_fn_preprocessed
self.rgi_glacno_cn = input.cogley_rgi_glacno_cn
self.mass_chg_cn = input.cogley_mass_chg_cn
self.mass_chg_err_cn = input.cogley_mass_chg_err_cn
self.z1_cn = input.cogley_z1_cn
self.z2_cn = input.cogley_z2_cn
self.obs_type_cn = input.cogley_obs_type_cn
elif self.name == 'group':
self.ds_fp = input.mb_group_fp
self.ds_fn = input.mb_group_data_fn
self.ds_dict_fn = input.mb_group_dict_fn
self.rgi_regionO1_cn = 'rgi_regionO1'
self.t1_cn = input.mb_group_t1_cn
self.t2_cn = input.mb_group_t2_cn
def retrieve_mb(self, main_glac_rgi, main_glac_hyps, dates_table):
"""
Retrieve the mass balance for various datasets to be used in the calibration.
Parameters
----------
main_glac_rgi : pandas dataframe
dataframe containing relevant rgi glacier information
main_glac_hyps : pandas dataframe
dataframe containing glacier hypsometry
dates_table : pandas dataframe
dataframe containing dates of model run
Returns
-------
ds_output : pandas dataframe
dataframe of mass balance observations and other relevant information for calibration
"""
# Dictionary linking glacier number (glacno) to index for selecting elevation indices
glacnodict = dict(zip(main_glac_rgi['rgino_str'], main_glac_rgi.index.values))
# Column names of output
ds_output_cols = ['RGIId', 'glacno', 'group_name', 'obs_type', 'mb_mwe', 'mb_mwe_err', 'sla_m', 'z1_idx',
'z2_idx', 'z1', 'z2', 't1_idx', 't2_idx', 't1', 't2', 'area_km2', 'WGMS_ID']
# Avoid group data as processing is slightly different
if self.name is not 'group':
# Load all data
ds_all = pd.read_csv(self.ds_fp + self.ds_fn)
if str(ds_all.loc[0,self.rgi_glacno_cn]).startswith('RGI'):
ds_all['glacno'] = [str(x).split('-')[1] for x in ds_all[self.rgi_glacno_cn].values]
else:
ds_all['glacno'] = [str(int(x)).zfill(2) + '.' + str(int(np.round(x%1*10**5))).zfill(5)
for x in ds_all[self.rgi_glacno_cn]]
ds = ds_all.iloc[np.where(ds_all['glacno'].isin(list(main_glac_rgi.rgino_str.values)))[0],:].copy()
ds.reset_index(drop=True, inplace=True)
# Elevation indices
elev_bins = main_glac_hyps.columns.values.astype(int)
elev_bin_interval = elev_bins[1] - elev_bins[0]
# DATASET SPECIFIC CALCULATIONS
# ===== SHEAN GEODETIC DATA =====
if self.name in ['shean', 'berthier', 'braun']:
ds['z1_idx'] = (
(main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values != 0).argmax(axis=1).astype(int))
ds['z2_idx'] = (
(main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values.cumsum(1)).argmax(axis=1).astype(int))
# Lower and upper bin elevations [masl]
ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2
ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2
# Area [km2]
ds['area_km2'] = np.nan
for x in range(ds.shape[0]):
ds.loc[x,'area_km2'] = (
main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']],
ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())
# Time indices
ds['t1'] = ds[self.t1_cn].astype(np.float64)
ds['t2'] = ds[self.t2_cn].astype(np.float64)
ds['t1_year'] = ds['t1'].astype(int)
ds['t1_month'] = round(ds['t1'] % ds['t1_year'] * 12 + 1)
ds.loc[ds['t1_month'] == 13, 't1_year'] = ds.loc[ds['t1_month'] == 13, 't1_year'] + 1
ds.loc[ds['t1_month'] == 13, 't1_month'] = 1
# add 1 to account for the fact that January starts with value of 1
ds['t2_year'] = ds['t2'].astype(int)
ds['t2_month'] = round(ds['t2'] % ds['t2_year'] * 12)
ds.loc[ds['t2_month'] == 0, 't2_month'] = 1
# do not need to add one for t2 because we want the last full time step
# Remove data with dates outside of calibration period
year_decimal_min = dates_table.loc[0,'year'] + dates_table.loc[0,'month'] / 12
year_decimal_max = (dates_table.loc[dates_table.shape[0]-1,'year'] +
(dates_table.loc[dates_table.shape[0]-1,'month'] + 1) / 12)
ds = ds[ds['t1_year'] + ds['t1_month'] / 12 >= year_decimal_min]
ds = ds[ds['t2_year'] + ds['t2_month'] / 12 < year_decimal_max]
ds.reset_index(drop=True, inplace=True)
# Determine time indices (exclude spinup years, since massbal fxn discards spinup years)
ds['t1_idx'] = np.nan
ds['t2_idx'] = np.nan
for x in range(ds.shape[0]):
# if x == 10539:
# print(x, ds.loc[x,'RGIId'], ds.loc[x,'t1'], ds.loc[x,'t1_month'], ds.loc[x,'t2_month'])
ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
(ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
(ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
ds['t1_idx'] = ds['t1_idx'].astype(int)
# Specific mass balance [mwea]
ds['mb_mwe'] = ds[self.mb_mwea_cn] * (ds['t2'] - ds['t1'])
ds['mb_mwe_err'] = ds[self.mb_mwea_err_cn] * (ds['t2'] - ds['t1'])
# # Total mass change [Gt]
# ds['mb_gt'] = ds[self.mb_vol_cn] * (ds['t2'] - ds['t1']) * (1/1000)**3 * input.density_water / 1000
# ds['mb_gt_err'] = ds[self.mb_vol_err_cn] * (ds['t2'] - ds['t1']) * (1/1000)**3 * input.density_water / 1000
if 'obs_type' not in list(ds.columns.values):
# Observation type
ds['obs_type'] = 'mb_geo'
# Add columns with nan for things not in list
ds_addcols = [x for x in ds_output_cols if x not in ds.columns.values]
for colname in ds_addcols:
ds[colname] = np.nan
# # ===== BERTHIER =====
# if self.name == 'berthier':
# ds['z1_idx'] = (
# (main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values != 0).argmax(axis=1).astype(int))
# ds['z2_idx'] = (
# (main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values.cumsum(1)).argmax(axis=1).astype(int))
# # Lower and upper bin elevations [masl]
# ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2
# ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2
# # Area [km2]
# ds['area_km2'] = np.nan
# for x in range(ds.shape[0]):
# ds.loc[x,'area_km2'] = (
# main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']],
# ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())
# # Time indices
# ds['t1'] = ds[self.t1_cn]
# ds['t2'] = ds[self.t2_cn]
# print(ds)
# ds['t1_year'] = ds['t1'].astype(int)
# ds['t1_month'] = round(ds['t1'] % ds['t1_year'] * 12 + 1)
# # add 1 to account for the fact that January starts with value of 1
# ds['t2_year'] = ds['t2'].astype(int)
# ds['t2_month'] = round(ds['t2'] % ds['t2_year'] * 12)
# # do not need to add one for t2 because we want the last full time step
# # Remove data with dates outside of calibration period
# year_decimal_min = dates_table.loc[0,'year'] + dates_table.loc[0,'month'] / 12
# year_decimal_max = (dates_table.loc[dates_table.shape[0]-1,'year'] +
# (dates_table.loc[dates_table.shape[0]-1,'month'] + 1) / 12)
# ds = ds[ds['t1_year'] + ds['t1_month'] / 12 >= year_decimal_min]
# ds = ds[ds['t2_year'] + ds['t2_month'] / 12 <= year_decimal_max]
# ds.reset_index(drop=True, inplace=True)
# # Determine time indices (exclude spinup years, since massbal fxn discards spinup years)
# ds['t1_idx'] = np.nan
# ds['t2_idx'] = np.nan
# for x in range(ds.shape[0]):
# ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
# (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
# ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
# (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
# ds['t1_idx'] = ds['t1_idx'].astype(int)
# # Specific mass balance [mwea]
# print(ds[self.mb_mwea_cn])
# ds['mb_mwe'] = ds[self.mb_mwea_cn] * (ds['t2'] - ds['t1'])
# ds['mb_mwe_err'] = ds[self.mb_mwea_err_cn] * (ds['t2'] - ds['t1'])
# # Observation type
# ds['obs_type'] = 'mb_geo'
# # Add columns with nan for things not in list
# ds_addcols = [x for x in ds_output_cols if x not in ds.columns.values]
# for colname in ds_addcols:
# ds[colname] = np.nan
# ===== BRUN GEODETIC DATA =====
elif self.name == 'brun':
print('code brun')
# ===== MAUER GEODETIC DATA =====
elif self.name == 'mauer':
ds['z1_idx'] = (
(main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values != 0).argmax(axis=1).astype(int))
ds['z2_idx'] = (
(main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values.cumsum(1)).argmax(axis=1).astype(int))
# Lower and upper bin elevations [masl]
ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2
ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2
# Area [km2]
ds['area_km2'] = np.nan
for x in range(ds.shape[0]):
ds.loc[x,'area_km2'] = (
main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']],
ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())
# Time indices
ds['t1'] = ds[self.t1_cn]
ds['t2'] = ds[self.t2_cn]
ds['t1_year'] = ds['t1'].astype(int)
ds['t1_month'] = round(ds['t1'] % ds['t1_year'] * 12 + 1)
# add 1 to account for the fact that January starts with value of 1
ds.loc[ds['t1_month'] > 12, 't1_month'] = 12
ds['t2_year'] = ds['t2'].astype(int)
ds['t2_month'] = 2
# Remove data with dates outside of calibration period
year_decimal_min = dates_table.loc[0,'year'] + dates_table.loc[0,'month'] / 12
year_decimal_max = (dates_table.loc[dates_table.shape[0]-1,'year'] +
(dates_table.loc[dates_table.shape[0]-1,'month'] + 1) / 12)
ds = ds[ds['t1_year'] + ds['t1_month'] / 12 >= year_decimal_min]
ds = ds[ds['t2_year'] + ds['t2_month'] / 12 <= year_decimal_max]
ds.reset_index(drop=True, inplace=True)
# Determine time indices (exclude spinup years, since massbal fxn discards spinup years)
ds['t1_idx'] = np.nan
ds['t2_idx'] = np.nan
for x in range(ds.shape[0]):
ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
(ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
(ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
ds['t1_idx'] = ds['t1_idx'].astype(int)
# Specific mass balance [mwea]
ds['mb_mwe'] = ds[self.mb_mwea_cn] * (ds['t2'] - ds['t1'])
ds['mb_mwe_err'] = ds[self.mb_mwea_err_cn] * (ds['t2'] - ds['t1'])
# Observation type
ds['obs_type'] = 'mb_geo'
# ===== WGMS GEODETIC DATA =====
elif self.name == 'wgms_d':
ds['z1_idx'] = np.nan
ds['z2_idx'] = np.nan
ds.loc[ds[self.z1_cn] == 9999, 'z1_idx'] = (
(main_glac_hyps.iloc[ds.loc[ds[self.z1_cn] == 9999, 'glacno'].map(glacnodict)].values != 0)
.argmax(axis=1))
ds.loc[ds[self.z2_cn] == 9999, 'z2_idx'] = (
(main_glac_hyps.iloc[ds.loc[ds[self.z2_cn] == 9999, 'glacno'].map(glacnodict)].values.cumsum(1))
.argmax(axis=1))
ds.loc[ds[self.z1_cn] != 9999, 'z1_idx'] = (
((np.tile(elev_bins, (ds.loc[ds[self.z1_cn] != 9999, self.z1_cn].shape[0],1)) -
ds.loc[ds[self.z1_cn] != 9999, self.z1_cn][:,np.newaxis]) > 0).argmax(axis=1))
ds.loc[ds[self.z2_cn] != 9999, 'z2_idx'] = (
((np.tile(elev_bins, (ds.loc[ds[self.z2_cn] != 9999, self.z2_cn].shape[0],1)) -
ds.loc[ds[self.z2_cn] != 9999, self.z2_cn][:,np.newaxis]) > 0).argmax(axis=1) - 1)
ds['z1_idx'] = ds['z1_idx'].values.astype(int)
ds['z2_idx'] = ds['z2_idx'].values.astype(int)
# Lower and upper bin elevations [masl]
ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2
ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2
# Area [km2]
# use WGMS area when provided; otherwise use area from RGI
ds['area_km2_rgi'] = np.nan
for x in range(ds.shape[0]):
ds.loc[x,'area_km2_rgi'] = (
main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']],
ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())
ds['area_km2'] = np.nan
ds.loc[ds.AREA_SURVEY_YEAR.isnull(), 'area_km2'] = ds.loc[ds.AREA_SURVEY_YEAR.isnull(), 'area_km2_rgi']
ds.loc[ds.AREA_SURVEY_YEAR.notnull(), 'area_km2'] = ds.loc[ds.AREA_SURVEY_YEAR.notnull(),
'AREA_SURVEY_YEAR']
# Time indices
# remove data that does not have reference date or survey data
ds = ds[np.isnan(ds['REFERENCE_DATE']) == False]
ds = ds[np.isnan(ds['SURVEY_DATE']) == False]
ds.reset_index(drop=True, inplace=True)
# Extract date information
ds['t1_year'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[:4].astype(int)
ds['t1_month'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[4:6].astype(int)
ds['t1_day'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[6:].astype(int)
ds['t2_year'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[:4].astype(int)
ds['t2_month'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[4:6].astype(int)
ds['t2_day'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[6:].astype(int)
# if month/day unknown for start or end period, then replace with water year
# Add latitude
latdict = dict(zip(main_glac_rgi['RGIId'], main_glac_rgi['CenLat']))
ds['CenLat'] = ds['RGIId'].map(latdict)
ds['lat_category'] = np.nan
ds.loc[ds['CenLat'] >= input.lat_threshold, 'lat_category'] = 'northernmost'
ds.loc[(ds['CenLat'] < input.lat_threshold) & (ds['CenLat'] > 0), 'lat_category'] = 'north'
ds.loc[(ds['CenLat'] <= 0) & (ds['CenLat'] > -1*input.lat_threshold), 'lat_category'] = 'south'
ds.loc[ds['CenLat'] <= -1*input.lat_threshold, 'lat_category'] = 'southernmost'
ds['months_wintersummer'] = ds['lat_category'].map(input.monthdict)
ds['winter_begin'] = ds['months_wintersummer'].apply(lambda x: x[0])
ds['winter_end'] = ds['months_wintersummer'].apply(lambda x: x[1])
ds['summer_begin'] = ds['months_wintersummer'].apply(lambda x: x[2])
ds['summer_end'] = ds['months_wintersummer'].apply(lambda x: x[3])
ds.loc[ds['t1_month'] == 99, 't1_month'] = ds.loc[ds['t1_month'] == 99, 'winter_begin']
ds.loc[ds['t1_day'] == 99, 't1_day'] = 1
ds.loc[ds['t2_month'] == 99, 't2_month'] = ds.loc[ds['t2_month'] == 99, 'winter_begin'] - 1
for x in range(ds.shape[0]):
if ds.loc[x, 't2_day'] == 99:
try:
ds.loc[x, 't2_day'] = (
dates_table.loc[(ds.loc[x, 't2_year'] == dates_table['year']) &
(ds.loc[x, 't2_month'] == dates_table['month']), 'daysinmonth']
.values[0])
except:
ds.loc[x, 't2_day'] = 28
# Replace poor values of months
ds['t1_month'] = ds['t1_month'].map(lambda x: x if x <=12 else x%12)
ds['t2_month'] = ds['t2_month'].map(lambda x: x if x <=12 else x%12)
# Replace poor values of days
ds['t1_daysinmonth'] = (
[calendar.monthrange(ds.loc[x,'t1_year'], ds.loc[x,'t1_month'])[1] for x in range(ds.shape[0])])
ds['t2_daysinmonth'] = (
[calendar.monthrange(ds.loc[x,'t2_year'], ds.loc[x,'t2_month'])[1] for x in range(ds.shape[0])])
ds['t1_day'] = (ds.apply(lambda x: x['t1_day'] if x['t1_day'] <= x['t1_daysinmonth']
else x['t1_daysinmonth'], axis=1))
ds['t2_day'] = (ds.apply(lambda x: x['t2_day'] if x['t2_day'] <= x['t2_daysinmonth']
else x['t2_daysinmonth'], axis=1))
# Calculate decimal year and drop measurements outside of calibration period
ds['t1_datetime'] = pd.to_datetime(
pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))
ds['t2_datetime'] = pd.to_datetime(
pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))
ds['t1_doy'] = ds.t1_datetime.dt.strftime("%j").astype(float)
ds['t2_doy'] = ds.t2_datetime.dt.strftime("%j").astype(float)
ds['t1_daysinyear'] = (
(pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':12, 'day':31})) -
pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':1, 'day':1}))).dt.days + 1)
ds['t2_daysinyear'] = (
(pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':12, 'day':31})) -
pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':1, 'day':1}))).dt.days + 1)
ds['t1'] = ds.t1_year + ds.t1_doy / ds.t1_daysinyear
ds['t2'] = ds.t2_year + ds.t2_doy / ds.t2_daysinyear
end_datestable = dates_table.loc[dates_table.shape[0]-1, 'date']
end_datetime = datetime.datetime(end_datestable.year, end_datestable.month + 1, end_datestable.day)
ds = ds[ds['t1_datetime'] >= dates_table.loc[0, 'date']]
ds = ds[ds['t2_datetime'] < end_datetime]
ds.reset_index(drop=True, inplace=True)
# Time indices
# exclude spinup years, since massbal fxn discards spinup years
ds['t1_idx'] = np.nan
ds['t2_idx'] = np.nan
for x in range(ds.shape[0]):
ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
(ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
(ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
# Specific mass balance [mwe]
# if thickness change is available, then compute the specific mass balance with the thickness change
# otherwise, use the volume change and area to estimate the specific mass balance
# using thickness change
ds['mb_mwe'] = ds[self.thickness_chg_cn] / 1000 * input.density_ice / input.density_water
ds['mb_mwe_err'] = ds[self.thickness_chg_err_cn] / 1000 * input.density_ice / input.density_water
# using volume change (note: units volume change [1000 m3] and area [km2])
ds.loc[ds.mb_mwe.isnull(), 'mb_mwe'] = (
ds.loc[ds.mb_mwe.isnull(), self.volume_chg_cn] * 1000 / ds.loc[ds.mb_mwe.isnull(), 'area_km2'] *
(1/1000)**2 * input.density_ice / input.density_water)
ds.loc[ds.mb_mwe.isnull(), 'mb_mwe'] = (
ds.loc[ds.mb_mwe.isnull(), self.volume_chg_err_cn] * 1000 / ds.loc[ds.mb_mwe.isnull(), 'area_km2'] *
(1/1000)**2 * input.density_ice / input.density_water)
# Observation type
ds['obs_type'] = 'mb_geo'
# ===== WGMS GLACIOLOGICAL DATA =====
elif self.name == 'wgms_ee':
ds['z1_idx'] = np.nan
ds['z2_idx'] = np.nan
ds.loc[ds[self.z1_cn] == 9999, 'z1_idx'] = (
(main_glac_hyps.iloc[ds.loc[ds[self.z1_cn] == 9999, 'glacno'].map(glacnodict)].values != 0)
.argmax(axis=1))
ds.loc[ds[self.z2_cn] == 9999, 'z2_idx'] = (
(main_glac_hyps.iloc[ds.loc[ds[self.z2_cn] == 9999, 'glacno'].map(glacnodict)].values.cumsum(1))
.argmax(axis=1))
ds.loc[ds[self.z1_cn] != 9999, 'z1_idx'] = (
((np.tile(elev_bins, (ds.loc[ds[self.z1_cn] != 9999, self.z1_cn].shape[0],1)) -
ds.loc[ds[self.z1_cn] != 9999, self.z1_cn][:,np.newaxis]) > 0).argmax(axis=1))
ds.loc[ds[self.z2_cn] != 9999, 'z2_idx'] = (
((np.tile(elev_bins, (ds.loc[ds[self.z2_cn] != 9999, self.z2_cn].shape[0],1)) -
ds.loc[ds[self.z2_cn] != 9999, self.z2_cn][:,np.newaxis]) > 0).argmax(axis=1) - 1)
ds['z1_idx'] = ds['z1_idx'].values.astype(int)
ds['z2_idx'] = ds['z2_idx'].values.astype(int)
# Lower and upper bin elevations [masl]
ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2
ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2
# Area [km2]
ds['area_km2'] = np.nan
for x in range(ds.shape[0]):
ds.loc[x,'area_km2'] = (
main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']],
ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())
ds = ds[ds['area_km2'] > 0]
ds.reset_index(drop=True, inplace=True)
# Time indices
# winter and summer balances typically have the same data for 'BEGIN_PERIOD' and 'END_PERIOD' as the annual
# measurements, so need to set these dates manually
# Remove glaciers without begin or end period
ds = ds.drop(np.where(np.isnan(ds['BEGIN_PERIOD'].values))[0].tolist(), axis=0)
ds = ds.drop(np.where(np.isnan(ds['END_PERIOD'].values))[0].tolist(), axis=0)
ds.reset_index(drop=True, inplace=True)
ds['t1_year'] = ds['BEGIN_PERIOD'].astype(str).str.split('.').str[0].str[:4].astype(int)
ds['t1_month'] = ds['BEGIN_PERIOD'].astype(str).str.split('.').str[0].str[4:6].astype(int)
ds['t1_day'] = ds['BEGIN_PERIOD'].astype(str).str.split('.').str[0].str[6:].astype(int)
ds['t2_year'] = ds['END_PERIOD'].astype(str).str.split('.').str[0].str[:4].astype(int)
ds['t2_month'] = ds['END_PERIOD'].astype(str).str.split('.').str[0].str[4:6].astype(int)
ds['t2_day'] = ds['END_PERIOD'].astype(str).str.split('.').str[0].str[6:].astype(int)
# if annual measurement and month/day unknown for start or end period, then replace with water year
# Add latitude
latdict = dict(zip(main_glac_rgi['RGIId'], main_glac_rgi['CenLat']))
ds['CenLat'] = ds['RGIId'].map(latdict)
ds['lat_category'] = np.nan
ds.loc[ds['CenLat'] >= input.lat_threshold, 'lat_category'] = 'northernmost'
ds.loc[(ds['CenLat'] < input.lat_threshold) & (ds['CenLat'] > 0), 'lat_category'] = 'north'
ds.loc[(ds['CenLat'] <= 0) & (ds['CenLat'] > -1*input.lat_threshold), 'lat_category'] = 'south'
ds.loc[ds['CenLat'] <= -1*input.lat_threshold, 'lat_category'] = 'southernmost'
ds['months_wintersummer'] = ds['lat_category'].map(input.monthdict)
ds['winter_begin'] = ds['months_wintersummer'].apply(lambda x: x[0])
ds['winter_end'] = ds['months_wintersummer'].apply(lambda x: x[1])
ds['summer_begin'] = ds['months_wintersummer'].apply(lambda x: x[2])
ds['summer_end'] = ds['months_wintersummer'].apply(lambda x: x[3])
# annual start
ds.loc[ds['t1_month'] == 99, 't1_month'] = ds.loc[ds['t1_month'] == 99, 'winter_begin']
ds.loc[ds['t1_day'] == 99, 't1_day'] = 1
ds.loc[ds['t2_month'] == 99, 't2_month'] = ds.loc[ds['t2_month'] == 99, 'winter_begin'] - 1
for x in range(ds.shape[0]):
if ds.loc[x, 't2_day'] == 99:
try:
ds.loc[x, 't2_day'] = (
dates_table.loc[(ds.loc[x, 't2_year'] == dates_table['year']) &
(ds.loc[x, 't2_month'] == dates_table['month']), 'daysinmonth']
.values[0])
except:
ds.loc[x, 't2_day'] = 28
# If period is summer/winter, adjust dates accordingly
for x in range(ds.shape[0]):
if (((ds.loc[x, 'lat_category'] == 'north') or (ds.loc[x, 'lat_category'] == 'northern')) and
(ds.loc[x, 'period'] == 'summer')):
ds.loc[x, 't1_year'] = ds.loc[x, 't1_year'] + 1
ds.loc[x, 't1_month'] = ds.loc[x, 'summer_begin']
ds.loc[x, 't2_month'] = ds.loc[x, 'summer_end']
elif (((ds.loc[x, 'lat_category'] == 'south') or (ds.loc[x, 'lat_category'] == 'southernmost')) and
(ds.loc[x, 'period'] == 'summer')):
ds.loc[x, 't1_month'] = ds.loc[x, 'summer_begin']
ds.loc[x, 't2_month'] = ds.loc[x, 'summer_end']
elif (((ds.loc[x, 'lat_category'] == 'north') or (ds.loc[x, 'lat_category'] == 'northern')) and
(ds.loc[x, 'period'] == 'winter')):
ds.loc[x, 't1_month'] = ds.loc[x, 'winter_begin']
ds.loc[x, 't2_month'] = ds.loc[x, 'winter_end']
elif (((ds.loc[x, 'lat_category'] == 'south') or (ds.loc[x, 'lat_category'] == 'southernmost')) and
(ds.loc[x, 'period'] == 'summer')):
ds.loc[x, 't1_year'] = ds.loc[x, 't1_year'] + 1
ds.loc[x, 't1_month'] = ds.loc[x, 'winter_begin']
ds.loc[x, 't2_month'] = ds.loc[x, 'winter_end']
ds.loc[x, 't1_day'] = 1
ds.loc[x, 't2_day'] = calendar.monthrange(ds.loc[x, 't2_year'], ds.loc[x, 't2_month'])[1]
# Replace poor values of months
ds['t1_month'] = ds['t1_month'].map(lambda x: x if x <=12 else x%12)
ds['t2_month'] = ds['t2_month'].map(lambda x: x if x <=12 else x%12)
# Calculate decimal year and drop measurements outside of calibration period
ds['t1_datetime'] = pd.to_datetime(
pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))
ds['t2_datetime'] = pd.to_datetime(
pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))
ds['t1_doy'] = ds.t1_datetime.dt.strftime("%j").astype(float)
ds['t2_doy'] = ds.t2_datetime.dt.strftime("%j").astype(float)
ds['t1_daysinyear'] = (
(pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':12, 'day':31})) -
pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':1, 'day':1}))).dt.days + 1)
ds['t2_daysinyear'] = (
(pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':12, 'day':31})) -
pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':1, 'day':1}))).dt.days + 1)
ds['t1'] = ds.t1_year + ds.t1_doy / ds.t1_daysinyear
ds['t2'] = ds.t2_year + ds.t2_doy / ds.t2_daysinyear
end_datestable = dates_table.loc[dates_table.shape[0]-1, 'date']
end_datetime = datetime.datetime(end_datestable.year, end_datestable.month + 1, end_datestable.day)
ds = ds[ds['t1_datetime'] >= dates_table.loc[0, 'date']]
ds = ds[ds['t2_datetime'] < end_datetime]
ds.reset_index(drop=True, inplace=True)
# Annual, summer, and winter time indices
# exclude spinup years, since massbal fxn discards spinup years
ds['t1_idx'] = np.nan
ds['t2_idx'] = np.nan
for x in range(ds.shape[0]):
ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
(ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
(ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
# Specific mass balance [mwe]
ds['mb_mwe'] = ds[self.mb_mwe_cn] / 1000
ds['mb_mwe_err'] = ds[self.mb_mwe_err_cn] / 1000
# # Total mass change [Gt]
# ds['mb_gt'] = ds[self.mb_mwe_cn] / 1000 * ds['area_km2'] * 1000**2 * input.density_water / 1000 / 10**9
# ds['mb_gt_err'] = (ds[self.mb_mwe_err_cn] / 1000 * ds['area_km2'] * 1000**2 * input.density_water / 1000
# / 10**9)
# Observation type
ds['obs_type'] = 'mb_glac'
# ===== WGMS GLACIOLOGICAL DATA =====
elif self.name == 'cogley':
ds['z1_idx'] = np.nan
ds['z2_idx'] = np.nan
ds.loc[ds[self.z1_cn] == 9999, 'z1_idx'] = (
(main_glac_hyps.iloc[ds.loc[ds[self.z1_cn] == 9999, 'glacno'].map(glacnodict)].values != 0)
.argmax(axis=1))
ds.loc[ds[self.z2_cn] == 9999, 'z2_idx'] = (
(main_glac_hyps.iloc[ds.loc[ds[self.z2_cn] == 9999, 'glacno'].map(glacnodict)].values.cumsum(1))
.argmax(axis=1))
ds.loc[ds[self.z1_cn] != 9999, 'z1_idx'] = (
((np.tile(elev_bins, (ds.loc[ds[self.z1_cn] != 9999, self.z1_cn].shape[0],1)) -
ds.loc[ds[self.z1_cn] != 9999, self.z1_cn][:,np.newaxis]) > 0).argmax(axis=1))
ds.loc[ds[self.z2_cn] != 9999, 'z2_idx'] = (
((np.tile(elev_bins, (ds.loc[ds[self.z2_cn] != 9999, self.z2_cn].shape[0],1)) -
ds.loc[ds[self.z2_cn] != 9999, self.z2_cn][:,np.newaxis]) > 0).argmax(axis=1) - 1)
ds['z1_idx'] = ds['z1_idx'].values.astype(int)
ds['z2_idx'] = ds['z2_idx'].values.astype(int)
# Lower and upper bin elevations [masl]
ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2
ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2
# Area [km2]
# use WGMS area when provided; otherwise use area from RGI
ds['area_km2_rgi'] = np.nan
for x in range(ds.shape[0]):
ds.loc[x,'area_km2_rgi'] = (
main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']],
ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())
# Time indices
ds['t1_year'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[:4].astype(int)
ds['t1_month'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[4:6].astype(int)
ds['t1_day'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[6:].astype(int)
ds['t2_year'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[:4].astype(int)
ds['t2_month'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[4:6].astype(int)
ds['t2_day'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[6:].astype(int)
# if month/day unknown for start or end period, then replace with water year
# Add latitude
latdict = dict(zip(main_glac_rgi['RGIId'], main_glac_rgi['CenLat']))
ds['CenLat'] = ds['RGIId'].map(latdict)
ds['lat_category'] = np.nan
ds.loc[ds['CenLat'] >= input.lat_threshold, 'lat_category'] = 'northernmost'
ds.loc[(ds['CenLat'] < input.lat_threshold) & (ds['CenLat'] > 0), 'lat_category'] = 'north'
ds.loc[(ds['CenLat'] <= 0) & (ds['CenLat'] > -1*input.lat_threshold), 'lat_category'] = 'south'
ds.loc[ds['CenLat'] <= -1*input.lat_threshold, 'lat_category'] = 'southernmost'
ds['months_wintersummer'] = ds['lat_category'].map(input.monthdict)
ds['winter_begin'] = ds['months_wintersummer'].apply(lambda x: x[0])
ds['winter_end'] = ds['months_wintersummer'].apply(lambda x: x[1])
ds['summer_begin'] = ds['months_wintersummer'].apply(lambda x: x[2])
ds['summer_end'] = ds['months_wintersummer'].apply(lambda x: x[3])
ds.loc[ds['t1_month'] == 99, 't1_month'] = ds.loc[ds['t1_month'] == 99, 'winter_begin']
ds.loc[ds['t1_day'] == 99, 't1_day'] = 1
ds.loc[ds['t2_month'] == 99, 't2_month'] = ds.loc[ds['t2_month'] == 99, 'winter_begin'] - 1
for x in range(ds.shape[0]):
if ds.loc[x, 't2_day'] == 99:
try:
ds.loc[x, 't2_day'] = (
dates_table.loc[(ds.loc[x, 't2_year'] == dates_table['year']) &
(ds.loc[x, 't2_month'] == dates_table['month']), 'daysinmonth']
.values[0])
except:
ds.loc[x, 't2_day'] = 28
# Calculate decimal year and drop measurements outside of calibration period
ds['t1_datetime'] = pd.to_datetime(
pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))
ds['t2_datetime'] = pd.to_datetime(
pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))
ds['t1_doy'] = ds.t1_datetime.dt.strftime("%j").astype(float)
ds['t2_doy'] = ds.t2_datetime.dt.strftime("%j").astype(float)
ds['t1_daysinyear'] = (
(pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':12, 'day':31})) -
pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':1, 'day':1}))).dt.days + 1)
ds['t2_daysinyear'] = (
(pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':12, 'day':31})) -
pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':1, 'day':1}))).dt.days + 1)
ds['t1'] = ds.t1_year + ds.t1_doy / ds.t1_daysinyear
ds['t2'] = ds.t2_year + ds.t2_doy / ds.t2_daysinyear
end_datestable = dates_table.loc[dates_table.shape[0]-1, 'date']
end_datetime = datetime.datetime(end_datestable.year, end_datestable.month + 1, end_datestable.day)
ds = ds[ds['t1_datetime'] >= dates_table.loc[0, 'date']]
ds = ds[ds['t2_datetime'] < end_datetime]
ds.reset_index(drop=True, inplace=True)
# Time indices
# exclude spinup years, since massbal fxn discards spinup years
ds['t1_idx'] = np.nan
ds['t2_idx'] = np.nan
for x in range(ds.shape[0]):
ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
(ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
(ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
# Specific mass balance [mwe]
ds['mb_mwe'] = ds[self.mass_chg_cn] / input.density_water * (ds['t2'] - ds['t1'])
ds['mb_mwe_err'] = ds[self.mass_chg_err_cn] / input.density_water * (ds['t2'] - ds['t1'])
# Observation type
ds['obs_type'] = 'mb_geo'
# ===== LARSEN OR MCNABB GEODETIC MASS BALANCE =====
elif self.name == 'mcnabb' or self.name == 'larsen':
ds['z1_idx'] = (
(main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values != 0).argmax(axis=1).astype(int))
ds['z2_idx'] = (
(main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values.cumsum(1)).argmax(axis=1).astype(int))
# Lower and upper bin elevations [masl]
ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2
ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2
# Area [km2]
ds['area_km2'] = np.nan
for x in range(ds.shape[0]):
ds.loc[x,'area_km2'] = (
main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']],
ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())
# Time
ds['t1_year'] = [int(str(x)[0:4]) for x in ds[self.t1_cn].values]
ds['t1_month'] = [int(str(x)[4:6]) for x in ds[self.t1_cn].values]
ds['t1_day'] = [int(str(x)[6:]) for x in ds[self.t1_cn].values]
ds['t2_year'] = [int(str(x)[0:4]) for x in ds[self.t2_cn].values]
ds['t2_month'] = [int(str(x)[4:6]) for x in ds[self.t2_cn].values]
ds['t2_day'] = [int(str(x)[6:]) for x in ds[self.t2_cn].values]
ds['t1_daysinmonth'] = ds.apply(lambda row: modelsetup.daysinmonth(row['t1_year'], row['t1_month']), axis=1)
ds['t2_daysinmonth'] = ds.apply(lambda row: modelsetup.daysinmonth(row['t2_year'], row['t2_month']), axis=1)
ds['t1_datetime'] = pd.to_datetime(
pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))
ds['t2_datetime'] = pd.to_datetime(
pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))
ds['t1'] = ds['t1_year'] + (ds['t1_month'] + ds['t1_day'] / ds['t1_daysinmonth']) / 12
ds['t2'] = ds['t2_year'] + (ds['t2_month'] + ds['t2_day'] / ds['t2_daysinmonth']) / 12
# Remove data with dates outside of calibration period
year_decimal_min = dates_table.loc[0,'year'] + dates_table.loc[0,'month'] / 12
year_decimal_max = (dates_table.loc[dates_table.shape[0]-1,'year'] +
(dates_table.loc[dates_table.shape[0]-1,'month'] + 1) / 12)
ds = ds[ds['t1_year'] + ds['t1_month'] / 12 >= year_decimal_min]
ds = ds[ds['t2_year'] + ds['t2_month'] / 12 < year_decimal_max]
ds.reset_index(drop=True, inplace=True)
# Determine time indices (exclude spinup years, since massbal fxn discards spinup years)
ds['t1_idx'] = np.nan
ds['t2_idx'] = np.nan
for x in range(ds.shape[0]):
ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
(ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
(ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
ds['t1_idx'] = ds['t1_idx'].astype(int)
# Specific mass balance [mwea]
ds['mb_mwe'] = ds[self.mb_mwea_cn] * (ds['t2'] - ds['t1'])
ds['mb_mwe_err'] = ds[self.mb_mwea_err_cn] * (ds['t2'] - ds['t1'])
# Total mass change [Gt]
# ds['mb_gt'] = ds[self.mb_vol_cn] * (ds['t2'] - ds['t1']) * (1/1000)**3 * input.density_water / 1000
# ds['mb_gt_err'] = ds[self.mb_vol_err_cn] * (ds['t2'] - ds['t1']) * (1/1000)**3 * input.density_water / 1000
# Observation type
ds['obs_type'] = 'mb_geo'
# ====== GROUP DATA ======
elif self.name == 'group':
# Load all data
ds_all = pd.read_csv(self.ds_fp + self.ds_fn, encoding='latin1')
# Dictionary linking group_names with the RGIIds
ds_dict_raw = pd.read_csv(self.ds_fp + self.ds_dict_fn)
ds_dict = dict(zip(ds_dict_raw['RGIId'], ds_dict_raw['group_name']))
# For each unique group name identify all glaciers associated with the group and test if all those glaciers
# are included in the model run via main_glac_rgi
group_names_unique = list(set(ds_dict.values()))
ds_dict_keyslist = [[] for x in group_names_unique]
for n, group in enumerate(group_names_unique):
ds_dict_keyslist[n] = [group, [k for k, v in ds_dict.items() if v == group]]
ds_all['glaciers_present'] = set(ds_dict_keyslist[n][1]).issubset(main_glac_rgi.RGIId.values.tolist())
ds_all.loc[n, 'first_RGIId'] = ds_dict_keyslist[n][1][0]
# Remove groups where all glaciers are not included
ds = ds_all[ds_all.glaciers_present == True].copy()
ds.reset_index(drop=True, inplace=True)
# Time indices
ds['t1_year'] = ds[self.t1_cn].astype(str).str.split('.').str[0].str[:4].astype(int)
ds['t1_month'] = ds[self.t1_cn].astype(str).str.split('.').str[0].str[4:6].astype(int)
ds['t1_day'] = ds[self.t1_cn].astype(str).str.split('.').str[0].str[6:].astype(int)
ds['t2_year'] = ds[self.t2_cn].astype(str).str.split('.').str[0].str[:4].astype(int)
ds['t2_month'] = ds[self.t2_cn].astype(str).str.split('.').str[0].str[4:6].astype(int)
ds['t2_day'] = ds[self.t2_cn].astype(str).str.split('.').str[0].str[6:].astype(int)
# if month/day unknown for start or end period, then replace with water year
# Add latitude
latdict = dict(zip(main_glac_rgi['RGIId'], main_glac_rgi['CenLat']))
ds['CenLat'] = ds['first_RGIId'].map(latdict)
ds['lat_category'] = np.nan
ds.loc[ds['CenLat'] >= input.lat_threshold, 'lat_category'] = 'northernmost'
ds.loc[(ds['CenLat'] < input.lat_threshold) & (ds['CenLat'] > 0), 'lat_category'] = 'north'
ds.loc[(ds['CenLat'] <= 0) & (ds['CenLat'] > -1*input.lat_threshold), 'lat_category'] = 'south'
ds.loc[ds['CenLat'] <= -1*input.lat_threshold, 'lat_category'] = 'southernmost'
ds['months_wintersummer'] = ds['lat_category'].map(input.monthdict)
ds['winter_begin'] = ds['months_wintersummer'].apply(lambda x: x[0])
ds['winter_end'] = ds['months_wintersummer'].apply(lambda x: x[1])
ds['summer_begin'] = ds['months_wintersummer'].apply(lambda x: x[2])
ds['summer_end'] = ds['months_wintersummer'].apply(lambda x: x[3])
ds.loc[ds['t1_month'] == 99, 't1_month'] = ds.loc[ds['t1_month'] == 99, 'winter_begin']
ds.loc[ds['t1_day'] == 99, 't1_day'] = 1
ds.loc[ds['t2_month'] == 99, 't2_month'] = ds.loc[ds['t2_month'] == 99, 'winter_begin'] - 1
for x in range(ds.shape[0]):
if ds.loc[x, 't2_day'] == 99:
try:
ds.loc[x, 't2_day'] = (
dates_table.loc[(ds.loc[x, 't2_year'] == dates_table['year']) &
(ds.loc[x, 't2_month'] == dates_table['month']), 'daysinmonth']
.values[0])
except:
ds.loc[x, 't2_day'] = 28
# Calculate decimal year and drop measurements outside of calibration period
ds['t1_datetime'] = pd.to_datetime(
pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))
ds['t2_datetime'] = pd.to_datetime(
pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))
ds['t1_doy'] = ds.t1_datetime.dt.strftime("%j").astype(float)
ds['t2_doy'] = ds.t2_datetime.dt.strftime("%j").astype(float)
ds['t1_daysinyear'] = (
(pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':12, 'day':31})) -
pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':1, 'day':1}))).dt.days + 1)
ds['t2_daysinyear'] = (
(pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':12, 'day':31})) -
pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':1, 'day':1}))).dt.days + 1)
ds['t1'] = ds.t1_year + ds.t1_doy / ds.t1_daysinyear
ds['t2'] = ds.t2_year + ds.t2_doy / ds.t2_daysinyear
end_datestable = dates_table.loc[dates_table.shape[0]-1, 'date']
end_datetime = datetime.datetime(end_datestable.year, end_datestable.month + 1, end_datestable.day)
ds = ds[ds['t1_datetime'] >= dates_table.loc[0, 'date']]
ds = ds[ds['t2_datetime'] < end_datetime]
ds.reset_index(drop=True, inplace=True)
# Time indices
# exclude spinup years, since massbal fxn discards spinup years
ds['t1_idx'] = np.nan
ds['t2_idx'] = np.nan
for x in range(ds.shape[0]):
ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
(ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
(ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
# Mass balance [mwe]
ds['mb_mwe'] = np.nan
ds['mb_mwe_err'] = np.nan
ds.loc[ds['dhdt_ma'].notnull(), 'mb_mwe'] = (
ds.loc[ds['dhdt_ma'].notnull(), 'dhdt_ma'] * input.density_ice / input.density_water *
(ds['t2'] - ds['t1']))
ds.loc[ds['dhdt_ma'].notnull(), 'mb_mwe_err'] = (
ds.loc[ds['dhdt_ma'].notnull(), 'dhdt_unc_ma'] * input.density_ice / input.density_water *
(ds['t2'] - ds['t1']))
# Add columns with nan for things not in list
ds_addcols = [x for x in ds_output_cols if x not in ds.columns.values]
for colname in ds_addcols:
ds[colname] = np.nan
# Select output
ds_output = ds[ds_output_cols].sort_values(['glacno', 't1_idx'])
ds_output.reset_index(drop=True, inplace=True)
return ds_output
def select_best_mb(cal_data):
    """
    Retrieve the 'best' mass balance observation for each glacier.

    Direct geodetic observations ('mb_geo') are preferred over extrapolated values, and among the
    remaining candidates the measurement spanning the longest time period is retained.

    Parameters
    ----------
    cal_data : pandas.DataFrame
        Mass balance observations; must contain 'RGIId', 't1', 't2', and 'obs_type' columns.
        A 'dt' column (t2 - t1) is added to this dataframe as a side effect.

    Returns
    -------
    cal_data_best : pandas.DataFrame
        Dataframe of 'best' mass balance observations (one row per glacier, plus possible ties in
        time span) and other relevant information for calibration, sorted by RGIId.
    """
    cal_data['dt'] = cal_data['t2'] - cal_data['t1']
    rgiids_count = collections.Counter(cal_data.RGIId.values)
    cal_data_rgiids_all = list(cal_data.RGIId.values)
    rgiids_multiple = []
    rgiids_single_idx = []
    for rgiid, count in rgiids_count.items():
        if count > 1:
            rgiids_multiple.append(rgiid)
        else:
            # list.index() returns the POSITION of the (only) occurrence
            rgiids_single_idx.append(cal_data_rgiids_all.index(rgiid))
    rgiids_multiple = sorted(rgiids_multiple)
    rgiids_single_idx = sorted(rgiids_single_idx)
    # Select all data with a single observation.
    # Use iloc (positional) rather than loc (label-based): the indices gathered above are
    # positions, which only coincide with labels when the index is a default RangeIndex.
    cal_data_best = cal_data.iloc[rgiids_single_idx, :]
    # Append 'best' value for glaciers with multiple observations
    for rgiid in rgiids_multiple:
        cal_data_multiple = cal_data[cal_data['RGIId'] == rgiid]
        # Prefer direct observations over extrapolated values
        if 'mb_geo' in list(cal_data_multiple.obs_type.values):
            cal_data_multiple = cal_data_multiple[cal_data_multiple.obs_type == 'mb_geo']
        # Select the longest time series (ties are all retained, as before)
        cal_data_append = cal_data_multiple[cal_data_multiple.dt == cal_data_multiple.dt.max()]
        cal_data_best = pd.concat([cal_data_best, cal_data_append], axis=0)
    cal_data_best = cal_data_best.sort_values(by=['RGIId'])
    cal_data_best.reset_index(inplace=True, drop=True)
    return cal_data_best
#%% Testing
if __name__ == '__main__':
    # ----- Glacier selection -----
    rgi_regionsO1 = [1]
    rgi_glac_number = 'all'
    glac_no = input.glac_no
    startyear = 1950
    endyear = 2018

    # Select glaciers
    main_glac_rgi = modelsetup.selectglaciersrgitable(rgi_regionsO1=rgi_regionsO1, rgi_regionsO2='all',
                                                      rgi_glac_number=rgi_glac_number, glac_no=input.glac_no)
    # Glacier hypsometry [km**2], total area
    main_glac_hyps = modelsetup.import_Husstable(main_glac_rgi, input.hyps_filepath, input.hyps_filedict,
                                                 input.hyps_colsdrop)
    # Determine dates_table_idx that coincides with data (no spinup; water-year convention 3)
    dates_table = modelsetup.datesmodelrun(startyear, endyear, spinupyears=0, option_wateryear=3)
    elev_bins = main_glac_hyps.columns.values.astype(int)
    elev_bin_interval = elev_bins[1] - elev_bins[0]

    # ----- Calibration datasets -----
    # Available names include: 'shean', 'berthier', 'braun', 'mcnabb', 'larsen', 'mauer',
    # 'wgms_d', 'wgms_ee', 'cogley', 'group' (see MBData.__init__)
    cal_datasets = ['braun']

    cal_data = pd.DataFrame()
    for dataset in cal_datasets:
        cal_subset = MBData(name=dataset)
        cal_subset_data = cal_subset.retrieve_mb(main_glac_rgi, main_glac_hyps, dates_table)
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0; use pd.concat
        cal_data = pd.concat([cal_data, cal_subset_data], ignore_index=True)
        # Count unique glaciers and fraction of total area for this dataset
        glacno_unique = list(cal_subset_data.glacno.unique())
        main_glac_rgi_cal = modelsetup.selectglaciersrgitable(glac_no=glacno_unique)
        print(dataset, '- glacier area covered: ',
              np.round(main_glac_rgi_cal.Area.sum() / main_glac_rgi.Area.sum() * 100, 1), '%')
    cal_data = cal_data.sort_values(['glacno', 't1_idx'])
    cal_data.reset_index(drop=True, inplace=True)

    # Count unique glaciers and fraction of total area across all datasets combined
    if len(cal_datasets) > 1:
        glacno_unique = list(cal_data.glacno.unique())
        main_glac_rgi_cal = modelsetup.selectglaciersrgitable(glac_no=glacno_unique)
        print('All datasets glacier area covered: ',
              np.round(main_glac_rgi_cal.Area.sum() / main_glac_rgi.Area.sum() * 100, 1), '%')
# # Export 'best' dataset
# cal_data_best = select_best_mb(cal_data)
# cal_data_best = cal_data_best.drop(['group_name', 'sla_m', 'WGMS_ID'], axis=1)
# cal_data_best['mb_mwea'] = cal_data_best.mb_mwe / cal_data_best.dt
# cal_data_best['mb_mwea_sigma'] = cal_data_best.mb_mwe_err / cal_data_best.dt
# cal_data_best.to_csv(input.braun_fp + 'braun_AK_all_20190924_wlarsen_mcnabb_best.csv', index=False)
#%% PRE-PROCESS MCNABB DATA
# # Remove glaciers that:
# # (1) poor percent coverage
# # (2) uncertainty is too high
#
# density_ice_brun = 850
#
# mcnabb_fn = 'McNabb_data_all_raw.csv'
# output_fn = 'McNabb_data_all_preprocessed.csv'
#
# # Load data
# ds_raw = pd.read_csv(input.mcnabb_fp + mcnabb_fn)
# ds_raw['glacno_str'] = [x.split('-')[1] for x in ds_raw.RGIId.values]
# ds_raw['mb_mwea'] = ds_raw['smb'] * density_ice_brun / input.density_water
# ds_raw['mb_mwea_sigma'] = ds_raw['e_dh'] * density_ice_brun / input.density_water
# nraw = ds_raw.shape[0]
#
# # remove data with poor coverage
# ds = ds_raw[ds_raw['pct_data'] > 0.75].copy()
# ds.reset_index(drop=True, inplace=True)
# nraw_goodcoverage = ds.shape[0]
# print('Glaciers removed (poor coverage):', nraw - nraw_goodcoverage, 'points')
#
# # remove glaciers with too high uncertainty (> 1.96 stdev)
# uncertainty_median = ds.e_dh.median()
# ds['e_mad'] = np.absolute(ds['e_dh'] - uncertainty_median)
# uncertainty_mad = np.median(ds['e_mad'])
# print('uncertainty median and mad [m/yr]:', np.round(uncertainty_median,2), np.round(uncertainty_mad,2))
# ds = ds[ds['e_dh'] < uncertainty_median + 3*uncertainty_mad].copy()
# ds = ds.sort_values('RGIId')
# ds.reset_index(drop=True, inplace=True)
# print('Glaciers removed (too high uncertainty):', nraw_goodcoverage - ds.shape[0], 'points')
#
# # Select glaciers
# glac_no = sorted(set(ds['glacno_str'].values))
# main_glac_rgi = modelsetup.selectglaciersrgitable(glac_no=glac_no)
#
# # Count unique glaciers and fraction of total area
# print('Glacier area covered: ', np.round(main_glac_rgi['Area'].sum(),1),'km2')
#
## # All values
## rgiid_values = list(ds.RGIId.values)
## rgiid_idx = []
## for rgiid in rgiid_values:
## rgiid_idx.append(np.where(main_glac_rgi.RGIId.values == rgiid)[0][0])
## ds['CenLat'] = main_glac_rgi.loc[rgiid_idx, 'CenLat'].values
## ds['CenLon'] = main_glac_rgi.loc[rgiid_idx, 'CenLon'].values
#
#
# # Only longest value
# ds_output = pd.DataFrame(np.zeros((len(glac_no), ds.shape[1])), columns=ds.columns)
# for nglac, glacno in enumerate(glac_no):
# ds_subset = ds.loc[np.where(ds.glacno_str.values == glacno)[0],:]
# ds_subset.reset_index(inplace=True)
# ds_output.loc[nglac,:] = (
# ds_subset.loc[np.where(ds_subset['pct_data'].values == ds_subset['pct_data'].max())[0][0],:])
#
# # Minimum and maximum mass balances
# print('Max MB:', np.round(ds_output.loc[np.where(ds_output.smb.values == ds_output.smb.max())[0][0],'smb'],2),
# '+/-', np.round(ds_output.loc[np.where(ds_output.smb.values == ds_output.smb.max())[0][0],'e_dh'],2))
# print('Min MB:', np.round(ds_output.loc[np.where(ds_output.smb.values == ds_output.smb.min())[0][0],'smb'],2),
# '+/-', np.round(ds_output.loc[np.where(ds_output.smb.values == ds_output.smb.min())[0][0],'e_dh'],2))
#
# # Adjust date to YYYYMMDD format
# print('\nCHECK ALL YEARS AFTER IN 2000s\n')
# ds_output['y0'] = ['20' + str(x.split('/')[2]).zfill(2) for x in ds_output['date0'].values]
# ds_output['m0'] = [str(x.split('/')[0]).zfill(2) for x in ds_output['date0'].values]
# ds_output['d0'] = [str(x.split('/')[1]).zfill(2) for x in ds_output['date0'].values]
# ds_output['y1'] = ['20' + str(x.split('/')[2]).zfill(2) for x in ds_output['date1'].values]
# ds_output['m1'] = [str(x.split('/')[0]).zfill(2) for x in ds_output['date1'].values]
# ds_output['d1'] = [str(x.split('/')[1]).zfill(2) for x in ds_output['date1'].values]
# ds_output['date0'] = ds_output['y0'] + ds_output['m0'] + ds_output['d0']
# ds_output['date1'] = ds_output['y1'] + ds_output['m1'] + ds_output['d1']
# ds_output.drop(['y0', 'm0', 'd0', 'y1', 'm1', 'd1'], axis=1, inplace=True)
#
# ds_output.to_csv(input.mcnabb_fp + output_fn)
#%%
# # PRE-PROCESS MAUER DATA
# mauer_fn = 'Mauer_geoMB_HMA_1970s_2000.csv'
# min_pctCov = 80
#
# ds = pd.read_csv(input.mauer_fp + mauer_fn)
# ds.dropna(axis=0, how='any', inplace=True)
# ds.sort_values('RGIId')
# ds.reset_index(drop=True, inplace=True)
# demyears = ds.demYears.tolist()
# demyears = [x.split(';') for x in demyears]
# t1_raw = []
# t2 = []
# for x in demyears:
# if '2000' in x:
# x.remove('2000')
# t2.append(2000)
# t1_raw.append([np.float(y) for y in x])
# t1 = np.array([np.array(x).mean() for x in t1_raw])
# ds['t1'] = t1
# ds['t2'] = t2
# # Minimum percent coverage
# ds2 = ds[ds.pctCov > min_pctCov].copy()
# ds2['RegO1'] = ds2.RGIId.astype(int)
# # Glacier number and index for comparison
# ds2['glacno'] = ((ds2['RGIId'] % 1) * 10**5).round(0).astype(int)
# ds_list = ds2[['RegO1', 'glacno']]
# ds2['RGIId'] = ds2['RegO1'] + ds2['glacno'] / 10**5
# ds2.reset_index(drop=True, inplace=True)
# ds2.drop(['RegO1', 'glacno'], axis=1, inplace=True)
# ds2.to_csv(input.mauer_fp + input.mauer_fn.split('.csv')[0] + '_min' + str(min_pctCov) + 'pctCov.csv', index=False)
#
# # Pickle lists of glacier numbers for each region
# import pickle
# for reg in [13, 14, 15]:
# ds_subset = ds_list[ds_list['RegO1'] == reg]
# rgi_glacno_list = [str(x).rjust(5,'0') for x in ds_subset['glacno'].tolist()]
# pickle_fn = 'R' + str(reg) + '_mauer_1970s_2000_rgi_glac_number.pkl'
# print('Region ' + str(reg) + ' list:', rgi_glacno_list)
# print(pickle_fn)
##
## with open(pickle_fn, 'wb') as f:
## pickle.dump(rgi_glacno_list, f)
#%%
# import pickle
# region = 15
#
# mauer_pickle_fn = 'R' + str(region) + '_mauer_1970s_2000_rgi_glac_number.pkl'
#
# with open(mauer_pickle_fn, 'rb') as f:
# rgi_glac_number = pickle.load(f)
#
# # Select glaciers
# main_glac_rgi = modelsetup.selectglaciersrgitable(rgi_regionsO1=[region], rgi_regionsO2 = 'all',
# rgi_glac_number=rgi_glac_number)
# # Glacier hypsometry [km**2], total area
# main_glac_hyps = modelsetup.import_Husstable(main_glac_rgi, input.hyps_filepath,
# input.hyps_filedict, input.hyps_colsdrop)
# # Determine dates_table_idx that coincides with data
# dates_table = modelsetup.datesmodelrun(1970, 2017, spinupyears=0)
#
#
# # Select mass balance data
# mb1 = MBData(name='mauer')
# ds_mb = mb1.retrieve_mb(main_glac_rgi, main_glac_hyps, dates_table) | 59.174031 | 121 | 0.548671 |
import pandas as pd
import numpy as np
import calendar
import collections
import datetime
import pygem_input as input
import pygemfxns_modelsetup as modelsetup
class MBData():
def __init__(self,
name='wgms_d',
):
self.name = name
if self.name == 'shean':
self.ds_fp = input.shean_fp
self.ds_fn = input.shean_fn
self.rgi_glacno_cn = input.shean_rgi_glacno_cn
self.mb_mwea_cn = input.shean_mb_cn
self.mb_mwea_err_cn = input.shean_mb_err_cn
self.t1_cn = input.shean_time1_cn
self.t2_cn = input.shean_time2_cn
self.area_cn = input.shean_area_cn
elif self.name == 'berthier':
self.ds_fp = input.berthier_fp
self.ds_fn = input.berthier_fn
self.rgi_glacno_cn = input.berthier_rgi_glacno_cn
self.mb_mwea_cn = input.berthier_mb_cn
self.mb_mwea_err_cn = input.berthier_mb_err_cn
self.t1_cn = input.berthier_time1_cn
self.t2_cn = input.berthier_time2_cn
self.area_cn = input.berthier_area_cn
elif self.name == 'braun':
self.ds_fp = input.braun_fp
self.ds_fn = input.braun_fn
self.rgi_glacno_cn = input.braun_rgi_glacno_cn
self.mb_mwea_cn = input.braun_mb_cn
self.mb_mwea_err_cn = input.braun_mb_err_cn
self.t1_cn = input.braun_time1_cn
self.t2_cn = input.braun_time2_cn
self.area_cn = input.braun_area_cn
elif self.name == 'mcnabb':
self.ds_fp = input.mcnabb_fp
self.ds_fn = input.mcnabb_fn
self.rgi_glacno_cn = input.mcnabb_rgiid_cn
self.mb_mwea_cn = input.mcnabb_mb_cn
self.mb_mwea_err_cn = input.mcnabb_mb_err_cn
self.t1_cn = input.mcnabb_time1_cn
self.t2_cn = input.mcnabb_time2_cn
self.area_cn = input.mcnabb_area_cn
elif self.name == 'larsen':
self.ds_fp = input.larsen_fp
self.ds_fn = input.larsen_fn
self.rgi_glacno_cn = input.larsen_rgiid_cn
self.mb_mwea_cn = input.larsen_mb_cn
self.mb_mwea_err_cn = input.larsen_mb_err_cn
self.t1_cn = input.larsen_time1_cn
self.t2_cn = input.larsen_time2_cn
self.area_cn = input.larsen_area_cn
elif self.name == 'brun':
self.data_fp = input.brun_fp
elif self.name == 'mauer':
self.ds_fp = input.mauer_fp
self.ds_fn = input.mauer_fn
self.rgi_glacno_cn = input.mauer_rgi_glacno_cn
self.mb_mwea_cn = input.mauer_mb_cn
self.mb_mwea_err_cn = input.mauer_mb_err_cn
self.t1_cn = input.mauer_time1_cn
self.t2_cn = input.mauer_time2_cn
elif self.name == 'wgms_d':
self.ds_fp = input.wgms_fp
self.ds_fn = input.wgms_d_fn_preprocessed
self.rgi_glacno_cn = input.wgms_rgi_glacno_cn
self.thickness_chg_cn = input.wgms_d_thickness_chg_cn
self.thickness_chg_err_cn = input.wgms_d_thickness_chg_err_cn
self.volume_chg_cn = input.wgms_d_volume_chg_cn
self.volume_chg_err_cn = input.wgms_d_volume_chg_err_cn
self.z1_cn = input.wgms_d_z1_cn
self.z2_cn = input.wgms_d_z2_cn
self.obs_type_cn = input.wgms_obs_type_cn
elif self.name == 'wgms_ee':
self.ds_fp = input.wgms_fp
self.ds_fn = input.wgms_ee_fn_preprocessed
self.rgi_glacno_cn = input.wgms_rgi_glacno_cn
self.mb_mwe_cn = input.wgms_ee_mb_cn
self.mb_mwe_err_cn = input.wgms_ee_mb_err_cn
self.t1_cn = input.wgms_ee_t1_cn
self.period_cn = input.wgms_ee_period_cn
self.z1_cn = input.wgms_ee_z1_cn
self.z2_cn = input.wgms_ee_z2_cn
self.obs_type_cn = input.wgms_obs_type_cn
elif self.name == 'cogley':
self.ds_fp = input.cogley_fp
self.ds_fn = input.cogley_fn_preprocessed
self.rgi_glacno_cn = input.cogley_rgi_glacno_cn
self.mass_chg_cn = input.cogley_mass_chg_cn
self.mass_chg_err_cn = input.cogley_mass_chg_err_cn
self.z1_cn = input.cogley_z1_cn
self.z2_cn = input.cogley_z2_cn
self.obs_type_cn = input.cogley_obs_type_cn
elif self.name == 'group':
self.ds_fp = input.mb_group_fp
self.ds_fn = input.mb_group_data_fn
self.ds_dict_fn = input.mb_group_dict_fn
self.rgi_regionO1_cn = 'rgi_regionO1'
self.t1_cn = input.mb_group_t1_cn
self.t2_cn = input.mb_group_t2_cn
def retrieve_mb(self, main_glac_rgi, main_glac_hyps, dates_table):
    """Convert the raw mass-balance dataset selected by ``self.name`` into a
    standardized observation table.

    Parameters
    ----------
    main_glac_rgi : pd.DataFrame
        RGI glacier table; this method reads 'rgino_str', 'RGIId', 'CenLat'.
    main_glac_hyps : pd.DataFrame
        Glacier hypsometry (one row per glacier, columns are elevation-bin
        centers, values are bin areas).
    dates_table : pd.DataFrame
        Model time steps; this method reads 'year', 'month', 'date',
        'daysinmonth'.

    Returns
    -------
    pd.DataFrame
        One row per observation with the columns in ``ds_output_cols``,
        sorted by glacier number and start-time index.

    NOTE(review): this copy of the file is damaged — one line is truncated
    and at least one branch header is missing (see inline notes below);
    the corrupted fragments are preserved verbatim.
    """
    # Map glacier-number string -> row position in main_glac_rgi / main_glac_hyps.
    glacnodict = dict(zip(main_glac_rgi['rgino_str'], main_glac_rgi.index.values))
    ds_output_cols = ['RGIId', 'glacno', 'group_name', 'obs_type', 'mb_mwe', 'mb_mwe_err', 'sla_m', 'z1_idx',
                      'z2_idx', 'z1', 'z2', 't1_idx', 't2_idx', 't1', 't2', 'area_km2', 'WGMS_ID']
    # NOTE(review): ``is not`` on a str literal compares object identity, not
    # equality; this should almost certainly be ``!=`` — confirm before changing.
    if self.name is not 'group':
        ds_all = pd.read_csv(self.ds_fp + self.ds_fn)
        # Normalize the glacier identifier to 'RR.NNNNN' form, whether the
        # source column holds 'RGI60-RR.NNNNN' strings or RR.NNNNN floats.
        if str(ds_all.loc[0,self.rgi_glacno_cn]).startswith('RGI'):
            ds_all['glacno'] = [str(x).split('-')[1] for x in ds_all[self.rgi_glacno_cn].values]
        else:
            ds_all['glacno'] = [str(int(x)).zfill(2) + '.' + str(int(np.round(x%1*10**5))).zfill(5)
                                for x in ds_all[self.rgi_glacno_cn]]
        # Keep only observations for glaciers included in this model run.
        ds = ds_all.iloc[np.where(ds_all['glacno'].isin(list(main_glac_rgi.rgino_str.values)))[0],:].copy()
        ds.reset_index(drop=True, inplace=True)
        elev_bins = main_glac_hyps.columns.values.astype(int)
        elev_bin_interval = elev_bins[1] - elev_bins[0]
    # ----- dataset-specific processing -----
    if self.name in ['shean', 'berthier', 'braun']:
        # Full-glacier geodetic mb rates (m w.e. a^-1) with decimal-year dates.
        # z1_idx: first non-zero hypsometry bin; z2_idx: last non-zero bin.
        ds['z1_idx'] = (
                (main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values != 0).argmax(axis=1).astype(int))
        ds['z2_idx'] = (
                (main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values.cumsum(1)).argmax(axis=1).astype(int))
        ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2
        ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2
        ds['area_km2'] = np.nan
        for x in range(ds.shape[0]):
            ds.loc[x,'area_km2'] = (
                    main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']],
                                        ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())
        # Decimal year -> (year, month); month 13 rolls over to January.
        ds['t1'] = ds[self.t1_cn].astype(np.float64)
        ds['t2'] = ds[self.t2_cn].astype(np.float64)
        ds['t1_year'] = ds['t1'].astype(int)
        ds['t1_month'] = round(ds['t1'] % ds['t1_year'] * 12 + 1)
        ds.loc[ds['t1_month'] == 13, 't1_year'] = ds.loc[ds['t1_month'] == 13, 't1_year'] + 1
        ds.loc[ds['t1_month'] == 13, 't1_month'] = 1
        ds['t2_year'] = ds['t2'].astype(int)
        ds['t2_month'] = round(ds['t2'] % ds['t2_year'] * 12)
        ds.loc[ds['t2_month'] == 0, 't2_month'] = 1
        # Drop observations outside the model time span.
        year_decimal_min = dates_table.loc[0,'year'] + dates_table.loc[0,'month'] / 12
        year_decimal_max = (dates_table.loc[dates_table.shape[0]-1,'year'] +
                            (dates_table.loc[dates_table.shape[0]-1,'month'] + 1) / 12)
        ds = ds[ds['t1_year'] + ds['t1_month'] / 12 >= year_decimal_min]
        ds = ds[ds['t2_year'] + ds['t2_month'] / 12 < year_decimal_max]
        ds.reset_index(drop=True, inplace=True)
        # Locate each (year, month) in dates_table to get time-step indices.
        ds['t1_idx'] = np.nan
        ds['t2_idx'] = np.nan
        for x in range(ds.shape[0]):
            ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
                                              (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
            ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
                                              (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
        ds['t1_idx'] = ds['t1_idx'].astype(int)
        # Annual rate [m w.e. a^-1] * elapsed time -> total mb [m w.e.].
        ds['mb_mwe'] = ds[self.mb_mwea_cn] * (ds['t2'] - ds['t1'])
        ds['mb_mwe_err'] = ds[self.mb_mwea_err_cn] * (ds['t2'] - ds['t1'])
        # NOTE(review): the next line is truncated in this copy of the file;
        # it presumably read ``if 'obs_type' not in list(ds.columns.values):``
        # — confirm against the upstream source before repairing.
        ype' not in list(ds.columns.values):
            ds['obs_type'] = 'mb_geo'
        ds_addcols = [x for x in ds_output_cols if x not in ds.columns.values]
        for colname in ds_addcols:
            ds[colname] = np.nan
        # NOTE(review): lines are missing here in this copy (likely the header
        # and opening statements of the 'mauer' branch — the t2_month = 2 and
        # mb_mwea column names below match that dataset); the dangling fragment
        # is preserved verbatim.
        1).astype(int))
        ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2
        ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2
        ds['area_km2'] = np.nan
        for x in range(ds.shape[0]):
            ds.loc[x,'area_km2'] = (
                    main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']],
                                        ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())
        ds['t1'] = ds[self.t1_cn]
        ds['t2'] = ds[self.t2_cn]
        ds['t1_year'] = ds['t1'].astype(int)
        ds['t1_month'] = round(ds['t1'] % ds['t1_year'] * 12 + 1)
        ds.loc[ds['t1_month'] > 12, 't1_month'] = 12
        ds['t2_year'] = ds['t2'].astype(int)
        # End month hard-coded to February for this dataset.
        ds['t2_month'] = 2
        year_decimal_min = dates_table.loc[0,'year'] + dates_table.loc[0,'month'] / 12
        year_decimal_max = (dates_table.loc[dates_table.shape[0]-1,'year'] +
                            (dates_table.loc[dates_table.shape[0]-1,'month'] + 1) / 12)
        ds = ds[ds['t1_year'] + ds['t1_month'] / 12 >= year_decimal_min]
        ds = ds[ds['t2_year'] + ds['t2_month'] / 12 <= year_decimal_max]
        ds.reset_index(drop=True, inplace=True)
        ds['t1_idx'] = np.nan
        ds['t2_idx'] = np.nan
        for x in range(ds.shape[0]):
            ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
                                              (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
            ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
                                              (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
        ds['t1_idx'] = ds['t1_idx'].astype(int)
        ds['mb_mwe'] = ds[self.mb_mwea_cn] * (ds['t2'] - ds['t1'])
        ds['mb_mwe_err'] = ds[self.mb_mwea_err_cn] * (ds['t2'] - ds['t1'])
        ds['obs_type'] = 'mb_geo'
    elif self.name == 'wgms_d':
        # WGMS 'D' table: geodetic thickness / volume change.
        # z values of 9999 mean a whole-glacier observation: use the full
        # hypsometry extent; otherwise locate the bins bracketing z1/z2.
        ds['z1_idx'] = np.nan
        ds['z2_idx'] = np.nan
        ds.loc[ds[self.z1_cn] == 9999, 'z1_idx'] = (
                (main_glac_hyps.iloc[ds.loc[ds[self.z1_cn] == 9999, 'glacno'].map(glacnodict)].values != 0)
                .argmax(axis=1))
        ds.loc[ds[self.z2_cn] == 9999, 'z2_idx'] = (
                (main_glac_hyps.iloc[ds.loc[ds[self.z2_cn] == 9999, 'glacno'].map(glacnodict)].values.cumsum(1))
                .argmax(axis=1))
        ds.loc[ds[self.z1_cn] != 9999, 'z1_idx'] = (
                ((np.tile(elev_bins, (ds.loc[ds[self.z1_cn] != 9999, self.z1_cn].shape[0],1)) -
                  ds.loc[ds[self.z1_cn] != 9999, self.z1_cn][:,np.newaxis]) > 0).argmax(axis=1))
        ds.loc[ds[self.z2_cn] != 9999, 'z2_idx'] = (
                ((np.tile(elev_bins, (ds.loc[ds[self.z2_cn] != 9999, self.z2_cn].shape[0],1)) -
                  ds.loc[ds[self.z2_cn] != 9999, self.z2_cn][:,np.newaxis]) > 0).argmax(axis=1) - 1)
        ds['z1_idx'] = ds['z1_idx'].values.astype(int)
        ds['z2_idx'] = ds['z2_idx'].values.astype(int)
        ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2
        ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2
        ds['area_km2_rgi'] = np.nan
        for x in range(ds.shape[0]):
            ds.loc[x,'area_km2_rgi'] = (
                    main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']],
                                        ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())
        # Prefer the surveyed area when reported, else RGI hypsometry area.
        ds['area_km2'] = np.nan
        ds.loc[ds.AREA_SURVEY_YEAR.isnull(), 'area_km2'] = ds.loc[ds.AREA_SURVEY_YEAR.isnull(), 'area_km2_rgi']
        ds.loc[ds.AREA_SURVEY_YEAR.notnull(), 'area_km2'] = ds.loc[ds.AREA_SURVEY_YEAR.notnull(),
                                                                   'AREA_SURVEY_YEAR']
        # Drop rows with no usable dates.
        ds = ds[np.isnan(ds['REFERENCE_DATE']) == False]
        ds = ds[np.isnan(ds['SURVEY_DATE']) == False]
        ds.reset_index(drop=True, inplace=True)
        # Dates are stored as YYYYMMDD numbers; split into components.
        ds['t1_year'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[:4].astype(int)
        ds['t1_month'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[4:6].astype(int)
        ds['t1_day'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[6:].astype(int)
        ds['t2_year'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[:4].astype(int)
        ds['t2_month'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[4:6].astype(int)
        ds['t2_day'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[6:].astype(int)
        # Hemisphere-dependent season months, used to substitute month/day
        # components reported as 99 (= unknown).
        latdict = dict(zip(main_glac_rgi['RGIId'], main_glac_rgi['CenLat']))
        ds['CenLat'] = ds['RGIId'].map(latdict)
        ds['lat_category'] = np.nan
        ds.loc[ds['CenLat'] >= input.lat_threshold, 'lat_category'] = 'northernmost'
        ds.loc[(ds['CenLat'] < input.lat_threshold) & (ds['CenLat'] > 0), 'lat_category'] = 'north'
        ds.loc[(ds['CenLat'] <= 0) & (ds['CenLat'] > -1*input.lat_threshold), 'lat_category'] = 'south'
        ds.loc[ds['CenLat'] <= -1*input.lat_threshold, 'lat_category'] = 'southernmost'
        ds['months_wintersummer'] = ds['lat_category'].map(input.monthdict)
        ds['winter_begin'] = ds['months_wintersummer'].apply(lambda x: x[0])
        ds['winter_end'] = ds['months_wintersummer'].apply(lambda x: x[1])
        ds['summer_begin'] = ds['months_wintersummer'].apply(lambda x: x[2])
        ds['summer_end'] = ds['months_wintersummer'].apply(lambda x: x[3])
        ds.loc[ds['t1_month'] == 99, 't1_month'] = ds.loc[ds['t1_month'] == 99, 'winter_begin']
        ds.loc[ds['t1_day'] == 99, 't1_day'] = 1
        ds.loc[ds['t2_month'] == 99, 't2_month'] = ds.loc[ds['t2_month'] == 99, 'winter_begin'] - 1
        for x in range(ds.shape[0]):
            if ds.loc[x, 't2_day'] == 99:
                try:
                    ds.loc[x, 't2_day'] = (
                            dates_table.loc[(ds.loc[x, 't2_year'] == dates_table['year']) &
                                            (ds.loc[x, 't2_month'] == dates_table['month']), 'daysinmonth']
                                            .values[0])
                except:
                    # Date outside dates_table: fall back to a safe day count.
                    ds.loc[x, 't2_day'] = 28
        # Wrap month values > 12 and clamp days to the month length.
        ds['t1_month'] = ds['t1_month'].map(lambda x: x if x <=12 else x%12)
        ds['t2_month'] = ds['t2_month'].map(lambda x: x if x <=12 else x%12)
        ds['t1_daysinmonth'] = (
                [calendar.monthrange(ds.loc[x,'t1_year'], ds.loc[x,'t1_month'])[1] for x in range(ds.shape[0])])
        ds['t2_daysinmonth'] = (
                [calendar.monthrange(ds.loc[x,'t2_year'], ds.loc[x,'t2_month'])[1] for x in range(ds.shape[0])])
        ds['t1_day'] = (ds.apply(lambda x: x['t1_day'] if x['t1_day'] <= x['t1_daysinmonth']
                                 else x['t1_daysinmonth'], axis=1))
        ds['t2_day'] = (ds.apply(lambda x: x['t2_day'] if x['t2_day'] <= x['t2_daysinmonth']
                                 else x['t2_daysinmonth'], axis=1))
        # Convert to decimal years via day-of-year / days-in-year.
        ds['t1_datetime'] = pd.to_datetime(
                pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))
        ds['t2_datetime'] = pd.to_datetime(
                pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))
        ds['t1_doy'] = ds.t1_datetime.dt.strftime("%j").astype(float)
        ds['t2_doy'] = ds.t2_datetime.dt.strftime("%j").astype(float)
        ds['t1_daysinyear'] = (
                (pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':12, 'day':31})) -
                 pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':1, 'day':1}))).dt.days + 1)
        ds['t2_daysinyear'] = (
                (pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':12, 'day':31})) -
                 pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':1, 'day':1}))).dt.days + 1)
        ds['t1'] = ds.t1_year + ds.t1_doy / ds.t1_daysinyear
        ds['t2'] = ds.t2_year + ds.t2_doy / ds.t2_daysinyear
        # Restrict to observations within the model time span.
        end_datestable = dates_table.loc[dates_table.shape[0]-1, 'date']
        end_datetime = datetime.datetime(end_datestable.year, end_datestable.month + 1, end_datestable.day)
        ds = ds[ds['t1_datetime'] >= dates_table.loc[0, 'date']]
        ds = ds[ds['t2_datetime'] < end_datetime]
        ds.reset_index(drop=True, inplace=True)
        ds['t1_idx'] = np.nan
        ds['t2_idx'] = np.nan
        for x in range(ds.shape[0]):
            ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
                                              (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
            ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
                                              (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
        # Thickness change [mm] -> m w.e. (ice->water density conversion);
        # fall back to volume change / area when thickness change is null.
        ds['mb_mwe'] = ds[self.thickness_chg_cn] / 1000 * input.density_ice / input.density_water
        ds['mb_mwe_err'] = ds[self.thickness_chg_err_cn] / 1000 * input.density_ice / input.density_water
        ds.loc[ds.mb_mwe.isnull(), 'mb_mwe'] = (
                ds.loc[ds.mb_mwe.isnull(), self.volume_chg_cn] * 1000 / ds.loc[ds.mb_mwe.isnull(), 'area_km2'] *
                (1/1000)**2 * input.density_ice / input.density_water)
        # NOTE(review): this second fallback assigns 'mb_mwe' again, but from
        # the volume-change *error* column; it likely should write
        # 'mb_mwe_err' instead — confirm before changing.
        ds.loc[ds.mb_mwe.isnull(), 'mb_mwe'] = (
                ds.loc[ds.mb_mwe.isnull(), self.volume_chg_err_cn] * 1000 / ds.loc[ds.mb_mwe.isnull(), 'area_km2'] *
                (1/1000)**2 * input.density_ice / input.density_water)
        ds['obs_type'] = 'mb_geo'
    elif self.name == 'wgms_ee':
        # WGMS 'EE' table: glaciological (stake) mass balance, possibly
        # seasonal (winter/summer) or annual, per elevation band.
        ds['z1_idx'] = np.nan
        ds['z2_idx'] = np.nan
        ds.loc[ds[self.z1_cn] == 9999, 'z1_idx'] = (
                (main_glac_hyps.iloc[ds.loc[ds[self.z1_cn] == 9999, 'glacno'].map(glacnodict)].values != 0)
                .argmax(axis=1))
        ds.loc[ds[self.z2_cn] == 9999, 'z2_idx'] = (
                (main_glac_hyps.iloc[ds.loc[ds[self.z2_cn] == 9999, 'glacno'].map(glacnodict)].values.cumsum(1))
                .argmax(axis=1))
        ds.loc[ds[self.z1_cn] != 9999, 'z1_idx'] = (
                ((np.tile(elev_bins, (ds.loc[ds[self.z1_cn] != 9999, self.z1_cn].shape[0],1)) -
                  ds.loc[ds[self.z1_cn] != 9999, self.z1_cn][:,np.newaxis]) > 0).argmax(axis=1))
        ds.loc[ds[self.z2_cn] != 9999, 'z2_idx'] = (
                ((np.tile(elev_bins, (ds.loc[ds[self.z2_cn] != 9999, self.z2_cn].shape[0],1)) -
                  ds.loc[ds[self.z2_cn] != 9999, self.z2_cn][:,np.newaxis]) > 0).argmax(axis=1) - 1)
        ds['z1_idx'] = ds['z1_idx'].values.astype(int)
        ds['z2_idx'] = ds['z2_idx'].values.astype(int)
        ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2
        ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2
        ds['area_km2'] = np.nan
        for x in range(ds.shape[0]):
            ds.loc[x,'area_km2'] = (
                    main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']],
                                        ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())
        # Drop zero-area bands and rows with missing period dates.
        ds = ds[ds['area_km2'] > 0]
        ds.reset_index(drop=True, inplace=True)
        ds = ds.drop(np.where(np.isnan(ds['BEGIN_PERIOD'].values))[0].tolist(), axis=0)
        ds = ds.drop(np.where(np.isnan(ds['END_PERIOD'].values))[0].tolist(), axis=0)
        ds.reset_index(drop=True, inplace=True)
        ds['t1_year'] = ds['BEGIN_PERIOD'].astype(str).str.split('.').str[0].str[:4].astype(int)
        ds['t1_month'] = ds['BEGIN_PERIOD'].astype(str).str.split('.').str[0].str[4:6].astype(int)
        ds['t1_day'] = ds['BEGIN_PERIOD'].astype(str).str.split('.').str[0].str[6:].astype(int)
        ds['t2_year'] = ds['END_PERIOD'].astype(str).str.split('.').str[0].str[:4].astype(int)
        ds['t2_month'] = ds['END_PERIOD'].astype(str).str.split('.').str[0].str[4:6].astype(int)
        ds['t2_day'] = ds['END_PERIOD'].astype(str).str.split('.').str[0].str[6:].astype(int)
        latdict = dict(zip(main_glac_rgi['RGIId'], main_glac_rgi['CenLat']))
        ds['CenLat'] = ds['RGIId'].map(latdict)
        ds['lat_category'] = np.nan
        ds.loc[ds['CenLat'] >= input.lat_threshold, 'lat_category'] = 'northernmost'
        ds.loc[(ds['CenLat'] < input.lat_threshold) & (ds['CenLat'] > 0), 'lat_category'] = 'north'
        ds.loc[(ds['CenLat'] <= 0) & (ds['CenLat'] > -1*input.lat_threshold), 'lat_category'] = 'south'
        ds.loc[ds['CenLat'] <= -1*input.lat_threshold, 'lat_category'] = 'southernmost'
        ds['months_wintersummer'] = ds['lat_category'].map(input.monthdict)
        ds['winter_begin'] = ds['months_wintersummer'].apply(lambda x: x[0])
        ds['winter_end'] = ds['months_wintersummer'].apply(lambda x: x[1])
        ds['summer_begin'] = ds['months_wintersummer'].apply(lambda x: x[2])
        ds['summer_end'] = ds['months_wintersummer'].apply(lambda x: x[3])
        ds.loc[ds['t1_month'] == 99, 't1_month'] = ds.loc[ds['t1_month'] == 99, 'winter_begin']
        ds.loc[ds['t1_day'] == 99, 't1_day'] = 1
        ds.loc[ds['t2_month'] == 99, 't2_month'] = ds.loc[ds['t2_month'] == 99, 'winter_begin'] - 1
        for x in range(ds.shape[0]):
            if ds.loc[x, 't2_day'] == 99:
                try:
                    ds.loc[x, 't2_day'] = (
                            dates_table.loc[(ds.loc[x, 't2_year'] == dates_table['year']) &
                                            (ds.loc[x, 't2_month'] == dates_table['month']), 'daysinmonth']
                                            .values[0])
                except:
                    ds.loc[x, 't2_day'] = 28
        # Override period dates with canonical season start/end months.
        # NOTE(review): the conditions compare against 'northern', but the
        # categories assigned above are 'north'/'northernmost', so 'northern'
        # never matches; and the 2nd and 4th branches both test
        # south/southernmost + 'summer' (the 4th looks like it was meant to
        # be 'winter') — confirm against the upstream source.
        for x in range(ds.shape[0]):
            if (((ds.loc[x, 'lat_category'] == 'north') or (ds.loc[x, 'lat_category'] == 'northern')) and
                (ds.loc[x, 'period'] == 'summer')):
                ds.loc[x, 't1_year'] = ds.loc[x, 't1_year'] + 1
                ds.loc[x, 't1_month'] = ds.loc[x, 'summer_begin']
                ds.loc[x, 't2_month'] = ds.loc[x, 'summer_end']
            elif (((ds.loc[x, 'lat_category'] == 'south') or (ds.loc[x, 'lat_category'] == 'southernmost')) and
                  (ds.loc[x, 'period'] == 'summer')):
                ds.loc[x, 't1_month'] = ds.loc[x, 'summer_begin']
                ds.loc[x, 't2_month'] = ds.loc[x, 'summer_end']
            elif (((ds.loc[x, 'lat_category'] == 'north') or (ds.loc[x, 'lat_category'] == 'northern')) and
                  (ds.loc[x, 'period'] == 'winter')):
                ds.loc[x, 't1_month'] = ds.loc[x, 'winter_begin']
                ds.loc[x, 't2_month'] = ds.loc[x, 'winter_end']
            elif (((ds.loc[x, 'lat_category'] == 'south') or (ds.loc[x, 'lat_category'] == 'southernmost')) and
                  (ds.loc[x, 'period'] == 'summer')):
                ds.loc[x, 't1_year'] = ds.loc[x, 't1_year'] + 1
                ds.loc[x, 't1_month'] = ds.loc[x, 'winter_begin']
                ds.loc[x, 't2_month'] = ds.loc[x, 'winter_end']
            # NOTE(review): original indentation lost in this copy — these two
            # lines may belong to the final elif rather than the loop body.
            ds.loc[x, 't1_day'] = 1
            ds.loc[x, 't2_day'] = calendar.monthrange(ds.loc[x, 't2_year'], ds.loc[x, 't2_month'])[1]
        ds['t1_month'] = ds['t1_month'].map(lambda x: x if x <=12 else x%12)
        ds['t2_month'] = ds['t2_month'].map(lambda x: x if x <=12 else x%12)
        ds['t1_datetime'] = pd.to_datetime(
                pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))
        ds['t2_datetime'] = pd.to_datetime(
                pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))
        ds['t1_doy'] = ds.t1_datetime.dt.strftime("%j").astype(float)
        ds['t2_doy'] = ds.t2_datetime.dt.strftime("%j").astype(float)
        ds['t1_daysinyear'] = (
                (pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':12, 'day':31})) -
                 pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':1, 'day':1}))).dt.days + 1)
        ds['t2_daysinyear'] = (
                (pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':12, 'day':31})) -
                 pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':1, 'day':1}))).dt.days + 1)
        ds['t1'] = ds.t1_year + ds.t1_doy / ds.t1_daysinyear
        ds['t2'] = ds.t2_year + ds.t2_doy / ds.t2_daysinyear
        end_datestable = dates_table.loc[dates_table.shape[0]-1, 'date']
        end_datetime = datetime.datetime(end_datestable.year, end_datestable.month + 1, end_datestable.day)
        ds = ds[ds['t1_datetime'] >= dates_table.loc[0, 'date']]
        ds = ds[ds['t2_datetime'] < end_datetime]
        ds.reset_index(drop=True, inplace=True)
        ds['t1_idx'] = np.nan
        ds['t2_idx'] = np.nan
        for x in range(ds.shape[0]):
            ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
                                              (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
            ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
                                              (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
        # mm w.e. -> m w.e.; glaciological observation type.
        ds['mb_mwe'] = ds[self.mb_mwe_cn] / 1000
        ds['mb_mwe_err'] = ds[self.mb_mwe_err_cn] / 1000
        ds['obs_type'] = 'mb_glac'
    elif self.name == 'cogley':
        # Cogley geodetic mass-change dataset (mass change rate * dt).
        ds['z1_idx'] = np.nan
        ds['z2_idx'] = np.nan
        ds.loc[ds[self.z1_cn] == 9999, 'z1_idx'] = (
                (main_glac_hyps.iloc[ds.loc[ds[self.z1_cn] == 9999, 'glacno'].map(glacnodict)].values != 0)
                .argmax(axis=1))
        ds.loc[ds[self.z2_cn] == 9999, 'z2_idx'] = (
                (main_glac_hyps.iloc[ds.loc[ds[self.z2_cn] == 9999, 'glacno'].map(glacnodict)].values.cumsum(1))
                .argmax(axis=1))
        ds.loc[ds[self.z1_cn] != 9999, 'z1_idx'] = (
                ((np.tile(elev_bins, (ds.loc[ds[self.z1_cn] != 9999, self.z1_cn].shape[0],1)) -
                  ds.loc[ds[self.z1_cn] != 9999, self.z1_cn][:,np.newaxis]) > 0).argmax(axis=1))
        ds.loc[ds[self.z2_cn] != 9999, 'z2_idx'] = (
                ((np.tile(elev_bins, (ds.loc[ds[self.z2_cn] != 9999, self.z2_cn].shape[0],1)) -
                  ds.loc[ds[self.z2_cn] != 9999, self.z2_cn][:,np.newaxis]) > 0).argmax(axis=1) - 1)
        ds['z1_idx'] = ds['z1_idx'].values.astype(int)
        ds['z2_idx'] = ds['z2_idx'].values.astype(int)
        ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2
        ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2
        ds['area_km2_rgi'] = np.nan
        for x in range(ds.shape[0]):
            ds.loc[x,'area_km2_rgi'] = (
                    main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']],
                                        ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())
        ds['t1_year'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[:4].astype(int)
        ds['t1_month'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[4:6].astype(int)
        ds['t1_day'] = ds['REFERENCE_DATE'].astype(str).str.split('.').str[0].str[6:].astype(int)
        ds['t2_year'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[:4].astype(int)
        ds['t2_month'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[4:6].astype(int)
        ds['t2_day'] = ds['SURVEY_DATE'].astype(str).str.split('.').str[0].str[6:].astype(int)
        latdict = dict(zip(main_glac_rgi['RGIId'], main_glac_rgi['CenLat']))
        ds['CenLat'] = ds['RGIId'].map(latdict)
        ds['lat_category'] = np.nan
        ds.loc[ds['CenLat'] >= input.lat_threshold, 'lat_category'] = 'northernmost'
        ds.loc[(ds['CenLat'] < input.lat_threshold) & (ds['CenLat'] > 0), 'lat_category'] = 'north'
        ds.loc[(ds['CenLat'] <= 0) & (ds['CenLat'] > -1*input.lat_threshold), 'lat_category'] = 'south'
        ds.loc[ds['CenLat'] <= -1*input.lat_threshold, 'lat_category'] = 'southernmost'
        ds['months_wintersummer'] = ds['lat_category'].map(input.monthdict)
        ds['winter_begin'] = ds['months_wintersummer'].apply(lambda x: x[0])
        ds['winter_end'] = ds['months_wintersummer'].apply(lambda x: x[1])
        ds['summer_begin'] = ds['months_wintersummer'].apply(lambda x: x[2])
        ds['summer_end'] = ds['months_wintersummer'].apply(lambda x: x[3])
        ds.loc[ds['t1_month'] == 99, 't1_month'] = ds.loc[ds['t1_month'] == 99, 'winter_begin']
        ds.loc[ds['t1_day'] == 99, 't1_day'] = 1
        ds.loc[ds['t2_month'] == 99, 't2_month'] = ds.loc[ds['t2_month'] == 99, 'winter_begin'] - 1
        for x in range(ds.shape[0]):
            if ds.loc[x, 't2_day'] == 99:
                try:
                    ds.loc[x, 't2_day'] = (
                            dates_table.loc[(ds.loc[x, 't2_year'] == dates_table['year']) &
                                            (ds.loc[x, 't2_month'] == dates_table['month']), 'daysinmonth']
                                            .values[0])
                except:
                    ds.loc[x, 't2_day'] = 28
        ds['t1_datetime'] = pd.to_datetime(
                pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))
        ds['t2_datetime'] = pd.to_datetime(
                pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))
        ds['t1_doy'] = ds.t1_datetime.dt.strftime("%j").astype(float)
        ds['t2_doy'] = ds.t2_datetime.dt.strftime("%j").astype(float)
        ds['t1_daysinyear'] = (
                (pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':12, 'day':31})) -
                 pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':1, 'day':1}))).dt.days + 1)
        ds['t2_daysinyear'] = (
                (pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':12, 'day':31})) -
                 pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':1, 'day':1}))).dt.days + 1)
        ds['t1'] = ds.t1_year + ds.t1_doy / ds.t1_daysinyear
        ds['t2'] = ds.t2_year + ds.t2_doy / ds.t2_daysinyear
        end_datestable = dates_table.loc[dates_table.shape[0]-1, 'date']
        end_datetime = datetime.datetime(end_datestable.year, end_datestable.month + 1, end_datestable.day)
        ds = ds[ds['t1_datetime'] >= dates_table.loc[0, 'date']]
        ds = ds[ds['t2_datetime'] < end_datetime]
        ds.reset_index(drop=True, inplace=True)
        ds['t1_idx'] = np.nan
        ds['t2_idx'] = np.nan
        for x in range(ds.shape[0]):
            ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
                                              (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
            ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
                                              (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
        # Mass change [units per density_water] * elapsed time -> m w.e.
        ds['mb_mwe'] = ds[self.mass_chg_cn] / input.density_water * (ds['t2'] - ds['t1'])
        ds['mb_mwe_err'] = ds[self.mass_chg_err_cn] / input.density_water * (ds['t2'] - ds['t1'])
        ds['obs_type'] = 'mb_geo'
    elif self.name == 'mcnabb' or self.name == 'larsen':
        # Alaska geodetic datasets with YYYYMMDD integer dates.
        ds['z1_idx'] = (
                (main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values != 0).argmax(axis=1).astype(int))
        ds['z2_idx'] = (
                (main_glac_hyps.iloc[ds['glacno'].map(glacnodict)].values.cumsum(1)).argmax(axis=1).astype(int))
        ds['z1'] = elev_bins[ds['z1_idx'].values] - elev_bin_interval/2
        ds['z2'] = elev_bins[ds['z2_idx'].values] + elev_bin_interval/2
        ds['area_km2'] = np.nan
        for x in range(ds.shape[0]):
            ds.loc[x,'area_km2'] = (
                    main_glac_hyps.iloc[glacnodict[ds.loc[x,'glacno']],
                                        ds.loc[x,'z1_idx']:ds.loc[x,'z2_idx']+1].sum())
        ds['t1_year'] = [int(str(x)[0:4]) for x in ds[self.t1_cn].values]
        ds['t1_month'] = [int(str(x)[4:6]) for x in ds[self.t1_cn].values]
        ds['t1_day'] = [int(str(x)[6:]) for x in ds[self.t1_cn].values]
        ds['t2_year'] = [int(str(x)[0:4]) for x in ds[self.t2_cn].values]
        ds['t2_month'] = [int(str(x)[4:6]) for x in ds[self.t2_cn].values]
        ds['t2_day'] = [int(str(x)[6:]) for x in ds[self.t2_cn].values]
        ds['t1_daysinmonth'] = ds.apply(lambda row: modelsetup.daysinmonth(row['t1_year'], row['t1_month']), axis=1)
        ds['t2_daysinmonth'] = ds.apply(lambda row: modelsetup.daysinmonth(row['t2_year'], row['t2_month']), axis=1)
        ds['t1_datetime'] = pd.to_datetime(
                pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))
        ds['t2_datetime'] = pd.to_datetime(
                pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))
        ds['t1'] = ds['t1_year'] + (ds['t1_month'] + ds['t1_day'] / ds['t1_daysinmonth']) / 12
        ds['t2'] = ds['t2_year'] + (ds['t2_month'] + ds['t2_day'] / ds['t2_daysinmonth']) / 12
        year_decimal_min = dates_table.loc[0,'year'] + dates_table.loc[0,'month'] / 12
        year_decimal_max = (dates_table.loc[dates_table.shape[0]-1,'year'] +
                            (dates_table.loc[dates_table.shape[0]-1,'month'] + 1) / 12)
        ds = ds[ds['t1_year'] + ds['t1_month'] / 12 >= year_decimal_min]
        ds = ds[ds['t2_year'] + ds['t2_month'] / 12 < year_decimal_max]
        ds.reset_index(drop=True, inplace=True)
        ds['t1_idx'] = np.nan
        ds['t2_idx'] = np.nan
        for x in range(ds.shape[0]):
            ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
                                              (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
            ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
                                              (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
        ds['t1_idx'] = ds['t1_idx'].astype(int)
        ds['mb_mwe'] = ds[self.mb_mwea_cn] * (ds['t2'] - ds['t1'])
        ds['mb_mwe_err'] = ds[self.mb_mwea_err_cn] * (ds['t2'] - ds['t1'])
        ds['obs_type'] = 'mb_geo'
    elif self.name == 'group':
        # Group (multi-glacier) records: a record is usable only if every
        # member glacier of the group is present in the model run.
        ds_all = pd.read_csv(self.ds_fp + self.ds_fn, encoding='latin1')
        ds_dict_raw = pd.read_csv(self.ds_fp + self.ds_dict_fn)
        ds_dict = dict(zip(ds_dict_raw['RGIId'], ds_dict_raw['group_name']))
        group_names_unique = list(set(ds_dict.values()))
        ds_dict_keyslist = [[] for x in group_names_unique]
        for n, group in enumerate(group_names_unique):
            ds_dict_keyslist[n] = [group, [k for k, v in ds_dict.items() if v == group]]
            # NOTE(review): this assigns the whole 'glaciers_present' column on
            # every iteration, overwriting earlier groups' results; it likely
            # should be ``ds_all.loc[n, 'glaciers_present']`` — confirm.
            ds_all['glaciers_present'] = set(ds_dict_keyslist[n][1]).issubset(main_glac_rgi.RGIId.values.tolist())
            ds_all.loc[n, 'first_RGIId'] = ds_dict_keyslist[n][1][0]
        ds = ds_all[ds_all.glaciers_present == True].copy()
        ds.reset_index(drop=True, inplace=True)
        ds['t1_year'] = ds[self.t1_cn].astype(str).str.split('.').str[0].str[:4].astype(int)
        ds['t1_month'] = ds[self.t1_cn].astype(str).str.split('.').str[0].str[4:6].astype(int)
        ds['t1_day'] = ds[self.t1_cn].astype(str).str.split('.').str[0].str[6:].astype(int)
        ds['t2_year'] = ds[self.t2_cn].astype(str).str.split('.').str[0].str[:4].astype(int)
        ds['t2_month'] = ds[self.t2_cn].astype(str).str.split('.').str[0].str[4:6].astype(int)
        ds['t2_day'] = ds[self.t2_cn].astype(str).str.split('.').str[0].str[6:].astype(int)
        # Latitude category from the group's first member glacier.
        latdict = dict(zip(main_glac_rgi['RGIId'], main_glac_rgi['CenLat']))
        ds['CenLat'] = ds['first_RGIId'].map(latdict)
        ds['lat_category'] = np.nan
        ds.loc[ds['CenLat'] >= input.lat_threshold, 'lat_category'] = 'northernmost'
        ds.loc[(ds['CenLat'] < input.lat_threshold) & (ds['CenLat'] > 0), 'lat_category'] = 'north'
        ds.loc[(ds['CenLat'] <= 0) & (ds['CenLat'] > -1*input.lat_threshold), 'lat_category'] = 'south'
        ds.loc[ds['CenLat'] <= -1*input.lat_threshold, 'lat_category'] = 'southernmost'
        ds['months_wintersummer'] = ds['lat_category'].map(input.monthdict)
        ds['winter_begin'] = ds['months_wintersummer'].apply(lambda x: x[0])
        ds['winter_end'] = ds['months_wintersummer'].apply(lambda x: x[1])
        ds['summer_begin'] = ds['months_wintersummer'].apply(lambda x: x[2])
        ds['summer_end'] = ds['months_wintersummer'].apply(lambda x: x[3])
        ds.loc[ds['t1_month'] == 99, 't1_month'] = ds.loc[ds['t1_month'] == 99, 'winter_begin']
        ds.loc[ds['t1_day'] == 99, 't1_day'] = 1
        ds.loc[ds['t2_month'] == 99, 't2_month'] = ds.loc[ds['t2_month'] == 99, 'winter_begin'] - 1
        for x in range(ds.shape[0]):
            if ds.loc[x, 't2_day'] == 99:
                try:
                    ds.loc[x, 't2_day'] = (
                            dates_table.loc[(ds.loc[x, 't2_year'] == dates_table['year']) &
                                            (ds.loc[x, 't2_month'] == dates_table['month']), 'daysinmonth']
                                            .values[0])
                except:
                    ds.loc[x, 't2_day'] = 28
        ds['t1_datetime'] = pd.to_datetime(
                pd.DataFrame({'year':ds.t1_year.values, 'month':ds.t1_month.values, 'day':ds.t1_day.values}))
        ds['t2_datetime'] = pd.to_datetime(
                pd.DataFrame({'year':ds.t2_year.values, 'month':ds.t2_month.values, 'day':ds.t2_day.values}))
        ds['t1_doy'] = ds.t1_datetime.dt.strftime("%j").astype(float)
        ds['t2_doy'] = ds.t2_datetime.dt.strftime("%j").astype(float)
        ds['t1_daysinyear'] = (
                (pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':12, 'day':31})) -
                 pd.to_datetime(pd.DataFrame({'year':ds.t1_year.values, 'month':1, 'day':1}))).dt.days + 1)
        ds['t2_daysinyear'] = (
                (pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':12, 'day':31})) -
                 pd.to_datetime(pd.DataFrame({'year':ds.t2_year.values, 'month':1, 'day':1}))).dt.days + 1)
        ds['t1'] = ds.t1_year + ds.t1_doy / ds.t1_daysinyear
        ds['t2'] = ds.t2_year + ds.t2_doy / ds.t2_daysinyear
        end_datestable = dates_table.loc[dates_table.shape[0]-1, 'date']
        end_datetime = datetime.datetime(end_datestable.year, end_datestable.month + 1, end_datestable.day)
        ds = ds[ds['t1_datetime'] >= dates_table.loc[0, 'date']]
        ds = ds[ds['t2_datetime'] < end_datetime]
        ds.reset_index(drop=True, inplace=True)
        ds['t1_idx'] = np.nan
        ds['t2_idx'] = np.nan
        for x in range(ds.shape[0]):
            ds.loc[x,'t1_idx'] = (dates_table[(ds.loc[x, 't1_year'] == dates_table['year']) &
                                              (ds.loc[x, 't1_month'] == dates_table['month'])].index.values[0])
            ds.loc[x,'t2_idx'] = (dates_table[(ds.loc[x, 't2_year'] == dates_table['year']) &
                                              (ds.loc[x, 't2_month'] == dates_table['month'])].index.values[0])
        # dh/dt [m a^-1] -> total m w.e. over the observation period.
        ds['mb_mwe'] = np.nan
        ds['mb_mwe_err'] = np.nan
        ds.loc[ds['dhdt_ma'].notnull(), 'mb_mwe'] = (
                ds.loc[ds['dhdt_ma'].notnull(), 'dhdt_ma'] * input.density_ice / input.density_water *
                (ds['t2'] - ds['t1']))
        ds.loc[ds['dhdt_ma'].notnull(), 'mb_mwe_err'] = (
                ds.loc[ds['dhdt_ma'].notnull(), 'dhdt_unc_ma'] * input.density_ice / input.density_water *
                (ds['t2'] - ds['t1']))
    # Ensure every output column exists, then emit the standardized table.
    ds_addcols = [x for x in ds_output_cols if x not in ds.columns.values]
    for colname in ds_addcols:
        ds[colname] = np.nan
    ds_output = ds[ds_output_cols].sort_values(['glacno', 't1_idx'])
    ds_output.reset_index(drop=True, inplace=True)
    return ds_output
def select_best_mb(cal_data):
    """Reduce ``cal_data`` to one 'best' mass-balance observation per glacier.

    Glaciers with a single observation keep it.  For glaciers with several,
    geodetic observations ('mb_geo') are preferred over glaciological ones,
    and among the remaining candidates the one spanning the longest period
    (largest t2 - t1) is kept.  A 'dt' column is added to ``cal_data`` as a
    side effect.  Returns a new DataFrame sorted by RGIId with a fresh index.
    """
    # Elapsed time of each observation (decimal years).
    cal_data['dt'] = cal_data['t2'] - cal_data['t1']
    all_ids = list(cal_data.RGIId.values)
    id_counts = collections.Counter(all_ids)
    # Split glaciers into single-observation rows (kept as-is, by position)
    # and duplicated RGIIds (resolved below).
    dup_ids = sorted(rgiid for rgiid, count in id_counts.items() if count > 1)
    single_idx = sorted(all_ids.index(rgiid)
                        for rgiid, count in id_counts.items() if count == 1)
    best = cal_data.loc[single_idx, :]
    for rgiid in dup_ids:
        candidates = cal_data[cal_data['RGIId'] == rgiid]
        # Prefer geodetic observations when any exist for this glacier.
        if 'mb_geo' in list(candidates.obs_type.values):
            candidates = candidates[candidates.obs_type == 'mb_geo']
        # Keep the observation(s) covering the longest time span.
        longest = candidates[candidates.dt == candidates.dt.max()]
        best = pd.concat([best, longest], axis=0)
    best = best.sort_values(by=['RGIId'])
    best.reset_index(inplace=True, drop=True)
    return best
if __name__ == '__main__':
    # Region/glacier selection and model time span for this standalone run.
    rgi_regionsO1 = [1]
    rgi_glac_number = 'all'
    glac_no = input.glac_no  # kept for parity with other scripts (unused below)
    startyear = 1950
    endyear = 2018
    # BUG FIX: the selected glacier table was assigned to ``_glac_rgi`` but
    # every subsequent statement reads ``main_glac_rgi`` (NameError at
    # runtime); assign it under the name actually used.
    main_glac_rgi = modelsetup.selectglaciersrgitable(rgi_regionsO1=rgi_regionsO1, rgi_regionsO2='all',
                                                      rgi_glac_number=rgi_glac_number, glac_no=input.glac_no)
    main_glac_hyps = modelsetup.import_Husstable(main_glac_rgi, input.hyps_filepath, input.hyps_filedict,
                                                 input.hyps_colsdrop)
    dates_table = modelsetup.datesmodelrun(startyear, endyear, spinupyears=0, option_wateryear=3)
    elev_bins = main_glac_hyps.columns.values.astype(int)
    elev_bin_interval = elev_bins[1] - elev_bins[0]
    # Load each calibration dataset and report its area coverage.
    cal_datasets = ['braun']
    cal_data = pd.DataFrame()
    for dataset in cal_datasets:
        cal_subset = MBData(name=dataset)
        cal_subset_data = cal_subset.retrieve_mb(main_glac_rgi, main_glac_hyps, dates_table)
        # pd.concat instead of the deprecated/removed DataFrame.append.
        cal_data = pd.concat([cal_data, cal_subset_data], ignore_index=True)
        glacno_unique = list(cal_subset_data.glacno.unique())
        main_glac_rgi_cal = modelsetup.selectglaciersrgitable(glac_no=glacno_unique)
        print(dataset, '- glacier area covered: ',
              np.round(main_glac_rgi_cal.Area.sum() / main_glac_rgi.Area.sum() * 100, 1), '%')
    cal_data = cal_data.sort_values(['glacno', 't1_idx'])
    cal_data.reset_index(drop=True, inplace=True)
    # Combined coverage only meaningful with more than one dataset.
    if len(cal_datasets) > 1:
        glacno_unique = list(cal_data.glacno.unique())
        main_glac_rgi_cal = modelsetup.selectglaciersrgitable(glac_no=glacno_unique)
        print('All datasets glacier area covered: ',
              np.round(main_glac_rgi_cal.Area.sum() / main_glac_rgi.Area.sum() * 100, 1), '%')
| true | true |
f7f4b2b7173094e5bfb49c7d2a15f48d8f379b92 | 829 | py | Python | gluon/tests/base.py | lfntac/ipv6 | 1cf305a5fe370e71157723a40833c73aeffdf35e | [
"Apache-2.0"
] | null | null | null | gluon/tests/base.py | lfntac/ipv6 | 1cf305a5fe370e71157723a40833c73aeffdf35e | [
"Apache-2.0"
] | null | null | null | gluon/tests/base.py | lfntac/ipv6 | 1cf305a5fe370e71157723a40833c73aeffdf35e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base
class TestCase(base.BaseTestCase):
    """Common base class shared by all unit tests in this package."""

    def setup(self):
        # Intentionally empty; subclasses override to add fixture setup.
        pass
| 30.703704 | 75 | 0.73462 |
from oslotest import base
class TestCase(base.BaseTestCase):
def setup(self):
pass
| true | true |
f7f4b4de48a8b9fd3c5edf9a5244dc403e239a3f | 25,248 | py | Python | orgbook-issuer-agent/issuer_controller/src/issuer.py | brianorwhatever/jag-lcrb-carla-public | 8146cb866cfc9ba54b571e29738046ee068a140d | [
"Apache-2.0"
] | null | null | null | orgbook-issuer-agent/issuer_controller/src/issuer.py | brianorwhatever/jag-lcrb-carla-public | 8146cb866cfc9ba54b571e29738046ee068a140d | [
"Apache-2.0"
] | null | null | null | orgbook-issuer-agent/issuer_controller/src/issuer.py | brianorwhatever/jag-lcrb-carla-public | 8146cb866cfc9ba54b571e29738046ee068a140d | [
"Apache-2.0"
] | null | null | null | import json
import os
import threading
import time
import logging
import requests
from flask import jsonify
import config
# Admin-API key for our own aca-py agent; added as an x-api-key header when set.
AGENT_ADMIN_API_KEY = os.environ.get("AGENT_ADMIN_API_KEY")
ADMIN_REQUEST_HEADERS = {"Content-Type": "application/json"}
if AGENT_ADMIN_API_KEY is not None and 0 < len(AGENT_ADMIN_API_KEY):
    ADMIN_REQUEST_HEADERS["x-api-key"] = AGENT_ADMIN_API_KEY
# Logging: default WARNING; TRACE_EVENTS=true raises this module's logger to INFO.
LOG_LEVEL = os.environ.get('LOG_LEVEL', 'WARNING').upper()
LOGGER = logging.getLogger(__name__)
TRACE_EVENTS = os.getenv("TRACE_EVENTS", "False").lower() == "true"
if TRACE_EVENTS:
    LOGGER.setLevel(logging.INFO)
# Admin-API key for the TheOrgBook (TOB) agent, used when creating the invitation.
TOB_ADMIN_API_KEY = os.environ.get("TOB_ADMIN_API_KEY")
TOB_REQUEST_HEADERS = {}
if TOB_ADMIN_API_KEY is not None and 0 < len(TOB_ADMIN_API_KEY):
    TOB_REQUEST_HEADERS = {"x-api-key": TOB_ADMIN_API_KEY}
# list of cred defs per schema name/version, populated by StartupProcessingThread
app_config = {}
app_config["schemas"] = {}
# connection_id -> bool: whether issuer registration has completed for it
synced = {}
# Retry budget for agent_post_with_retry.
MAX_RETRIES = 3
def agent_post_with_retry(url, payload, headers=None):
    """POST ``payload`` to ``url``, retrying on any failure.

    Retries up to ``MAX_RETRIES`` times with a 5-second pause between
    attempts; re-raises the last exception once the budget is exhausted.
    """
    attempt = 0
    while True:
        try:
            resp = requests.post(url, payload, headers=headers)
            resp.raise_for_status()
        except Exception as exc:
            print("Error posting", url, exc)
            attempt += 1
            if attempt > MAX_RETRIES:
                raise exc
            time.sleep(5)
        else:
            return resp
def agent_schemas_cred_defs(agent_admin_url):
    """Return the schemas and cred defs already registered on our agent.

    Maps "<name>::<version>" to a dict with the schema record, its ledger
    sequence number as ``schema_id`` and, when one exists, the matching
    ``cred_def`` record.
    """
    catalog = {}

    # Schemas: fetch the list of ids, then each schema record in turn.
    resp = requests.get(
        agent_admin_url + "/schemas/created", headers=ADMIN_REQUEST_HEADERS
    )
    resp.raise_for_status()
    for s_id in resp.json()["schema_ids"]:
        s_resp = requests.get(
            agent_admin_url + "/schemas/" + s_id, headers=ADMIN_REQUEST_HEADERS
        )
        s_resp.raise_for_status()
        record = s_resp.json()["schema"]
        key = record["name"] + "::" + record["version"]
        catalog[key] = {"schema": record, "schema_id": str(record["seqNo"])}

    # Credential definitions: attach each to the schema it was built from.
    resp = requests.get(
        agent_admin_url + "/credential-definitions/created",
        headers=ADMIN_REQUEST_HEADERS,
    )
    resp.raise_for_status()
    for cd_id in resp.json()["credential_definition_ids"]:
        cd_resp = requests.get(
            agent_admin_url + "/credential-definitions/" + cd_id,
            headers=ADMIN_REQUEST_HEADERS,
        )
        cd_resp.raise_for_status()
        cred_def = cd_resp.json()["credential_definition"]
        for entry in catalog.values():
            if entry["schema_id"] == cred_def["schemaId"]:
                entry["cred_def"] = cred_def
                break

    return catalog
class StartupProcessingThread(threading.Thread):
    """One-shot startup worker: registers schemas/cred-defs on our agent,
    establishes (or reuses) the TOB connection, then sends the issuer
    registration(s) to TOB. Marks the connection synced when done.
    """

    # NOTE(review): ``global`` at class scope is a no-op statement; app_config
    # is reachable from run() as a module global regardless.
    global app_config

    def __init__(self, ENV):
        # ENV: mapping of environment/configuration values (e.g. os.environ).
        threading.Thread.__init__(self)
        self.ENV = ENV

    def run(self):
        # read configuration files
        config_root = self.ENV.get("CONFIG_ROOT", "../config")
        config_schemas = config.load_config(config_root + "/schemas.yml", env=self.ENV)
        config_services = config.load_config(
            config_root + "/services.yml", env=self.ENV
        )
        # print("schemas.yml -->", json.dumps(config_schemas))
        # print("services.yml -->", json.dumps(config_services))
        agent_admin_url = self.ENV.get("AGENT_ADMIN_URL")
        if not agent_admin_url:
            raise RuntimeError(
                "Error AGENT_ADMIN_URL is not specified, can't connect to Agent."
            )
        app_config["AGENT_ADMIN_URL"] = agent_admin_url
        # get public DID from our agent
        response = requests.get(
            agent_admin_url + "/wallet/did/public", headers=ADMIN_REQUEST_HEADERS
        )
        result = response.json()
        did = result["result"]
        print("Fetched DID from agent: ", did)
        app_config["DID"] = did["did"]
        # determine pre-registered schemas and cred defs
        existing_schemas = agent_schemas_cred_defs(agent_admin_url)
        print("Existing schemas:", json.dumps(existing_schemas))
        # register schemas and credential definitions
        for schema in config_schemas:
            schema_name = schema["name"]
            schema_version = schema["version"]
            schema_key = schema_name + "::" + schema_version
            if schema_key not in existing_schemas:
                # Attribute lists may be given as {name: description} or [name].
                schema_attrs = []
                schema_descs = {}
                if isinstance(schema["attributes"], dict):
                    # each element is a dict
                    for attr, desc in schema["attributes"].items():
                        schema_attrs.append(attr)
                        schema_descs[attr] = desc
                else:
                    # assume it's an array
                    for attr in schema["attributes"]:
                        schema_attrs.append(attr)
                # register our schema(s) and credential definition(s)
                schema_request = {
                    "schema_name": schema_name,
                    "schema_version": schema_version,
                    "attributes": schema_attrs,
                }
                response = agent_post_with_retry(
                    agent_admin_url + "/schemas",
                    json.dumps(schema_request),
                    headers=ADMIN_REQUEST_HEADERS,
                )
                response.raise_for_status()
                schema_id = response.json()
            else:
                # Schema already on the ledger: reuse its id.
                schema_id = {"schema_id": existing_schemas[schema_key]["schema"]["id"]}
            # Cache both the schema config and its ledger id for later lookups.
            app_config["schemas"]["SCHEMA_" + schema_name] = schema
            app_config["schemas"][
                "SCHEMA_" + schema_name + "_" + schema_version
            ] = schema_id["schema_id"]
            print("Registered schema: ", schema_id)
            # Create the cred def unless one already exists for this schema.
            if (
                schema_key not in existing_schemas
                or "cred_def" not in existing_schemas[schema_key]
            ):
                cred_def_request = {"schema_id": schema_id["schema_id"]}
                response = agent_post_with_retry(
                    agent_admin_url + "/credential-definitions",
                    json.dumps(cred_def_request),
                    headers=ADMIN_REQUEST_HEADERS,
                )
                response.raise_for_status()
                credential_definition_id = response.json()
            else:
                credential_definition_id = {
                    "credential_definition_id": existing_schemas[schema_key][
                        "cred_def"
                    ]["id"]
                }
            app_config["schemas"][
                "CRED_DEF_" + schema_name + "_" + schema_version
            ] = credential_definition_id["credential_definition_id"]
            print("Registered credential definition: ", credential_definition_id)
        # what is the TOB connection name?
        tob_connection_params = config_services["verifiers"]["bctob"]
        # check if we have a TOB connection
        response = requests.get(
            agent_admin_url + "/connections?alias=" + tob_connection_params["alias"],
            headers=ADMIN_REQUEST_HEADERS,
        )
        response.raise_for_status()
        connections = response.json()["results"]
        tob_connection = None
        for connection in connections:
            # check for TOB connection
            if connection["alias"] == tob_connection_params["alias"]:
                tob_connection = connection
        if not tob_connection:
            # if no tob connection then establish one:
            # ask TOB's agent for an invitation, then receive it on our agent.
            tob_agent_admin_url = tob_connection_params["connection"]["agent_admin_url"]
            if not tob_agent_admin_url:
                raise RuntimeError(
                    "Error TOB_AGENT_ADMIN_URL is not specified, can't establish a TOB connection."
                )
            response = requests.post(
                tob_agent_admin_url + "/connections/create-invitation",
                headers=TOB_REQUEST_HEADERS,
            )
            response.raise_for_status()
            invitation = response.json()
            response = requests.post(
                agent_admin_url
                + "/connections/receive-invitation?alias="
                + tob_connection_params["alias"],
                json.dumps(invitation["invitation"]),
                headers=ADMIN_REQUEST_HEADERS,
            )
            response.raise_for_status()
            tob_connection = response.json()
            print("Established tob connection: ", tob_connection)
            # NOTE(review): fixed sleep presumably allows the connection
            # handshake to complete before registration — confirm.
            time.sleep(5)
        app_config["TOB_CONNECTION"] = tob_connection["connection_id"]
        synced[tob_connection["connection_id"]] = False
        for issuer_name, issuer_info in config_services["issuers"].items():
            # register ourselves (issuer, schema(s), cred def(s)) with TOB
            issuer_config = {
                "name": issuer_name,
                "did": app_config["DID"],
                "config_root": config_root,
            }
            issuer_config.update(issuer_info)
            issuer_spec = config.assemble_issuer_spec(issuer_config)
            credential_types = []
            for credential_type in issuer_info["credential_types"]:
                schema_name = credential_type["schema"]
                schema_info = app_config["schemas"]["SCHEMA_" + schema_name]
                ctype_config = {
                    "schema_name": schema_name,
                    "schema_version": schema_info["version"],
                    "issuer_url": issuer_config["url"],
                    "config_root": config_root,
                    "credential_def_id": app_config["schemas"][
                        "CRED_DEF_" + schema_name + "_" + schema_info["version"]
                    ],
                }
                credential_type['attributes'] = schema_info["attributes"]
                ctype_config.update(credential_type)
                ctype = config.assemble_credential_type_spec(ctype_config, schema_info.get("attributes"))
                if ctype is not None:
                    credential_types.append(ctype)
            issuer_request = {
                "connection_id": app_config["TOB_CONNECTION"],
                "issuer_registration": {
                    "credential_types": credential_types,
                    "issuer": issuer_spec,
                },
            }
            print(json.dumps(issuer_request))
            response = requests.post(
                agent_admin_url + "/issuer_registration/send",
                json.dumps(issuer_request),
                headers=ADMIN_REQUEST_HEADERS,
            )
            response.raise_for_status()
            response.json()
            print("Registered issuer: ", issuer_name)
        # All issuers registered: mark the connection ready for use.
        synced[tob_connection["connection_id"]] = True
        print("Connection {} is synchronized".format(tob_connection))
def tob_connection_synced():
    """True once the TOB connection exists and startup sync has completed."""
    if "TOB_CONNECTION" not in app_config:
        return False
    conn_id = app_config["TOB_CONNECTION"]
    return conn_id in synced and synced[conn_id]
def startup_init(ENV):
    """Kick off agent/TOB registration on a background thread."""
    global app_config
    worker = StartupProcessingThread(ENV)
    worker.start()
# Shared state guarded by credential_lock:
#   credential_requests:  cred_exch_id -> threading.Event a sender waits on
#   credential_responses: cred_exch_id -> final response dict
#   credential_threads:   bidirectional map cred_exch_id <-> message thread_id
credential_lock = threading.Lock()
credential_requests = {}
credential_responses = {}
credential_threads = {}
# Timing statistics (per method name), guarded by timing_lock.
timing_lock = threading.Lock()
record_timings = True
timings = {}
def clear_stats():
    """Reset all collected timing statistics."""
    global timings
    with timing_lock:
        timings = {}
def get_stats():
    """Return the timing statistics dict."""
    with timing_lock:
        return timings
def log_timing_method(method, start_time, end_time, success, data=None):
    """Record one timed call of ``method`` in the global ``timings`` stats.

    Tracks total/success/fail counts, min/max/avg elapsed time and, when
    ``data`` is supplied, stores it keyed by the call's sequence number.
    No-op when ``record_timings`` is disabled.
    """
    if not record_timings:
        return
    elapsed_time = end_time - start_time
    with timing_lock:
        # Idiom fix: was "if not method in timings".
        if method not in timings:
            # First observation for this method: seed the stats record.
            timings[method] = {
                "total_count": 1,
                "success_count": 1 if success else 0,
                "fail_count": 0 if success else 1,
                "min_time": elapsed_time,
                "max_time": elapsed_time,
                "total_time": elapsed_time,
                "avg_time": elapsed_time,
                "data": {},
            }
        else:
            entry = timings[method]
            entry["total_count"] += 1
            if success:
                entry["success_count"] += 1
            else:
                entry["fail_count"] += 1
            entry["max_time"] = max(entry["max_time"], elapsed_time)
            entry["min_time"] = min(entry["min_time"], elapsed_time)
            entry["total_time"] += elapsed_time
            entry["avg_time"] = entry["total_time"] / entry["total_count"]
        if data:
            timings[method]["data"][str(timings[method]["total_count"])] = data
def set_credential_thread_id(cred_exch_id, thread_id):
    """Cross-reference a credential exchange id and its message thread id."""
    with credential_lock:
        # Store both directions so either id can be used for lookup later.
        print("Set cred_exch_id, thread_id", cred_exch_id, thread_id)
        credential_threads[thread_id] = cred_exch_id
        credential_threads[cred_exch_id] = thread_id
def add_credential_request(cred_exch_id):
    """Register interest in an exchange; return an Event to wait on.

    Returns None when a response for this exchange has already arrived.
    """
    with credential_lock:
        # short circuit if we already have the response
        if cred_exch_id in credential_responses:
            return None
        ready = threading.Event()
        credential_requests[cred_exch_id] = ready
        return ready
def add_credential_response(cred_exch_id, response):
    """Store the agent's response and wake any thread waiting on the exchange."""
    with credential_lock:
        credential_responses[cred_exch_id] = response
        waiter = credential_requests.pop(cred_exch_id, None)
        if waiter is not None:
            waiter.set()
def add_credential_problem_report(thread_id, response):
    """Route a problem report (keyed by message thread id) to its exchange."""
    print("get problem report for thread", thread_id)
    if thread_id in credential_threads:
        add_credential_response(credential_threads[thread_id], response)
        return
    print("thread_id not found", thread_id)
    # hack for now
    pending = list(credential_requests.keys())
    if 1 == len(pending):
        # Only one request outstanding - assume the report belongs to it.
        add_credential_response(pending[0], response)
    else:
        print("darn, too many outstanding requests :-(")
        print(credential_requests)
def add_credential_timeout_report(cred_exch_id):
    """Record a timeout as the (failed) response for an exchange."""
    print("add timeout report for cred", cred_exch_id)
    add_credential_response(
        cred_exch_id,
        {"success": False, "result": cred_exch_id + "::Error thread timeout"},
    )
def add_credential_exception_report(cred_exch_id, exc):
    """Record an exception as the (failed) response for an exchange."""
    print("add exception report for cred", cred_exch_id)
    add_credential_response(
        cred_exch_id,
        {"success": False, "result": cred_exch_id + "::" + str(exc)},
    )
def get_credential_response(cred_exch_id):
    """Pop and return the stored response for an exchange, or None.

    Also clears both thread-id cross-reference entries for the exchange.
    """
    with credential_lock:
        if cred_exch_id not in credential_responses:
            return None
        response = credential_responses.pop(cred_exch_id)
        if cred_exch_id in credential_threads:
            thread_id = credential_threads[cred_exch_id]
            print("cleaning out cred_exch_id, thread_id", cred_exch_id, thread_id)
            del credential_threads[cred_exch_id]
            del credential_threads[thread_id]
        return response
# Webhook topic names posted by the agent.
TOPIC_CONNECTIONS = "connections"
# NOTE(review): "actvity" looks like a typo but is a runtime value; confirm
# against the agent before changing it.
TOPIC_CONNECTIONS_ACTIVITY = "connections_actvity"
TOPIC_CREDENTIALS = "issue_credential"
TOPIC_PRESENTATIONS = "presentations"
TOPIC_GET_ACTIVE_MENU = "get-active-menu"
TOPIC_PERFORM_MENU_ACTION = "perform-menu-action"
TOPIC_ISSUER_REGISTRATION = "issuer_registration"
TOPIC_PROBLEM_REPORT = "problem_report"
# max 45 second wait for a credential response (prevents blocking forever)
MAX_CRED_RESPONSE_TIMEOUT = 45
def handle_connections(state, message):
    """Webhook handler for connection state updates from the agent."""
    # TODO auto-accept?
    print("handle_connections()", state)
    payload = {"message": state}
    return jsonify(payload)
def handle_credentials(state, message):
    """Webhook handler for issue-credential state updates.

    Records the thread id (for problem-report correlation) and, once the
    credential is acknowledged, releases the waiting issuing thread.
    """
    # TODO auto-respond to proof requests
    print("handle_credentials()", state, message["credential_exchange_id"])
    # TODO new "stored" state is being added by Nick
    cred_exch_id = message["credential_exchange_id"]
    if "thread_id" in message:
        set_credential_thread_id(cred_exch_id, message["thread_id"])
    if state == "credential_acked":
        add_credential_response(cred_exch_id, {"success": True, "result": cred_exch_id})
    return jsonify({"message": state})
def handle_presentations(state, message):
    """Webhook handler for presentation (proof) state updates."""
    # TODO auto-respond to proof requests
    print("handle_presentations()", state)
    payload = {"message": state}
    return jsonify(payload)
def handle_get_active_menu(message):
    """Webhook handler for get-active-menu requests (currently a no-op)."""
    # TODO add/update issuer info?
    print("handle_get_active_menu()", message)
    return jsonify({})
def handle_perform_menu_action(message):
    """Webhook handler for perform-menu-action requests (currently a no-op)."""
    # TODO add/update issuer info?
    print("handle_perform_menu_action()", message)
    return jsonify({})
def handle_register_issuer(message):
    """Webhook handler for issuer-registration responses (currently a no-op)."""
    # TODO add/update issuer info?
    print("handle_register_issuer()")
    return jsonify({})
def handle_problem_report(message):
    """Webhook handler for problem reports; fails the matching exchange."""
    print("handle_problem_report()", message)
    thid = message["~thread"]["thid"]
    msg = thid + "::" + message["explain-ltxt"]
    add_credential_problem_report(thid, {"success": False, "result": msg})
    return jsonify({})
class SendCredentialThread(threading.Thread):
    """Posts one credential offer to the agent and waits for the outcome.

    The final success/failure dict is left in ``self.cred_response`` for the
    caller to read after ``join()``.
    """

    def __init__(self, credential_definition_id, cred_offer, url, headers):
        threading.Thread.__init__(self)
        self.credential_definition_id = credential_definition_id
        self.cred_offer = cred_offer
        self.url = url
        self.headers = headers
        # Populated by run(); None until the exchange completes.
        self.cred_response = None

    def run(self):
        start_time = time.perf_counter()
        method = "submit_credential.credential"
        cred_data = None
        try:
            # Send the offer to our agent's admin API.
            response = requests.post(
                self.url, json.dumps(self.cred_offer), headers=self.headers
            )
            response.raise_for_status()
            cred_data = response.json()
            # Register for the webhook callback keyed on the exchange id.
            result_available = add_credential_request(
                cred_data["credential_exchange_id"]
            )
            # wait for confirmation from the agent, which will include the
            # credential exchange id
            if result_available and not result_available.wait(
                MAX_CRED_RESPONSE_TIMEOUT
            ):
                # No webhook arrived in time: record the timeout as the result.
                add_credential_timeout_report(cred_data["credential_exchange_id"])
                end_time = time.perf_counter()
                print(
                    "Got credential TIMEOUT:",
                    cred_data["credential_exchange_id"],
                    cred_data["connection_id"],
                )
                log_timing_method(
                    method,
                    start_time,
                    end_time,
                    False,
                    data={
                        "thread_id": cred_data["thread_id"],
                        "credential_exchange_id": cred_data["credential_exchange_id"],
                        "Error": "Timeout",
                        "elapsed_time": (end_time - start_time),
                    },
                )
            else:
                end_time = time.perf_counter()
                log_timing_method(method, start_time, end_time, True)
        except Exception as exc:
            print(exc)
            end_time = time.perf_counter()
            # if cred_data is not set we don't have a credential to set status for
            if cred_data:
                add_credential_exception_report(
                    cred_data["credential_exchange_id"], exc
                )
                data = {
                    "thread_id": cred_data["thread_id"],
                    "credential_exchange_id": cred_data["credential_exchange_id"],
                    "Error": str(exc),
                    "elapsed_time": (end_time - start_time),
                }
            else:
                data = {"Error": str(exc), "elapsed_time": (end_time - start_time)}
            log_timing_method(method, start_time, end_time, False, data=data)
            # don't re-raise; we want to log the exception as the credential
            # error response
        if cred_data:
            self.cred_response = get_credential_response(
                cred_data["credential_exchange_id"]
            )
        else:
            # BUGFIX: previously this unconditionally indexed into cred_data,
            # so a failure of the initial POST (cred_data still None) raised a
            # TypeError inside the thread and left cred_response unset for the
            # caller. Report the failure instead.
            self.cred_response = {
                "success": False,
                "result": "Error no credential exchange created",
            }
        processing_time = end_time - start_time
def handle_send_credential(cred_input):
    """Issue one credential per entry in ``cred_input`` and return the results.

    Each entry must carry ``schema``, ``version`` and ``attributes``; the
    matching cred def and schema ids are looked up from ``app_config`` (as
    populated at startup) and the offer is sent over the TOB connection.

    # other sample data
    sample_credentials = [
        {
            "schema": "ian-registration.ian-ville",
            "version": "1.0.0",
            "attributes": {
                "corp_num": "ABC12345",
                "registration_date": "2018-01-01",
                "entity_name": "Ima Permit",
                "entity_name_effective": "2018-01-01",
                "entity_status": "ACT",
                "entity_status_effective": "2019-01-01",
                "entity_type": "ABC",
                "registered_jurisdiction": "BC",
                "effective_date": "2019-01-01",
                "expiry_date": ""
            }
        },
        {
            "schema": "ian-permit.ian-ville",
            "version": "1.0.0",
            "attributes": {
                "permit_id": str(uuid.uuid4()),
                "entity_name": "Ima Permit",
                "corp_num": "ABC12345",
                "permit_issued_date": "2018-01-01",
                "permit_type": "ABC",
                "permit_status": "OK",
                "effective_date": "2019-01-01"
            }
        }
    ]
    """
    # construct and send the credential
    # print("Received credentials", cred_input)
    global app_config
    agent_admin_url = app_config["AGENT_ADMIN_URL"]
    start_time = time.perf_counter()
    processing_time = 0
    processed_count = 0
    # let's send a credential!
    cred_responses = []
    for credential in cred_input:
        # Resolve the registered cred def / schema ids for this credential.
        cred_def_key = "CRED_DEF_" + credential["schema"] + "_" + credential["version"]
        credential_definition_id = app_config["schemas"][cred_def_key]
        schema_name = credential["schema"]
        schema_info = app_config["schemas"]["SCHEMA_" + schema_name]
        schema_version = schema_info["version"]
        schema_id = app_config["schemas"][
            "SCHEMA_" + schema_name + "_" + schema_version
        ]
        cred_req = {
            "schema_issuer_did": app_config["DID"],
            "issuer_did": app_config["DID"],
            "schema_name": schema_name,
            "cred_def_id": credential_definition_id,
            "schema_version": schema_version,
            "credential_proposal": {
                "@type": "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview",
                "attributes": [
                    {"name": attr_name, "value": attr_value}
                    for attr_name, attr_value in credential["attributes"].items()
                ],
            },
            "connection_id": app_config["TOB_CONNECTION"],
            # "comment": "string",
            "schema_id": schema_id,
        }
        if TRACE_EVENTS:
            cred_req["trace"] = True
        # Credentials are issued one at a time on a worker thread; join()
        # makes the overall handling synchronous.
        thread = SendCredentialThread(
            credential_definition_id,
            cred_req,
            agent_admin_url + "/issue-credential/send",
            ADMIN_REQUEST_HEADERS,
        )
        thread.start()
        thread.join()
        cred_responses.append(thread.cred_response)
        processed_count = processed_count + 1
    processing_time = time.perf_counter() - start_time
    print(">>> Processed", processed_count, "credentials in", processing_time)
    # BUGFIX: guard the per-credential average; an empty cred_input previously
    # raised ZeroDivisionError here.
    if processed_count > 0:
        print("   ", processing_time / processed_count, "seconds per credential")
return jsonify(cred_responses) | 35.965812 | 105 | 0.592918 | import json
import os
import threading
import time
import logging
import requests
from flask import jsonify
import config
AGENT_ADMIN_API_KEY = os.environ.get("AGENT_ADMIN_API_KEY")
ADMIN_REQUEST_HEADERS = {"Content-Type": "application/json"}
if AGENT_ADMIN_API_KEY is not None and 0 < len(AGENT_ADMIN_API_KEY):
ADMIN_REQUEST_HEADERS["x-api-key"] = AGENT_ADMIN_API_KEY
LOG_LEVEL = os.environ.get('LOG_LEVEL', 'WARNING').upper()
LOGGER = logging.getLogger(__name__)
TRACE_EVENTS = os.getenv("TRACE_EVENTS", "False").lower() == "true"
if TRACE_EVENTS:
LOGGER.setLevel(logging.INFO)
TOB_ADMIN_API_KEY = os.environ.get("TOB_ADMIN_API_KEY")
TOB_REQUEST_HEADERS = {}
if TOB_ADMIN_API_KEY is not None and 0 < len(TOB_ADMIN_API_KEY):
TOB_REQUEST_HEADERS = {"x-api-key": TOB_ADMIN_API_KEY}
app_config = {}
app_config["schemas"] = {}
synced = {}
MAX_RETRIES = 3
def agent_post_with_retry(url, payload, headers=None):
retries = 0
while True:
try:
response = requests.post(url, payload, headers=headers)
response.raise_for_status()
return response
except Exception as e:
print("Error posting", url, e)
retries = retries + 1
if retries > MAX_RETRIES:
raise e
time.sleep(5)
def agent_schemas_cred_defs(agent_admin_url):
ret_schemas = {}
response = requests.get(
agent_admin_url + "/schemas/created", headers=ADMIN_REQUEST_HEADERS
)
response.raise_for_status()
schemas = response.json()["schema_ids"]
for schema_id in schemas:
response = requests.get(
agent_admin_url + "/schemas/" + schema_id, headers=ADMIN_REQUEST_HEADERS
)
response.raise_for_status()
schema = response.json()["schema"]
schema_key = schema["name"] + "::" + schema["version"]
ret_schemas[schema_key] = {"schema": schema, "schema_id": str(schema["seqNo"])}
response = requests.get(
agent_admin_url + "/credential-definitions/created",
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
cred_defs = response.json()["credential_definition_ids"]
for cred_def_id in cred_defs:
response = requests.get(
agent_admin_url + "/credential-definitions/" + cred_def_id,
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
cred_def = response.json()["credential_definition"]
for schema_key in ret_schemas:
if ret_schemas[schema_key]["schema_id"] == cred_def["schemaId"]:
ret_schemas[schema_key]["cred_def"] = cred_def
break
return ret_schemas
class StartupProcessingThread(threading.Thread):
global app_config
def __init__(self, ENV):
threading.Thread.__init__(self)
self.ENV = ENV
def run(self):
config_root = self.ENV.get("CONFIG_ROOT", "../config")
config_schemas = config.load_config(config_root + "/schemas.yml", env=self.ENV)
config_services = config.load_config(
config_root + "/services.yml", env=self.ENV
)
agent_admin_url = self.ENV.get("AGENT_ADMIN_URL")
if not agent_admin_url:
raise RuntimeError(
"Error AGENT_ADMIN_URL is not specified, can't connect to Agent."
)
app_config["AGENT_ADMIN_URL"] = agent_admin_url
# get public DID from our agent
response = requests.get(
agent_admin_url + "/wallet/did/public", headers=ADMIN_REQUEST_HEADERS
)
result = response.json()
did = result["result"]
print("Fetched DID from agent: ", did)
app_config["DID"] = did["did"]
# determine pre-registered schemas and cred defs
existing_schemas = agent_schemas_cred_defs(agent_admin_url)
print("Existing schemas:", json.dumps(existing_schemas))
# register schemas and credential definitions
for schema in config_schemas:
schema_name = schema["name"]
schema_version = schema["version"]
schema_key = schema_name + "::" + schema_version
if schema_key not in existing_schemas:
schema_attrs = []
schema_descs = {}
if isinstance(schema["attributes"], dict):
# each element is a dict
for attr, desc in schema["attributes"].items():
schema_attrs.append(attr)
schema_descs[attr] = desc
else:
# assume it's an array
for attr in schema["attributes"]:
schema_attrs.append(attr)
schema_request = {
"schema_name": schema_name,
"schema_version": schema_version,
"attributes": schema_attrs,
}
response = agent_post_with_retry(
agent_admin_url + "/schemas",
json.dumps(schema_request),
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
schema_id = response.json()
else:
schema_id = {"schema_id": existing_schemas[schema_key]["schema"]["id"]}
app_config["schemas"]["SCHEMA_" + schema_name] = schema
app_config["schemas"][
"SCHEMA_" + schema_name + "_" + schema_version
] = schema_id["schema_id"]
print("Registered schema: ", schema_id)
if (
schema_key not in existing_schemas
or "cred_def" not in existing_schemas[schema_key]
):
cred_def_request = {"schema_id": schema_id["schema_id"]}
response = agent_post_with_retry(
agent_admin_url + "/credential-definitions",
json.dumps(cred_def_request),
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
credential_definition_id = response.json()
else:
credential_definition_id = {
"credential_definition_id": existing_schemas[schema_key][
"cred_def"
]["id"]
}
app_config["schemas"][
"CRED_DEF_" + schema_name + "_" + schema_version
] = credential_definition_id["credential_definition_id"]
print("Registered credential definition: ", credential_definition_id)
tob_connection_params = config_services["verifiers"]["bctob"]
response = requests.get(
agent_admin_url + "/connections?alias=" + tob_connection_params["alias"],
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
connections = response.json()["results"]
tob_connection = None
for connection in connections:
if connection["alias"] == tob_connection_params["alias"]:
tob_connection = connection
if not tob_connection:
tob_agent_admin_url = tob_connection_params["connection"]["agent_admin_url"]
if not tob_agent_admin_url:
raise RuntimeError(
"Error TOB_AGENT_ADMIN_URL is not specified, can't establish a TOB connection."
)
response = requests.post(
tob_agent_admin_url + "/connections/create-invitation",
headers=TOB_REQUEST_HEADERS,
)
response.raise_for_status()
invitation = response.json()
response = requests.post(
agent_admin_url
+ "/connections/receive-invitation?alias="
+ tob_connection_params["alias"],
json.dumps(invitation["invitation"]),
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
tob_connection = response.json()
print("Established tob connection: ", tob_connection)
time.sleep(5)
app_config["TOB_CONNECTION"] = tob_connection["connection_id"]
synced[tob_connection["connection_id"]] = False
for issuer_name, issuer_info in config_services["issuers"].items():
# register ourselves (issuer, schema(s), cred def(s)) with TOB
issuer_config = {
"name": issuer_name,
"did": app_config["DID"],
"config_root": config_root,
}
issuer_config.update(issuer_info)
issuer_spec = config.assemble_issuer_spec(issuer_config)
credential_types = []
for credential_type in issuer_info["credential_types"]:
schema_name = credential_type["schema"]
schema_info = app_config["schemas"]["SCHEMA_" + schema_name]
ctype_config = {
"schema_name": schema_name,
"schema_version": schema_info["version"],
"issuer_url": issuer_config["url"],
"config_root": config_root,
"credential_def_id": app_config["schemas"][
"CRED_DEF_" + schema_name + "_" + schema_info["version"]
],
}
credential_type['attributes'] = schema_info["attributes"]
ctype_config.update(credential_type)
ctype = config.assemble_credential_type_spec(ctype_config, schema_info.get("attributes"))
if ctype is not None:
credential_types.append(ctype)
issuer_request = {
"connection_id": app_config["TOB_CONNECTION"],
"issuer_registration": {
"credential_types": credential_types,
"issuer": issuer_spec,
},
}
print(json.dumps(issuer_request))
response = requests.post(
agent_admin_url + "/issuer_registration/send",
json.dumps(issuer_request),
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
response.json()
print("Registered issuer: ", issuer_name)
synced[tob_connection["connection_id"]] = True
print("Connection {} is synchronized".format(tob_connection))
def tob_connection_synced():
return (
("TOB_CONNECTION" in app_config)
and (app_config["TOB_CONNECTION"] in synced)
and (synced[app_config["TOB_CONNECTION"]])
)
def startup_init(ENV):
global app_config
thread = StartupProcessingThread(ENV)
thread.start()
credential_lock = threading.Lock()
credential_requests = {}
credential_responses = {}
credential_threads = {}
timing_lock = threading.Lock()
record_timings = True
timings = {}
def clear_stats():
global timings
timing_lock.acquire()
try:
timings = {}
finally:
timing_lock.release()
def get_stats():
timing_lock.acquire()
try:
return timings
finally:
timing_lock.release()
def log_timing_method(method, start_time, end_time, success, data=None):
if not record_timings:
return
timing_lock.acquire()
try:
elapsed_time = end_time - start_time
if not method in timings:
timings[method] = {
"total_count": 1,
"success_count": 1 if success else 0,
"fail_count": 0 if success else 1,
"min_time": elapsed_time,
"max_time": elapsed_time,
"total_time": elapsed_time,
"avg_time": elapsed_time,
"data": {},
}
else:
timings[method]["total_count"] = timings[method]["total_count"] + 1
if success:
timings[method]["success_count"] = timings[method]["success_count"] + 1
else:
timings[method]["fail_count"] = timings[method]["fail_count"] + 1
if elapsed_time > timings[method]["max_time"]:
timings[method]["max_time"] = elapsed_time
if elapsed_time < timings[method]["min_time"]:
timings[method]["min_time"] = elapsed_time
timings[method]["total_time"] = timings[method]["total_time"] + elapsed_time
timings[method]["avg_time"] = (
timings[method]["total_time"] / timings[method]["total_count"]
)
if data:
timings[method]["data"][str(timings[method]["total_count"])] = data
finally:
timing_lock.release()
def set_credential_thread_id(cred_exch_id, thread_id):
credential_lock.acquire()
try:
# add 2 records so we can x-ref
print("Set cred_exch_id, thread_id", cred_exch_id, thread_id)
credential_threads[thread_id] = cred_exch_id
credential_threads[cred_exch_id] = thread_id
finally:
credential_lock.release()
def add_credential_request(cred_exch_id):
credential_lock.acquire()
try:
# short circuit if we already have the response
if cred_exch_id in credential_responses:
return None
result_available = threading.Event()
credential_requests[cred_exch_id] = result_available
return result_available
finally:
credential_lock.release()
def add_credential_response(cred_exch_id, response):
credential_lock.acquire()
try:
credential_responses[cred_exch_id] = response
if cred_exch_id in credential_requests:
result_available = credential_requests[cred_exch_id]
result_available.set()
del credential_requests[cred_exch_id]
finally:
credential_lock.release()
def add_credential_problem_report(thread_id, response):
print("get problem report for thread", thread_id)
if thread_id in credential_threads:
cred_exch_id = credential_threads[thread_id]
add_credential_response(cred_exch_id, response)
else:
print("thread_id not found", thread_id)
# hack for now
if 1 == len(list(credential_requests.keys())):
cred_exch_id = list(credential_requests.keys())[0]
add_credential_response(cred_exch_id, response)
else:
print("darn, too many outstanding requests :-(")
print(credential_requests)
def add_credential_timeout_report(cred_exch_id):
print("add timeout report for cred", cred_exch_id)
response = {"success": False, "result": cred_exch_id + "::Error thread timeout"}
add_credential_response(cred_exch_id, response)
def add_credential_exception_report(cred_exch_id, exc):
print("add exception report for cred", cred_exch_id)
response = {"success": False, "result": cred_exch_id + "::" + str(exc)}
add_credential_response(cred_exch_id, response)
def get_credential_response(cred_exch_id):
credential_lock.acquire()
try:
if cred_exch_id in credential_responses:
response = credential_responses[cred_exch_id]
del credential_responses[cred_exch_id]
if cred_exch_id in credential_threads:
thread_id = credential_threads[cred_exch_id]
print("cleaning out cred_exch_id, thread_id", cred_exch_id, thread_id)
del credential_threads[cred_exch_id]
del credential_threads[thread_id]
return response
else:
return None
finally:
credential_lock.release()
TOPIC_CONNECTIONS = "connections"
TOPIC_CONNECTIONS_ACTIVITY = "connections_actvity"
TOPIC_CREDENTIALS = "issue_credential"
TOPIC_PRESENTATIONS = "presentations"
TOPIC_GET_ACTIVE_MENU = "get-active-menu"
TOPIC_PERFORM_MENU_ACTION = "perform-menu-action"
TOPIC_ISSUER_REGISTRATION = "issuer_registration"
TOPIC_PROBLEM_REPORT = "problem_report"
# max 15 second wait for a credential response (prevents blocking forever)
MAX_CRED_RESPONSE_TIMEOUT = 45
def handle_connections(state, message):
# TODO auto-accept?
print("handle_connections()", state)
return jsonify({"message": state})
def handle_credentials(state, message):
# TODO auto-respond to proof requests
print("handle_credentials()", state, message["credential_exchange_id"])
# TODO new "stored" state is being added by Nick
if "thread_id" in message:
set_credential_thread_id(
message["credential_exchange_id"], message["thread_id"]
)
if state == "credential_acked":
response = {"success": True, "result": message["credential_exchange_id"]}
add_credential_response(message["credential_exchange_id"], response)
return jsonify({"message": state})
def handle_presentations(state, message):
# TODO auto-respond to proof requests
print("handle_presentations()", state)
return jsonify({"message": state})
def handle_get_active_menu(message):
# TODO add/update issuer info?
print("handle_get_active_menu()", message)
return jsonify({})
def handle_perform_menu_action(message):
# TODO add/update issuer info?
print("handle_perform_menu_action()", message)
return jsonify({})
def handle_register_issuer(message):
# TODO add/update issuer info?
print("handle_register_issuer()")
return jsonify({})
def handle_problem_report(message):
print("handle_problem_report()", message)
msg = message["~thread"]["thid"] + "::" + message["explain-ltxt"]
response = {"success": False, "result": msg}
add_credential_problem_report(message["~thread"]["thid"], response)
return jsonify({})
class SendCredentialThread(threading.Thread):
def __init__(self, credential_definition_id, cred_offer, url, headers):
threading.Thread.__init__(self)
self.credential_definition_id = credential_definition_id
self.cred_offer = cred_offer
self.url = url
self.headers = headers
def run(self):
start_time = time.perf_counter()
method = "submit_credential.credential"
cred_data = None
try:
response = requests.post(
self.url, json.dumps(self.cred_offer), headers=self.headers
)
response.raise_for_status()
cred_data = response.json()
result_available = add_credential_request(
cred_data["credential_exchange_id"]
)
# print(
# "Sent offer",
# cred_data["credential_exchange_id"],
# cred_data["connection_id"],
# )
# wait for confirmation from the agent, which will include the credential exchange id
if result_available and not result_available.wait(
MAX_CRED_RESPONSE_TIMEOUT
):
add_credential_timeout_report(cred_data["credential_exchange_id"])
end_time = time.perf_counter()
print(
"Got credential TIMEOUT:",
cred_data["credential_exchange_id"],
cred_data["connection_id"],
)
log_timing_method(
method,
start_time,
end_time,
False,
data={
"thread_id": cred_data["thread_id"],
"credential_exchange_id": cred_data["credential_exchange_id"],
"Error": "Timeout",
"elapsed_time": (end_time - start_time),
},
)
else:
# print(
# "Got credential response:",
# cred_data["credential_exchange_id"],
# cred_data["connection_id"],
# )
end_time = time.perf_counter()
log_timing_method(method, start_time, end_time, True)
pass
except Exception as exc:
print(exc)
end_time = time.perf_counter()
# if cred_data is not set we don't have a credential to set status for
if cred_data:
add_credential_exception_report(
cred_data["credential_exchange_id"], exc
)
data = {
"thread_id": cred_data["thread_id"],
"credential_exchange_id": cred_data["credential_exchange_id"],
"Error": str(exc),
"elapsed_time": (end_time - start_time),
}
else:
data = {"Error": str(exc), "elapsed_time": (end_time - start_time)}
log_timing_method(method, start_time, end_time, False, data=data)
self.cred_response = get_credential_response(
cred_data["credential_exchange_id"]
)
processing_time = end_time - start_time
# print("Got response", self.cred_response, "time=", processing_time)
def handle_send_credential(cred_input):
# construct and send the credential
# print("Received credentials", cred_input)
global app_config
agent_admin_url = app_config["AGENT_ADMIN_URL"]
start_time = time.perf_counter()
processing_time = 0
processed_count = 0
# let's send a credential!
cred_responses = []
for credential in cred_input:
cred_def_key = "CRED_DEF_" + credential["schema"] + "_" + credential["version"]
credential_definition_id = app_config["schemas"][cred_def_key]
schema_name = credential["schema"]
schema_info = app_config["schemas"]["SCHEMA_" + schema_name]
schema_version = schema_info["version"]
schema_id = app_config["schemas"][
"SCHEMA_" + schema_name + "_" + schema_version
]
cred_req = {
"schema_issuer_did": app_config["DID"],
"issuer_did": app_config["DID"],
"schema_name": schema_name,
"cred_def_id": credential_definition_id,
"schema_version": schema_version,
"credential_proposal": {
"@type": "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview",
"attributes": [
{"name": attr_name, "value": attr_value}
for attr_name, attr_value in credential["attributes"].items()
],
},
"connection_id": app_config["TOB_CONNECTION"],
"schema_id": schema_id,
}
if TRACE_EVENTS:
cred_req["trace"] = True
thread = SendCredentialThread(
credential_definition_id,
cred_req,
agent_admin_url + "/issue-credential/send",
ADMIN_REQUEST_HEADERS,
)
thread.start()
thread.join()
cred_responses.append(thread.cred_response)
processed_count = processed_count + 1
processing_time = time.perf_counter() - start_time
print(">>> Processed", processed_count, "credentials in", processing_time)
print(" ", processing_time / processed_count, "seconds per credential")
return jsonify(cred_responses) | true | true |
f7f4b50a1751778c556d0163ed3ab50e01c4821b | 257 | py | Python | django_covid19/apps.py | zhangguoyuanshuai/Python-Covid19API | 2c5f69a8eed16df4c04af5137fb5574ea5125ee5 | [
"MIT"
] | 103 | 2020-05-07T06:13:25.000Z | 2022-03-27T14:15:35.000Z | django_covid19/apps.py | zhangguoyuanshuai/Python-Covid19API | 2c5f69a8eed16df4c04af5137fb5574ea5125ee5 | [
"MIT"
] | 13 | 2020-05-14T05:18:41.000Z | 2022-03-02T14:53:44.000Z | django_covid19/apps.py | zhangguoyuanshuai/Python-Covid19API | 2c5f69a8eed16df4c04af5137fb5574ea5125ee5 | [
"MIT"
] | 31 | 2020-05-17T13:24:09.000Z | 2022-03-28T09:22:31.000Z | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class DjangoCovid19Config(AppConfig):
name = 'django_covid19'
verbose_name = _('django_covid19')
def ready(self):
import django_covid19.signals | 23.363636 | 55 | 0.754864 | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class DjangoCovid19Config(AppConfig):
name = 'django_covid19'
verbose_name = _('django_covid19')
def ready(self):
import django_covid19.signals | true | true |
f7f4b56f63f5d8c54b6e8c706fae9c24ea4913fe | 3,742 | py | Python | pyhole/plugins/admin.py | roaet/pyhole | 472ab6e51e475188eecdb0221a10e3ccc2332e09 | [
"Apache-2.0"
] | null | null | null | pyhole/plugins/admin.py | roaet/pyhole | 472ab6e51e475188eecdb0221a10e3ccc2332e09 | [
"Apache-2.0"
] | null | null | null | pyhole/plugins/admin.py | roaet/pyhole | 472ab6e51e475188eecdb0221a10e3ccc2332e09 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010-2015 Josh Kearney
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pyhole Administration Plugin"""
from pyhole.core import plugin
from pyhole.core import utils
class Admin(plugin.Plugin):
"""Provide administration functionality"""
@plugin.hook_add_command("help")
def help(self, message, params=None, **kwargs):
"""Learn how to use active commands (ex: .help <command>)."""
if params:
doc = _find_doc_string(params)
if doc:
message.dispatch(doc)
else:
message.dispatch("No help available for '%s'" % params)
else:
message.dispatch(self.help.__doc__)
message.dispatch("Active Commands: %s" % plugin.active_commands())
message.dispatch("Active Keywords: %s" % plugin.active_keywords())
@plugin.hook_add_command("version")
def version(self, message, params=None, **kwargs):
"""Display the current version."""
message.dispatch(self.session.version)
@plugin.hook_add_command("reload")
@utils.admin
def reload(self, message, params=None, **kwargs):
"""Reload all plugins."""
self.session.load_plugins(reload_plugins=True)
message.dispatch("Loaded Plugins: %s" % plugin.active_plugins())
@plugin.hook_add_command("op")
@utils.admin
@utils.require_params
def op(self, message, params=None, **kwargs):
"""Op a user (ex: .op <channel> <nick>)."""
self.session.op_user(params)
@plugin.hook_add_command("deop")
@utils.admin
@utils.require_params
def deop(self, message, params=None, **kwargs):
"""De-op a user (ex: .deop <channel> <nick>)."""
self.session.deop_user(params)
@plugin.hook_add_command("nick")
@utils.admin
@utils.require_params
def nick(self, message, params=None, **kwargs):
"""Change nick (ex: .nick <nick>)."""
self.session.set_nick(params)
@plugin.hook_add_command("join")
@utils.admin
@utils.require_params
def join(self, message, params=None, **kwargs):
"""Join a channel (ex: .join #channel [<key>])."""
self.session.join_channel(params)
@plugin.hook_add_command("part")
@utils.admin
@utils.require_params
def part(self, message, params=None, **kwargs):
"""Part a channel (ex: .part <channel>)."""
self.session.part_channel(params)
@plugin.hook_add_command("say")
@utils.admin
@utils.require_params
def say(self, message, params=None, **kwargs):
"""Send a PRIVMSG (ex: .say <channel>|<nick> message)."""
(target, msg) = params.split(" ", 1)
self.session.privmsg(target, msg)
def _find_doc_string(params):
"""Find the doc string for a plugin, command or keyword hook."""
for p in plugin.active_plugin_classes():
if p.__name__.upper() == params.upper():
return p.__doc__
for _, cmd_hook, cmd in plugin.hook_get_commands():
if cmd.upper() == params.upper():
return cmd_hook.__doc__
for _, kw_hook, kw in plugin.hook_get_keywords():
if kw.upper() == params.upper():
return kw_hook.__doc__
return None
| 34.018182 | 78 | 0.640299 |
from pyhole.core import plugin
from pyhole.core import utils
class Admin(plugin.Plugin):
@plugin.hook_add_command("help")
def help(self, message, params=None, **kwargs):
if params:
doc = _find_doc_string(params)
if doc:
message.dispatch(doc)
else:
message.dispatch("No help available for '%s'" % params)
else:
message.dispatch(self.help.__doc__)
message.dispatch("Active Commands: %s" % plugin.active_commands())
message.dispatch("Active Keywords: %s" % plugin.active_keywords())
@plugin.hook_add_command("version")
def version(self, message, params=None, **kwargs):
message.dispatch(self.session.version)
@plugin.hook_add_command("reload")
@utils.admin
def reload(self, message, params=None, **kwargs):
self.session.load_plugins(reload_plugins=True)
message.dispatch("Loaded Plugins: %s" % plugin.active_plugins())
@plugin.hook_add_command("op")
@utils.admin
@utils.require_params
def op(self, message, params=None, **kwargs):
self.session.op_user(params)
@plugin.hook_add_command("deop")
@utils.admin
@utils.require_params
def deop(self, message, params=None, **kwargs):
self.session.deop_user(params)
@plugin.hook_add_command("nick")
@utils.admin
@utils.require_params
def nick(self, message, params=None, **kwargs):
self.session.set_nick(params)
@plugin.hook_add_command("join")
@utils.admin
@utils.require_params
def join(self, message, params=None, **kwargs):
self.session.join_channel(params)
@plugin.hook_add_command("part")
@utils.admin
@utils.require_params
def part(self, message, params=None, **kwargs):
self.session.part_channel(params)
@plugin.hook_add_command("say")
@utils.admin
@utils.require_params
def say(self, message, params=None, **kwargs):
(target, msg) = params.split(" ", 1)
self.session.privmsg(target, msg)
def _find_doc_string(params):
for p in plugin.active_plugin_classes():
if p.__name__.upper() == params.upper():
return p.__doc__
for _, cmd_hook, cmd in plugin.hook_get_commands():
if cmd.upper() == params.upper():
return cmd_hook.__doc__
for _, kw_hook, kw in plugin.hook_get_keywords():
if kw.upper() == params.upper():
return kw_hook.__doc__
return None
| true | true |
f7f4b5b7445d488deeb01eef9aec21d416086347 | 10,707 | py | Python | keras/utils/layer_utils.py | IndigenousEngineering/keras_docker_with_NLTK | 075958831a3f74763ad1e094b3642f5174c7f817 | [
"MIT"
] | 300 | 2018-04-04T05:01:21.000Z | 2022-02-25T18:56:04.000Z | keras/utils/layer_utils.py | Qily/keras | 1d81a20292ca6926e595d06a6cd725dbb104a146 | [
"MIT"
] | 163 | 2018-04-03T17:41:22.000Z | 2021-09-03T16:44:04.000Z | keras/utils/layer_utils.py | Qily/keras | 1d81a20292ca6926e595d06a6cd725dbb104a146 | [
"MIT"
] | 72 | 2018-04-21T06:42:30.000Z | 2021-12-26T06:02:42.000Z | """Utilities related to layer/model functionality.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .conv_utils import convert_kernel
from .. import backend as K
import numpy as np
def count_params(weights):
"""Count the total number of scalars composing the weights.
# Arguments
weights: An iterable containing the weights on which to compute params
# Returns
The total number of scalars composing the weights
"""
return int(np.sum([K.count_params(p) for p in set(weights)]))
def print_summary(model, line_length=None, positions=None, print_fn=None):
"""Prints a summary of a model.
# Arguments
model: Keras model instance.
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements in each line.
If not provided, defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
It defaults to `print` (prints to stdout).
"""
if print_fn is None:
print_fn = print
if model.__class__.__name__ == 'Sequential':
sequential_like = True
elif not model._is_graph_network:
# We treat subclassed models as a simple sequence of layers,
# for logging purposes.
sequential_like = True
else:
sequential_like = True
nodes_by_depth = model._nodes_by_depth.values()
nodes = []
for v in nodes_by_depth:
if (len(v) > 1) or (len(v) == 1 and len(v[0].inbound_layers) > 1):
# if the model has multiple nodes
# or if the nodes have multiple inbound_layers
# the model is no longer sequential
sequential_like = False
break
nodes += v
if sequential_like:
# search for shared layers
for layer in model.layers:
flag = False
for node in layer._inbound_nodes:
if node in nodes:
if flag:
sequential_like = False
break
else:
flag = True
if not sequential_like:
break
if sequential_like:
line_length = line_length or 65
positions = positions or [.45, .85, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #']
else:
line_length = line_length or 98
positions = positions or [.33, .55, .67, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)',
'Output Shape',
'Param #',
'Connected to']
relevant_nodes = []
for v in model._nodes_by_depth.values():
relevant_nodes += v
def print_row(fields, positions):
line = ''
for i in range(len(fields)):
if i > 0:
line = line[:-1] + ' '
line += str(fields[i])
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print_fn(line)
print_fn('_' * line_length)
print_row(to_display, positions)
print_fn('=' * line_length)
def print_layer_summary(layer):
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
name = layer.name
cls_name = layer.__class__.__name__
fields = [name + ' (' + cls_name + ')',
output_shape, layer.count_params()]
print_row(fields, positions)
def print_layer_summary_with_connections(layer):
"""Prints a summary for a single layer.
# Arguments
layer: target layer.
"""
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
connections = []
for node in layer._inbound_nodes:
if relevant_nodes and node not in relevant_nodes:
# node is not part of the current network
continue
for i in range(len(node.inbound_layers)):
inbound_layer = node.inbound_layers[i].name
inbound_node_index = node.node_indices[i]
inbound_tensor_index = node.tensor_indices[i]
connections.append(inbound_layer +
'[' + str(inbound_node_index) + '][' +
str(inbound_tensor_index) + ']')
name = layer.name
cls_name = layer.__class__.__name__
if not connections:
first_connection = ''
else:
first_connection = connections[0]
fields = [name +
' (' + cls_name + ')',
output_shape,
layer.count_params(),
first_connection]
print_row(fields, positions)
if len(connections) > 1:
for i in range(1, len(connections)):
fields = ['', '', '', connections[i]]
print_row(fields, positions)
layers = model.layers
for i in range(len(layers)):
if sequential_like:
print_layer_summary(layers[i])
else:
print_layer_summary_with_connections(layers[i])
if i == len(layers) - 1:
print_fn('=' * line_length)
else:
print_fn('_' * line_length)
model._check_trainable_weights_consistency()
if hasattr(model, '_collected_trainable_weights'):
trainable_count = count_params(model._collected_trainable_weights)
else:
trainable_count = count_params(model.trainable_weights)
non_trainable_count = count_params(model.non_trainable_weights)
print_fn(
'Total params: {:,}'.format(trainable_count + non_trainable_count))
print_fn('Trainable params: {:,}'.format(trainable_count))
print_fn('Non-trainable params: {:,}'.format(non_trainable_count))
print_fn('_' * line_length)
def convert_all_kernels_in_model(model):
"""Converts all convolution kernels in a model from Theano to TensorFlow.
Also works from TensorFlow to Theano.
# Arguments
model: target model for the conversion.
"""
# Note: SeparableConvolution not included
# since only supported by TF.
conv_classes = {
'Conv1D',
'Conv2D',
'Conv3D',
'Conv2DTranspose',
}
to_assign = []
for layer in model.layers:
if layer.__class__.__name__ in conv_classes:
original_kernel = K.get_value(layer.kernel)
converted_kernel = convert_kernel(original_kernel)
to_assign.append((layer.kernel, converted_kernel))
K.batch_set_value(to_assign)
def convert_dense_weights_data_format(dense,
previous_feature_map_shape,
target_data_format='channels_first'):
"""Utility useful when changing a convnet's `data_format`.
When porting the weights of a convnet from one data format to the other,
if the convnet includes a `Flatten` layer
(applied to the last convolutional feature map)
followed by a `Dense` layer, the weights of that `Dense` layer
should be updated to reflect the new dimension ordering.
# Arguments
dense: The target `Dense` layer.
previous_feature_map_shape: A shape tuple of 3 integers,
e.g. `(512, 7, 7)`. The shape of the convolutional
feature map right before the `Flatten` layer that
came before the target `Dense` layer.
target_data_format: One of "channels_last", "channels_first".
Set it "channels_last"
if converting a "channels_first" model to "channels_last",
or reciprocally.
"""
assert target_data_format in {'channels_last', 'channels_first'}
kernel, bias = dense.get_weights()
for i in range(kernel.shape[1]):
if target_data_format == 'channels_first':
c, h, w = previous_feature_map_shape
original_fm_shape = (h, w, c)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (2, 0, 1)) # last -> first
else:
h, w, c = previous_feature_map_shape
original_fm_shape = (c, h, w)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (1, 2, 0)) # first -> last
kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))
dense.set_weights([kernel, bias])
def get_source_inputs(tensor, layer=None, node_index=None):
"""Returns the list of input tensors necessary to compute `tensor`.
Output will always be a list of tensors
(potentially with 1 element).
# Arguments
tensor: The tensor to start from.
layer: Origin layer of the tensor. Will be
determined via tensor._keras_history if not provided.
node_index: Origin node index of the tensor.
# Returns
List of input tensors.
"""
if not hasattr(tensor, '_keras_history'):
return tensor
if layer is None or node_index:
layer, node_index, _ = tensor._keras_history
if not layer._inbound_nodes:
return [tensor]
else:
node = layer._inbound_nodes[node_index]
if not node.inbound_layers:
# Reached an Input layer, stop recursion.
return node.input_tensors
else:
source_tensors = []
for i in range(len(node.inbound_layers)):
x = node.input_tensors[i]
layer = node.inbound_layers[i]
node_index = node.node_indices[i]
previous_sources = get_source_inputs(x,
layer,
node_index)
# Avoid input redundancy.
for x in previous_sources:
if x not in source_tensors:
source_tensors.append(x)
return source_tensors
| 36.667808 | 79 | 0.580648 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .conv_utils import convert_kernel
from .. import backend as K
import numpy as np
def count_params(weights):
return int(np.sum([K.count_params(p) for p in set(weights)]))
def print_summary(model, line_length=None, positions=None, print_fn=None):
if print_fn is None:
print_fn = print
if model.__class__.__name__ == 'Sequential':
sequential_like = True
elif not model._is_graph_network:
sequential_like = True
else:
sequential_like = True
nodes_by_depth = model._nodes_by_depth.values()
nodes = []
for v in nodes_by_depth:
if (len(v) > 1) or (len(v) == 1 and len(v[0].inbound_layers) > 1):
sequential_like = False
break
nodes += v
if sequential_like:
for layer in model.layers:
flag = False
for node in layer._inbound_nodes:
if node in nodes:
if flag:
sequential_like = False
break
else:
flag = True
if not sequential_like:
break
if sequential_like:
line_length = line_length or 65
positions = positions or [.45, .85, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
to_display = ['Layer (type)', 'Output Shape', 'Param #']
else:
line_length = line_length or 98
positions = positions or [.33, .55, .67, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
to_display = ['Layer (type)',
'Output Shape',
'Param #',
'Connected to']
relevant_nodes = []
for v in model._nodes_by_depth.values():
relevant_nodes += v
def print_row(fields, positions):
line = ''
for i in range(len(fields)):
if i > 0:
line = line[:-1] + ' '
line += str(fields[i])
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print_fn(line)
print_fn('_' * line_length)
print_row(to_display, positions)
print_fn('=' * line_length)
def print_layer_summary(layer):
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
name = layer.name
cls_name = layer.__class__.__name__
fields = [name + ' (' + cls_name + ')',
output_shape, layer.count_params()]
print_row(fields, positions)
def print_layer_summary_with_connections(layer):
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
connections = []
for node in layer._inbound_nodes:
if relevant_nodes and node not in relevant_nodes:
continue
for i in range(len(node.inbound_layers)):
inbound_layer = node.inbound_layers[i].name
inbound_node_index = node.node_indices[i]
inbound_tensor_index = node.tensor_indices[i]
connections.append(inbound_layer +
'[' + str(inbound_node_index) + '][' +
str(inbound_tensor_index) + ']')
name = layer.name
cls_name = layer.__class__.__name__
if not connections:
first_connection = ''
else:
first_connection = connections[0]
fields = [name +
' (' + cls_name + ')',
output_shape,
layer.count_params(),
first_connection]
print_row(fields, positions)
if len(connections) > 1:
for i in range(1, len(connections)):
fields = ['', '', '', connections[i]]
print_row(fields, positions)
layers = model.layers
for i in range(len(layers)):
if sequential_like:
print_layer_summary(layers[i])
else:
print_layer_summary_with_connections(layers[i])
if i == len(layers) - 1:
print_fn('=' * line_length)
else:
print_fn('_' * line_length)
model._check_trainable_weights_consistency()
if hasattr(model, '_collected_trainable_weights'):
trainable_count = count_params(model._collected_trainable_weights)
else:
trainable_count = count_params(model.trainable_weights)
non_trainable_count = count_params(model.non_trainable_weights)
print_fn(
'Total params: {:,}'.format(trainable_count + non_trainable_count))
print_fn('Trainable params: {:,}'.format(trainable_count))
print_fn('Non-trainable params: {:,}'.format(non_trainable_count))
print_fn('_' * line_length)
def convert_all_kernels_in_model(model):
conv_classes = {
'Conv1D',
'Conv2D',
'Conv3D',
'Conv2DTranspose',
}
to_assign = []
for layer in model.layers:
if layer.__class__.__name__ in conv_classes:
original_kernel = K.get_value(layer.kernel)
converted_kernel = convert_kernel(original_kernel)
to_assign.append((layer.kernel, converted_kernel))
K.batch_set_value(to_assign)
def convert_dense_weights_data_format(dense,
previous_feature_map_shape,
target_data_format='channels_first'):
assert target_data_format in {'channels_last', 'channels_first'}
kernel, bias = dense.get_weights()
for i in range(kernel.shape[1]):
if target_data_format == 'channels_first':
c, h, w = previous_feature_map_shape
original_fm_shape = (h, w, c)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (2, 0, 1))
else:
h, w, c = previous_feature_map_shape
original_fm_shape = (c, h, w)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (1, 2, 0))
kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))
dense.set_weights([kernel, bias])
def get_source_inputs(tensor, layer=None, node_index=None):
if not hasattr(tensor, '_keras_history'):
return tensor
if layer is None or node_index:
layer, node_index, _ = tensor._keras_history
if not layer._inbound_nodes:
return [tensor]
else:
node = layer._inbound_nodes[node_index]
if not node.inbound_layers:
return node.input_tensors
else:
source_tensors = []
for i in range(len(node.inbound_layers)):
x = node.input_tensors[i]
layer = node.inbound_layers[i]
node_index = node.node_indices[i]
previous_sources = get_source_inputs(x,
layer,
node_index)
for x in previous_sources:
if x not in source_tensors:
source_tensors.append(x)
return source_tensors
| true | true |
f7f4b5e2ecacf272093bdcb87da37d04d9a3e1b2 | 69,444 | py | Python | hooks/webkitpy/common/checkout/scm/scm_unittest.py | nizovn/luna-sysmgr | 48b7e2546e81d6ad1604353f2e5ab797a7d1667c | [
"Apache-2.0"
] | 3 | 2018-11-16T14:51:17.000Z | 2019-11-21T10:55:24.000Z | hooks/webkitpy/common/checkout/scm/scm_unittest.py | nizovn/luna-sysmgr | 48b7e2546e81d6ad1604353f2e5ab797a7d1667c | [
"Apache-2.0"
] | 1 | 2021-02-20T13:12:15.000Z | 2021-02-20T13:12:15.000Z | hooks/webkitpy/common/checkout/scm/scm_unittest.py | ericblade/luna-sysmgr | 82d5d7ced4ba21d3802eb2c8ae063236b6562331 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2009 Google Inc. All rights reserved.
# Copyright (C) 2009 Apple Inc. All rights reserved.
# Copyright (C) 2011 Daniel Bates (dbates@intudata.com). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import with_statement
import atexit
import base64
import codecs
import getpass
import os
import os.path
import re
import stat
import sys
import subprocess
import tempfile
import time
import unittest
import urllib
import shutil
from datetime import date
from webkitpy.common.checkout.checkout import Checkout
from webkitpy.common.config.committers import Committer # FIXME: This should not be needed
from webkitpy.common.net.bugzilla import Attachment # FIXME: This should not be needed
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockExecutive
from .detection import find_checkout_root, default_scm, detect_scm_system
from .git import Git, AmbiguousCommitError
from .scm import SCM, CheckoutNeedsUpdate, commit_error_handler, AuthenticationError
from .svn import SVN
# We cache the mock SVN repo so that we don't create it again for each call to an SVNTest or GitTest test_ method.
# We store it in a global variable so that we can delete this cached repo on exit(3).
# FIXME: Remove this once we migrate to Python 2.7. Unittest in Python 2.7 supports module-specific setup and teardown functions.
cached_svn_repo_path = None
def remove_dir(path):
# Change directory to / to ensure that we aren't in the directory we want to delete.
os.chdir('/')
shutil.rmtree(path)
# FIXME: Remove this once we migrate to Python 2.7. Unittest in Python 2.7 supports module-specific setup and teardown functions.
@atexit.register
def delete_cached_mock_repo_at_exit():
if cached_svn_repo_path:
remove_dir(cached_svn_repo_path)
# Eventually we will want to write tests which work for both scms. (like update_webkit, changed_files, etc.)
# Perhaps through some SCMTest base-class which both SVNTest and GitTest inherit from.
def run_command(*args, **kwargs):
# FIXME: This should not be a global static.
# New code should use Executive.run_command directly instead
return Executive().run_command(*args, **kwargs)
# FIXME: This should be unified into one of the executive.py commands!
# Callers could use run_and_throw_if_fail(args, cwd=cwd, quiet=True)
def run_silent(args, cwd=None):
# Note: Not thread safe: http://bugs.python.org/issue2320
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
process.communicate() # ignore output
exit_code = process.wait()
if exit_code:
raise ScriptError('Failed to run "%s" exit_code: %d cwd: %s' % (args, exit_code, cwd))
def write_into_file_at_path(file_path, contents, encoding="utf-8"):
if encoding:
with codecs.open(file_path, "w", encoding) as file:
file.write(contents)
else:
with open(file_path, "w") as file:
file.write(contents)
def read_from_path(file_path, encoding="utf-8"):
with codecs.open(file_path, "r", encoding) as file:
return file.read()
def _make_diff(command, *args):
# We use this wrapper to disable output decoding. diffs should be treated as
# binary files since they may include text files of multiple differnet encodings.
# FIXME: This should use an Executive.
return run_command([command, "diff"] + list(args), decode_output=False)
def _svn_diff(*args):
return _make_diff("svn", *args)
def _git_diff(*args):
    # Convenience wrapper: "git diff <args>" with binary-safe output.
    return _make_diff("git", *args)
# Exists to share svn repository creation code between the git and svn tests
class SVNTestRepository:
    """Builds (and caches) the mock SVN repository shared by the git and svn tests."""

    @classmethod
    def _svn_add(cls, path):
        run_command(["svn", "add", path])

    @classmethod
    def _svn_commit(cls, message):
        run_command(["svn", "commit", "--quiet", "--message", message])

    @classmethod
    def _setup_test_commits(cls, svn_repo_url):
        # Populates the repo at svn_repo_url with the five revisions the
        # shared tests expect (see the _shared_test_* helpers in SCMTest).
        svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
        run_command(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])

        # Add some test commits
        os.chdir(svn_checkout_path)

        write_into_file_at_path("test_file", "test1")
        cls._svn_add("test_file")
        cls._svn_commit("initial commit")

        write_into_file_at_path("test_file", "test1test2")
        # This used to be the last commit, but doing so broke
        # GitTest.test_apply_git_patch which use the inverse diff of the last commit.
        # svn-apply fails to remove directories in Git, see:
        # https://bugs.webkit.org/show_bug.cgi?id=34871
        os.mkdir("test_dir")
        # Slash should always be the right path separator since we use cygwin on Windows.
        test_file3_path = "test_dir/test_file3"
        write_into_file_at_path(test_file3_path, "third file")
        cls._svn_add("test_dir")
        cls._svn_commit("second commit")

        write_into_file_at_path("test_file", "test1test2test3\n")
        write_into_file_at_path("test_file2", "second file")
        cls._svn_add("test_file2")
        cls._svn_commit("third commit")

        # This 4th commit is used to make sure that our patch file handling
        # code correctly treats patches as binary and does not attempt to
        # decode them assuming they're utf-8.
        write_into_file_at_path("test_file", u"latin1 test: \u00A0\n", "latin1")
        write_into_file_at_path("test_file2", u"utf-8 test: \u00A0\n", "utf-8")
        cls._svn_commit("fourth commit")

        # svn does not seem to update after commit as I would expect.
        run_command(['svn', 'update'])
        remove_dir(svn_checkout_path)

    # This is a hot function since it's invoked by unittest before calling each test_ method in SVNTest and
    # GitTest. We create a mock SVN repo once and then perform an SVN checkout from a filesystem copy of
    # it since it's expensive to create the mock repo.
    @classmethod
    def setup(cls, test_object):
        global cached_svn_repo_path
        if not cached_svn_repo_path:
            cached_svn_repo_path = cls._setup_mock_repo()

        # Each test gets its own copy of the cached repo plus a fresh checkout.
        test_object.temp_directory = tempfile.mkdtemp(suffix="svn_test")
        test_object.svn_repo_path = os.path.join(test_object.temp_directory, "repo")
        test_object.svn_repo_url = "file://%s" % test_object.svn_repo_path
        test_object.svn_checkout_path = os.path.join(test_object.temp_directory, "checkout")
        shutil.copytree(cached_svn_repo_path, test_object.svn_repo_path)
        run_command(['svn', 'checkout', '--quiet', test_object.svn_repo_url + "/trunk", test_object.svn_checkout_path])

    @classmethod
    def _setup_mock_repo(cls):
        # Create an test SVN repository
        svn_repo_path = tempfile.mkdtemp(suffix="svn_test_repo")
        svn_repo_url = "file://%s" % svn_repo_path  # Not sure this will work on windows
        # git svn complains if we don't pass --pre-1.5-compatible, not sure why:
        # Expected FS format '2'; found format '3' at /usr/local/libexec/git-core//git-svn line 1477
        run_command(['svnadmin', 'create', '--pre-1.5-compatible', svn_repo_path])

        # Create a test svn checkout
        svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
        run_command(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])

        # Create and checkout a trunk dir to match the standard svn configuration to match git-svn's expectations
        os.chdir(svn_checkout_path)
        os.mkdir('trunk')
        cls._svn_add('trunk')
        # We can add tags and branches as well if we ever need to test those.
        cls._svn_commit('add trunk')

        # Change directory out of the svn checkout so we can delete the checkout directory.
        remove_dir(svn_checkout_path)

        cls._setup_test_commits(svn_repo_url + "/trunk")
        return svn_repo_path

    @classmethod
    def tear_down(cls, test_object):
        remove_dir(test_object.temp_directory)

        # Now that we've deleted the checkout paths, cwddir may be invalid
        # Change back to a valid directory so that later calls to os.getcwd() do not fail.
        if os.path.isabs(__file__):
            path = os.path.dirname(__file__)
        else:
            path = sys.path[0]
        os.chdir(detect_scm_system(path).checkout_root)
class StandaloneFunctionsTest(unittest.TestCase):
    """This class tests any standalone/top-level functions in the package."""

    def setUp(self):
        self.orig_cwd = os.path.abspath(os.getcwd())
        self.orig_abspath = os.path.abspath

        # We capture but ignore the output from stderr to reduce unwanted
        # logging.
        self.output = OutputCapture()
        self.output.capture_output()

    def tearDown(self):
        os.chdir(self.orig_cwd)
        os.path.abspath = self.orig_abspath
        self.output.restore_output()

    def test_find_checkout_root(self):
        # Test from inside the tree.
        os.chdir(sys.path[0])
        # Renamed from "dir": avoid shadowing the dir() builtin.
        checkout_root = find_checkout_root()
        self.assertNotEqual(checkout_root, None)
        self.assertTrue(os.path.exists(checkout_root))

        # Test from outside the tree.
        os.chdir(os.path.expanduser("~"))
        checkout_root = find_checkout_root()
        self.assertNotEqual(checkout_root, None)
        self.assertTrue(os.path.exists(checkout_root))

        # Mock out abspath() to test being not in a checkout at all.
        os.path.abspath = lambda x: "/"
        self.assertRaises(SystemExit, find_checkout_root)
        os.path.abspath = self.orig_abspath

    def test_default_scm(self):
        # Test from inside the tree.
        os.chdir(sys.path[0])
        scm = default_scm()
        self.assertNotEqual(scm, None)

        # Test from outside the tree.
        os.chdir(os.path.expanduser("~"))
        checkout_root = find_checkout_root()
        self.assertNotEqual(checkout_root, None)

        # Mock out abspath() to test being not in a checkout at all.
        os.path.abspath = lambda x: "/"
        self.assertRaises(SystemExit, default_scm)
        os.path.abspath = self.orig_abspath
# For testing the SCM baseclass directly.
class SCMClassTests(unittest.TestCase):
    """Tests the SCM base-class plumbing (run_command, error handlers) directly."""

    def setUp(self):
        self.dev_null = open(os.devnull, "w")  # Used to make our Popen calls quiet.

    def tearDown(self):
        self.dev_null.close()

    def test_run_command_with_pipe(self):
        input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null)
        self.assertEqual(run_command(['grep', 'bar'], input=input_process.stdout), "bar\n")

        # Test the non-pipe case too:
        self.assertEqual(run_command(['grep', 'bar'], input="foo\nbar"), "bar\n")

        command_returns_non_zero = ['/bin/sh', '--invalid-option']
        # Test when the input pipe process fails.
        input_process = subprocess.Popen(command_returns_non_zero, stdout=subprocess.PIPE, stderr=self.dev_null)
        self.assertTrue(input_process.poll() != 0)
        self.assertRaises(ScriptError, run_command, ['grep', 'bar'], input=input_process.stdout)

        # Test when the run_command process fails.
        input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null)  # grep shows usage and calls exit(2) when called w/o arguments.
        self.assertRaises(ScriptError, run_command, command_returns_non_zero, input=input_process.stdout)

    def test_error_handlers(self):
        # Failure messages as emitted by git-svn and svn when a commit races
        # an update; commit_error_handler must map these to CheckoutNeedsUpdate.
        git_failure_message="Merge conflict during commit: Your file or directory 'WebCore/ChangeLog' is probably out-of-date: resource out of date; try updating at /usr/local/libexec/git-core//git-svn line 469"
        svn_failure_message="""svn: Commit failed (details follow):
svn: File or directory 'ChangeLog' is out of date; try updating
svn: resource out of date; try updating
"""
        command_does_not_exist = ['does_not_exist', 'invalid_option']
        self.assertRaises(OSError, run_command, command_does_not_exist)
        self.assertRaises(OSError, run_command, command_does_not_exist, error_handler=Executive.ignore_error)

        command_returns_non_zero = ['/bin/sh', '--invalid-option']
        self.assertRaises(ScriptError, run_command, command_returns_non_zero)
        # Check if returns error text:
        self.assertTrue(run_command(command_returns_non_zero, error_handler=Executive.ignore_error))

        self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=git_failure_message))
        self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=svn_failure_message))
        self.assertRaises(ScriptError, commit_error_handler, ScriptError(output='blah blah blah'))
# GitTest and SVNTest inherit from this so any test_ methods here will be run once for this class and then once for each subclass.
# GitTest and SVNTest inherit from this so any test_ methods here will be run once for this class and then once for each subclass.
class SCMTest(unittest.TestCase):
    """Shared helpers for the SVN and Git test cases.

    Subclasses are expected to set self.scm (and, for patch tests,
    self.checkout); the _shared_test_* methods are invoked from thin
    test_* wrappers in each subclass.
    """

    def _create_patch(self, patch_contents):
        # FIXME: This code is brittle if the Attachment API changes.
        attachment = Attachment({"bug_id": 12345}, None)
        attachment.contents = lambda: patch_contents

        joe_cool = Committer("Joe Cool", "joe@cool.com")
        attachment.reviewer = lambda: joe_cool

        return attachment

    def _setup_webkittools_scripts_symlink(self, local_scm):
        # Symlink the real Tools/Scripts directory into the test checkout so
        # helper scripts (svn-apply, etc.) are available there.
        webkit_scm = detect_scm_system(os.path.dirname(os.path.abspath(__file__)))
        webkit_scripts_directory = webkit_scm.scripts_directory()
        local_scripts_directory = local_scm.scripts_directory()
        os.mkdir(os.path.dirname(local_scripts_directory))
        os.symlink(webkit_scripts_directory, local_scripts_directory)

    # Tests which both GitTest and SVNTest should run.
    # FIXME: There must be a simpler way to add these w/o adding a wrapper method to both subclasses
    def _shared_test_changed_files(self):
        write_into_file_at_path("test_file", "changed content")
        self.assertEqual(self.scm.changed_files(), ["test_file"])
        write_into_file_at_path("test_dir/test_file3", "new stuff")
        self.assertEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
        old_cwd = os.getcwd()
        os.chdir("test_dir")
        # Validate that changed_files does not change with our cwd, see bug 37015.
        self.assertEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
        os.chdir(old_cwd)

    def _shared_test_added_files(self):
        write_into_file_at_path("test_file", "changed content")
        self.assertEqual(self.scm.added_files(), [])

        write_into_file_at_path("added_file", "new stuff")
        self.scm.add("added_file")

        os.mkdir("added_dir")
        write_into_file_at_path("added_dir/added_file2", "new stuff")
        self.scm.add("added_dir")

        # SVN reports directory changes, Git does not.
        added_files = self.scm.added_files()
        if "added_dir" in added_files:
            added_files.remove("added_dir")
        self.assertEqual(added_files, ["added_dir/added_file2", "added_file"])

        # Test also to make sure clean_working_directory removes added files
        self.scm.clean_working_directory()
        self.assertEqual(self.scm.added_files(), [])
        self.assertFalse(os.path.exists("added_file"))
        self.assertFalse(os.path.exists("added_dir"))

    def _shared_test_changed_files_for_revision(self):
        # SVN reports directory changes, Git does not.
        changed_files = self.scm.changed_files_for_revision(3)
        if "test_dir" in changed_files:
            changed_files.remove("test_dir")
        self.assertEqual(changed_files, ["test_dir/test_file3", "test_file"])
        self.assertEqual(sorted(self.scm.changed_files_for_revision(4)), sorted(["test_file", "test_file2"]))  # Git and SVN return different orders.
        self.assertEqual(self.scm.changed_files_for_revision(2), ["test_file"])

    def _shared_test_contents_at_revision(self):
        self.assertEqual(self.scm.contents_at_revision("test_file", 3), "test1test2")
        self.assertEqual(self.scm.contents_at_revision("test_file", 4), "test1test2test3\n")

        # Verify that contents_at_revision returns a byte array, aka str():
        self.assertEqual(self.scm.contents_at_revision("test_file", 5), u"latin1 test: \u00A0\n".encode("latin1"))
        self.assertEqual(self.scm.contents_at_revision("test_file2", 5), u"utf-8 test: \u00A0\n".encode("utf-8"))

        self.assertEqual(self.scm.contents_at_revision("test_file2", 4), "second file")

        # Files which don't exist:
        # Currently we raise instead of returning None because detecting the difference between
        # "file not found" and any other error seems impossible with svn (git seems to expose such through the return code).
        self.assertRaises(ScriptError, self.scm.contents_at_revision, "test_file2", 2)
        self.assertRaises(ScriptError, self.scm.contents_at_revision, "does_not_exist", 2)

    def _shared_test_revisions_changing_file(self):
        self.assertEqual(self.scm.revisions_changing_file("test_file"), [5, 4, 3, 2])
        self.assertRaises(ScriptError, self.scm.revisions_changing_file, "non_existent_file")

    def _shared_test_committer_email_for_revision(self):
        self.assertEqual(self.scm.committer_email_for_revision(3), getpass.getuser())  # Committer "email" will be the current user

    def _shared_test_reverse_diff(self):
        self._setup_webkittools_scripts_symlink(self.scm)  # Git's apply_reverse_diff uses resolve-ChangeLogs
        # Only test the simple case, as any other will end up with conflict markers.
        self.scm.apply_reverse_diff('5')
        self.assertEqual(read_from_path('test_file'), "test1test2test3\n")

    def _shared_test_diff_for_revision(self):
        # Patch formats are slightly different between svn and git, so just regexp for things we know should be there.
        r3_patch = self.scm.diff_for_revision(4)
        self.assertTrue(re.search('test3', r3_patch))
        self.assertFalse(re.search('test4', r3_patch))
        self.assertTrue(re.search('test2', r3_patch))
        self.assertTrue(re.search('test2', self.scm.diff_for_revision(3)))

    def _shared_test_svn_apply_git_patch(self):
        # Exercises svn-apply against git-format binary patches:
        # addition, modification, then deletion of the same file.
        self._setup_webkittools_scripts_symlink(self.scm)
        git_binary_addition = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
new file mode 100644
index 0000000000000000000000000000000000000000..64a9532e7794fcd791f6f12157406d90
60151690
GIT binary patch
literal 512
zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
literal 0
HcmV?d00001
"""
        self.checkout.apply_patch(self._create_patch(git_binary_addition))
        added = read_from_path('fizzbuzz7.gif', encoding=None)
        self.assertEqual(512, len(added))
        self.assertTrue(added.startswith('GIF89a'))
        self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files())

        # The file already exists.
        self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_addition))

        git_binary_modification = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
index 64a9532e7794fcd791f6f12157406d9060151690..323fae03f4606ea9991df8befbb2fca7
GIT binary patch
literal 7
OcmYex&reD$;sO8*F9L)B
literal 512
zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
"""
        self.checkout.apply_patch(self._create_patch(git_binary_modification))
        modified = read_from_path('fizzbuzz7.gif', encoding=None)
        self.assertEqual('foobar\n', modified)
        self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files())

        # Applying the same modification should fail.
        self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_modification))

        git_binary_deletion = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
deleted file mode 100644
index 323fae0..0000000
GIT binary patch
literal 0
HcmV?d00001
literal 7
OcmYex&reD$;sO8*F9L)B
"""
        self.checkout.apply_patch(self._create_patch(git_binary_deletion))
        self.assertFalse(os.path.exists('fizzbuzz7.gif'))
        self.assertFalse('fizzbuzz7.gif' in self.scm.changed_files())

        # Cannot delete again.
        self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_deletion))

    def _shared_test_add_recursively(self):
        os.mkdir("added_dir")
        write_into_file_at_path("added_dir/added_file", "new stuff")
        self.scm.add("added_dir/added_file")
        self.assertTrue("added_dir/added_file" in self.scm.added_files())

    def _shared_test_delete_recursively(self):
        os.mkdir("added_dir")
        write_into_file_at_path("added_dir/added_file", "new stuff")
        self.scm.add("added_dir/added_file")
        self.assertTrue("added_dir/added_file" in self.scm.added_files())
        self.scm.delete("added_dir/added_file")
        self.assertFalse("added_dir" in self.scm.added_files())

    def _shared_test_delete_recursively_or_not(self):
        # Deleting one of two files in a directory must not delete the sibling.
        os.mkdir("added_dir")
        write_into_file_at_path("added_dir/added_file", "new stuff")
        write_into_file_at_path("added_dir/another_added_file", "more new stuff")
        self.scm.add("added_dir/added_file")
        self.scm.add("added_dir/another_added_file")
        self.assertTrue("added_dir/added_file" in self.scm.added_files())
        self.assertTrue("added_dir/another_added_file" in self.scm.added_files())
        self.scm.delete("added_dir/added_file")
        self.assertTrue("added_dir/another_added_file" in self.scm.added_files())

    def _shared_test_exists(self, scm, commit_function):
        os.chdir(scm.checkout_root)
        self.assertFalse(scm.exists('foo.txt'))
        write_into_file_at_path('foo.txt', 'some stuff')
        self.assertFalse(scm.exists('foo.txt'))
        scm.add('foo.txt')
        commit_function('adding foo')
        self.assertTrue(scm.exists('foo.txt'))
        scm.delete('foo.txt')
        commit_function('deleting foo')
        self.assertFalse(scm.exists('foo.txt'))

    def _shared_test_head_svn_revision(self):
        self.assertEqual(self.scm.head_svn_revision(), '5')
# Context manager that overrides the current timezone.
# Context manager that overrides the current timezone.
class TimezoneOverride(object):
    """Temporarily overrides the process timezone via the TZ environment variable.

    Only effective where time.tzset() exists (i.e. not on Windows);
    elsewhere entering and exiting are no-ops.
    """

    def __init__(self, timezone_string):
        self._timezone_string = timezone_string

    def __enter__(self):
        if hasattr(time, 'tzset'):
            self._saved_timezone = os.environ.get('TZ', None)
            os.environ['TZ'] = self._timezone_string
            time.tzset()

    def __exit__(self, exc_type, exc_value, traceback):
        # Parameter renamed from "type" to avoid shadowing the builtin.
        if hasattr(time, 'tzset'):
            # Compare against None rather than truthiness: an originally-empty
            # TZ value ('') must be restored, not deleted.
            if self._saved_timezone is not None:
                os.environ['TZ'] = self._saved_timezone
            else:
                del os.environ['TZ']
            time.tzset()
class SVNTest(SCMTest):
    """SCM tests run against a fresh mock SVN checkout (see SVNTestRepository)."""

    @staticmethod
    def _set_date_and_reviewer(changelog_entry):
        # Joe Cool matches the reviewer set in SCMTest._create_patch
        changelog_entry = changelog_entry.replace('REVIEWER_HERE', 'Joe Cool')
        # svn-apply will update ChangeLog entries with today's date (as in Cupertino, CA, US)
        with TimezoneOverride('PST8PDT'):
            return changelog_entry.replace('DATE_HERE', date.today().isoformat())

    def test_svn_apply(self):
        first_entry = """2009-10-26 Eric Seidel <eric@webkit.org>
Reviewed by Foo Bar.
Most awesome change ever.
* scm_unittest.py:
"""
        intermediate_entry = """2009-10-27 Eric Seidel <eric@webkit.org>
Reviewed by Baz Bar.
A more awesomer change yet!
* scm_unittest.py:
"""
        one_line_overlap_patch = """Index: ChangeLog
===================================================================
--- ChangeLog (revision 5)
+++ ChangeLog (working copy)
@@ -1,5 +1,13 @@
2009-10-26 Eric Seidel <eric@webkit.org>
%(whitespace)s
+ Reviewed by NOBODY (OOPS!).
+
+ Second most awesome change ever.
+
+ * scm_unittest.py:
+
+2009-10-26 Eric Seidel <eric@webkit.org>
+
Reviewed by Foo Bar.
%(whitespace)s
Most awesome change ever.
""" % {'whitespace': ' '}
        one_line_overlap_entry = """DATE_HERE Eric Seidel <eric@webkit.org>
Reviewed by REVIEWER_HERE.
Second most awesome change ever.
* scm_unittest.py:
"""
        two_line_overlap_patch = """Index: ChangeLog
===================================================================
--- ChangeLog (revision 5)
+++ ChangeLog (working copy)
@@ -2,6 +2,14 @@
%(whitespace)s
Reviewed by Foo Bar.
%(whitespace)s
+ Second most awesome change ever.
+
+ * scm_unittest.py:
+
+2009-10-26 Eric Seidel <eric@webkit.org>
+
+ Reviewed by Foo Bar.
+
Most awesome change ever.
%(whitespace)s
* scm_unittest.py:
""" % {'whitespace': ' '}
        two_line_overlap_entry = """DATE_HERE Eric Seidel <eric@webkit.org>
Reviewed by Foo Bar.
Second most awesome change ever.
* scm_unittest.py:
"""
        write_into_file_at_path('ChangeLog', first_entry)
        run_command(['svn', 'add', 'ChangeLog'])
        run_command(['svn', 'commit', '--quiet', '--message', 'ChangeLog commit'])

        # Patch files were created against just 'first_entry'.
        # Add a second commit to make svn-apply have to apply the patches with fuzz.
        changelog_contents = "%s\n%s" % (intermediate_entry, first_entry)
        write_into_file_at_path('ChangeLog', changelog_contents)
        run_command(['svn', 'commit', '--quiet', '--message', 'Intermediate commit'])

        self._setup_webkittools_scripts_symlink(self.scm)
        self.checkout.apply_patch(self._create_patch(one_line_overlap_patch))
        expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(one_line_overlap_entry), changelog_contents)
        self.assertEquals(read_from_path('ChangeLog'), expected_changelog_contents)

        self.scm.revert_files(['ChangeLog'])
        self.checkout.apply_patch(self._create_patch(two_line_overlap_patch))
        expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(two_line_overlap_entry), changelog_contents)
        self.assertEquals(read_from_path('ChangeLog'), expected_changelog_contents)

    def setUp(self):
        SVNTestRepository.setup(self)
        os.chdir(self.svn_checkout_path)
        self.scm = detect_scm_system(self.svn_checkout_path)
        # For historical reasons, we test some checkout code here too.
        self.checkout = Checkout(self.scm)

    def tearDown(self):
        SVNTestRepository.tear_down(self)

    def test_detect_scm_system_relative_url(self):
        scm = detect_scm_system(".")
        # I wanted to assert that we got the right path, but there was some
        # crazy magic with temp folder names that I couldn't figure out.
        self.assertTrue(scm.checkout_root)

    def test_create_patch_is_full_patch(self):
        test_dir_path = os.path.join(self.svn_checkout_path, "test_dir2")
        os.mkdir(test_dir_path)
        test_file_path = os.path.join(test_dir_path, 'test_file2')
        write_into_file_at_path(test_file_path, 'test content')
        run_command(['svn', 'add', 'test_dir2'])

        # create_patch depends on 'svn-create-patch', so make a dummy version.
        scripts_path = os.path.join(self.svn_checkout_path, 'Tools', 'Scripts')
        os.makedirs(scripts_path)
        create_patch_path = os.path.join(scripts_path, 'svn-create-patch')
        write_into_file_at_path(create_patch_path, '#!/bin/sh\necho $PWD')  # We could pass -n to prevent the \n, but not all echo accept -n.
        os.chmod(create_patch_path, stat.S_IXUSR | stat.S_IRUSR)

        # Change into our test directory and run the create_patch command.
        os.chdir(test_dir_path)
        scm = detect_scm_system(test_dir_path)
        self.assertEqual(scm.checkout_root, self.svn_checkout_path)  # Sanity check that detection worked right.
        patch_contents = scm.create_patch()
        # Our fake 'svn-create-patch' returns $PWD instead of a patch, check that it was executed from the root of the repo.
        self.assertEqual("%s\n" % os.path.realpath(scm.checkout_root), patch_contents)  # Add a \n because echo adds a \n.

    def test_detection(self):
        scm = detect_scm_system(self.svn_checkout_path)
        self.assertEqual(scm.display_name(), "svn")
        self.assertEqual(scm.supports_local_commits(), False)

    def test_apply_small_binary_patch(self):
        patch_contents = """Index: test_file.swf
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes on: test_file.swf
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
"""
        expected_contents = base64.b64decode("Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==")
        self._setup_webkittools_scripts_symlink(self.scm)
        patch_file = self._create_patch(patch_contents)
        self.checkout.apply_patch(patch_file)
        actual_contents = read_from_path("test_file.swf", encoding=None)
        self.assertEqual(actual_contents, expected_contents)

    def test_apply_svn_patch(self):
        scm = detect_scm_system(self.svn_checkout_path)
        patch = self._create_patch(_svn_diff("-r5:4"))
        self._setup_webkittools_scripts_symlink(scm)
        Checkout(scm).apply_patch(patch)

    def test_apply_svn_patch_force(self):
        scm = detect_scm_system(self.svn_checkout_path)
        patch = self._create_patch(_svn_diff("-r3:5"))
        self._setup_webkittools_scripts_symlink(scm)
        self.assertRaises(ScriptError, Checkout(scm).apply_patch, patch, force=True)

    def test_commit_logs(self):
        # Commits have dates and usernames in them, so we can't just direct compare.
        self.assertTrue(re.search('fourth commit', self.scm.last_svn_commit_log()))
        self.assertTrue(re.search('second commit', self.scm.svn_commit_log(3)))

    def _shared_test_commit_with_message(self, username=None):
        write_into_file_at_path('test_file', 'more test content')
        commit_text = self.scm.commit_with_message("another test commit", username)
        self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')

        self.scm.dryrun = True
        write_into_file_at_path('test_file', 'still more test content')
        commit_text = self.scm.commit_with_message("yet another test commit", username)
        self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '0')

    def test_commit_in_subdir(self, username=None):
        write_into_file_at_path('test_dir/test_file3', 'more test content')
        os.chdir("test_dir")
        commit_text = self.scm.commit_with_message("another test commit", username)
        os.chdir("..")
        self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')

    def test_commit_text_parsing(self):
        self._shared_test_commit_with_message()

    def test_commit_with_username(self):
        self._shared_test_commit_with_message("dbates@webkit.org")

    def test_commit_without_authorization(self):
        self.scm.has_authorization_for_realm = lambda realm: False
        self.assertRaises(AuthenticationError, self._shared_test_commit_with_message)

    def test_has_authorization_for_realm_using_credentials_with_passtype(self):
        credentials = """
K 8
passtype
V 8
keychain
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
dbates@webkit.org
END
"""
        self.assertTrue(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))

    def test_has_authorization_for_realm_using_credentials_with_password(self):
        credentials = """
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
dbates@webkit.org
K 8
password
V 4
blah
END
"""
        self.assertTrue(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))

    def _test_has_authorization_for_realm_using_credentials(self, realm, credentials):
        # Builds a fake ~/.subversion auth cache containing *credentials*
        # and asks the SCM whether it is authorized for *realm*.
        scm = detect_scm_system(self.svn_checkout_path)
        fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
        svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
        os.mkdir(svn_config_dir_path)
        fake_webkit_auth_file = os.path.join(svn_config_dir_path, "fake_webkit_auth_file")
        write_into_file_at_path(fake_webkit_auth_file, credentials)
        result = scm.has_authorization_for_realm(realm, home_directory=fake_home_dir)
        os.remove(fake_webkit_auth_file)
        os.rmdir(svn_config_dir_path)
        os.rmdir(fake_home_dir)
        return result

    def test_not_have_authorization_for_realm_with_credentials_missing_password_and_passtype(self):
        credentials = """
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
dbates@webkit.org
END
"""
        self.assertFalse(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))

    def test_not_have_authorization_for_realm_when_missing_credentials_file(self):
        scm = detect_scm_system(self.svn_checkout_path)
        fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
        svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
        os.mkdir(svn_config_dir_path)
        self.assertFalse(scm.has_authorization_for_realm(SVN.svn_server_realm, home_directory=fake_home_dir))
        os.rmdir(svn_config_dir_path)
        os.rmdir(fake_home_dir)

    def test_reverse_diff(self):
        self._shared_test_reverse_diff()

    def test_diff_for_revision(self):
        self._shared_test_diff_for_revision()

    def test_svn_apply_git_patch(self):
        self._shared_test_svn_apply_git_patch()

    def test_changed_files(self):
        self._shared_test_changed_files()

    def test_changed_files_for_revision(self):
        self._shared_test_changed_files_for_revision()

    def test_added_files(self):
        self._shared_test_added_files()

    def test_contents_at_revision(self):
        self._shared_test_contents_at_revision()

    def test_revisions_changing_file(self):
        self._shared_test_revisions_changing_file()

    def test_committer_email_for_revision(self):
        self._shared_test_committer_email_for_revision()

    def test_add_recursively(self):
        self._shared_test_add_recursively()

    def test_delete(self):
        os.chdir(self.svn_checkout_path)
        self.scm.delete("test_file")
        self.assertTrue("test_file" in self.scm.deleted_files())

    def test_delete_recursively(self):
        self._shared_test_delete_recursively()

    def test_delete_recursively_or_not(self):
        self._shared_test_delete_recursively_or_not()

    def test_head_svn_revision(self):
        self._shared_test_head_svn_revision()

    def test_propset_propget(self):
        filepath = os.path.join(self.svn_checkout_path, "test_file")
        expected_mime_type = "x-application/foo-bar"
        self.scm.propset("svn:mime-type", expected_mime_type, filepath)
        self.assertEqual(expected_mime_type, self.scm.propget("svn:mime-type", filepath))

    def test_show_head(self):
        write_into_file_at_path("test_file", u"Hello!", "utf-8")
        SVNTestRepository._svn_commit("fourth commit")
        self.assertEqual("Hello!", self.scm.show_head('test_file'))

    def test_show_head_binary(self):
        # A single non-UTF-8 byte: show_head must round-trip it untouched.
        data = "\244"
        write_into_file_at_path("binary_file", data, encoding=None)
        self.scm.add("binary_file")
        self.scm.commit_with_message("a test commit")
        self.assertEqual(data, self.scm.show_head('binary_file'))

    def do_test_diff_for_file(self):
        write_into_file_at_path('test_file', 'some content')
        self.scm.commit_with_message("a test commit")
        diff = self.scm.diff_for_file('test_file')
        self.assertEqual(diff, "")

        write_into_file_at_path("test_file", "changed content")
        diff = self.scm.diff_for_file('test_file')
        self.assertTrue("-some content" in diff)
        self.assertTrue("+changed content" in diff)

    def clean_bogus_dir(self):
        self.bogus_dir = self.scm._bogus_dir_name()
        if os.path.exists(self.bogus_dir):
            shutil.rmtree(self.bogus_dir)

    def test_diff_for_file_with_existing_bogus_dir(self):
        self.clean_bogus_dir()
        os.mkdir(self.bogus_dir)
        self.do_test_diff_for_file()
        # A pre-existing bogus dir must be left in place afterwards.
        self.assertTrue(os.path.exists(self.bogus_dir))
        shutil.rmtree(self.bogus_dir)

    def test_diff_for_file_with_missing_bogus_dir(self):
        self.clean_bogus_dir()
        self.do_test_diff_for_file()
        self.assertFalse(os.path.exists(self.bogus_dir))

    def test_svn_lock(self):
        svn_root_lock_path = ".svn/lock"
        write_into_file_at_path(svn_root_lock_path, "", "utf-8")
        # webkit-patch uses a Checkout object and runs update-webkit, just use svn update here.
        self.assertRaises(ScriptError, run_command, ['svn', 'update'])
        self.scm.clean_working_directory()
        self.assertFalse(os.path.exists(svn_root_lock_path))
        run_command(['svn', 'update'])  # Should succeed and not raise.

    def test_exists(self):
        self._shared_test_exists(self.scm, self.scm.commit_with_message)
class GitTest(SCMTest):
    """SCM tests run against plain (non-SVN-tracking) git checkouts."""

    def setUp(self):
        """Sets up fresh git repository with one commit. Then setups a second git
        repo that tracks the first one."""
        # FIXME: We should instead clone a git repo that is tracking an SVN repo.
        # That better matches what we do with WebKit.
        self.original_dir = os.getcwd()

        self.untracking_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout2")
        run_command(['git', 'init', self.untracking_checkout_path])

        os.chdir(self.untracking_checkout_path)
        write_into_file_at_path('foo_file', 'foo')
        run_command(['git', 'add', 'foo_file'])
        run_command(['git', 'commit', '-am', 'dummy commit'])
        self.untracking_scm = detect_scm_system(self.untracking_checkout_path)

        self.tracking_git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
        run_command(['git', 'clone', '--quiet', self.untracking_checkout_path, self.tracking_git_checkout_path])
        os.chdir(self.tracking_git_checkout_path)
        self.tracking_scm = detect_scm_system(self.tracking_git_checkout_path)

    def tearDown(self):
        # Change back to a valid directory so that later calls to os.getcwd() do not fail.
        os.chdir(self.original_dir)
        run_command(['rm', '-rf', self.tracking_git_checkout_path])
        run_command(['rm', '-rf', self.untracking_checkout_path])

    def test_remote_branch_ref(self):
        self.assertEqual(self.tracking_scm.remote_branch_ref(), 'refs/remotes/origin/master')
        os.chdir(self.untracking_checkout_path)
        self.assertRaises(ScriptError, self.untracking_scm.remote_branch_ref)

    def test_multiple_remotes(self):
        run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote1'])
        run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote2'])
        # With multiple svn-remote fetch entries, the first one wins.
        self.assertEqual(self.tracking_scm.remote_branch_ref(), 'remote1')

    def test_create_patch(self):
        write_into_file_at_path('test_file_commit1', 'contents')
        run_command(['git', 'add', 'test_file_commit1'])
        scm = self.tracking_scm
        scm.commit_locally_with_message('message')

        patch = scm.create_patch()
        self.assertFalse(re.search(r'Subversion Revision:', patch))

    def test_exists(self):
        scm = self.untracking_scm
        self._shared_test_exists(scm, scm.commit_locally_with_message)

    def test_head_svn_revision(self):
        scm = detect_scm_system(self.untracking_checkout_path)
        # If we cloned a git repo tracking an SVN repo, this would give the same result as
        # self._shared_test_head_svn_revision().
        self.assertEqual(scm.head_svn_revision(), '')

    def test_rename_files(self):
        scm = self.tracking_scm

        run_command(['git', 'mv', 'foo_file', 'bar_file'])
        scm.commit_locally_with_message('message')

        patch = scm.create_patch()
        self.assertFalse(re.search(r'rename from ', patch))
        self.assertFalse(re.search(r'rename to ', patch))
class GitSVNTest(SCMTest):
    """Tests for the Git SCM wrapper run against a git-svn clone of the mock
    SVN repository built by SVNTestRepository (mirroring a WebKit checkout).

    Cleanups relative to the original version of this class:
    - removed a dead, buggy duplicate of test_changed_files_working_copy_only
      (it referenced an undefined local `svn_log` and was shadowed by the
      later, correct definition, so it never ran);
    - removed the second, byte-identical definition of
      test_changed_files_not_synced (later definitions shadow earlier ones
      inside a class body, so only one ever ran anyway);
    - normalized deprecated assertEquals to assertEqual.
    """

    def _setup_git_checkout(self):
        self.git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
        # --quiet doesn't make git svn silent, so we use run_silent to redirect output
        run_silent(['git', 'svn', 'clone', '-T', 'trunk', self.svn_repo_url, self.git_checkout_path])
        os.chdir(self.git_checkout_path)

    def _tear_down_git_checkout(self):
        # Change back to a valid directory so that later calls to os.getcwd() do not fail.
        os.chdir(self.original_dir)
        run_command(['rm', '-rf', self.git_checkout_path])

    def setUp(self):
        self.original_dir = os.getcwd()
        SVNTestRepository.setup(self)
        self._setup_git_checkout()
        self.scm = detect_scm_system(self.git_checkout_path)
        # For historical reasons, we test some checkout code here too.
        self.checkout = Checkout(self.scm)

    def tearDown(self):
        SVNTestRepository.tear_down(self)
        self._tear_down_git_checkout()

    def test_detection(self):
        scm = detect_scm_system(self.git_checkout_path)
        self.assertEqual(scm.display_name(), "git")
        self.assertEqual(scm.supports_local_commits(), True)

    def test_read_git_config(self):
        key = 'test.git-config'
        value = 'git-config value'
        run_command(['git', 'config', key, value])
        self.assertEqual(self.scm.read_git_config(key), value)

    def test_local_commits(self):
        test_file = os.path.join(self.git_checkout_path, 'test_file')
        write_into_file_at_path(test_file, 'foo')
        run_command(['git', 'commit', '-a', '-m', 'local commit'])
        self.assertEqual(len(self.scm.local_commits()), 1)

    def test_discard_local_commits(self):
        test_file = os.path.join(self.git_checkout_path, 'test_file')
        write_into_file_at_path(test_file, 'foo')
        run_command(['git', 'commit', '-a', '-m', 'local commit'])
        self.assertEqual(len(self.scm.local_commits()), 1)
        self.scm.discard_local_commits()
        self.assertEqual(len(self.scm.local_commits()), 0)

    def test_delete_branch(self):
        new_branch = 'foo'
        run_command(['git', 'checkout', '-b', new_branch])
        self.assertEqual(run_command(['git', 'symbolic-ref', 'HEAD']).strip(), 'refs/heads/' + new_branch)
        run_command(['git', 'checkout', '-b', 'bar'])
        self.scm.delete_branch(new_branch)
        self.assertFalse(re.search(r'foo', run_command(['git', 'branch'])))

    def test_remote_merge_base(self):
        # Diff to merge-base should include working-copy changes,
        # which the diff to svn_branch.. doesn't.
        test_file = os.path.join(self.git_checkout_path, 'test_file')
        write_into_file_at_path(test_file, 'foo')
        diff_to_common_base = _git_diff(self.scm.remote_branch_ref() + '..')
        diff_to_merge_base = _git_diff(self.scm.remote_merge_base())
        self.assertFalse(re.search(r'foo', diff_to_common_base))
        self.assertTrue(re.search(r'foo', diff_to_merge_base))

    def test_rebase_in_progress(self):
        svn_test_file = os.path.join(self.svn_checkout_path, 'test_file')
        write_into_file_at_path(svn_test_file, "svn_checkout")
        run_command(['svn', 'commit', '--message', 'commit to conflict with git commit'], cwd=self.svn_checkout_path)
        git_test_file = os.path.join(self.git_checkout_path, 'test_file')
        write_into_file_at_path(git_test_file, "git_checkout")
        run_command(['git', 'commit', '-a', '-m', 'commit to be thrown away by rebase abort'])
        # --quiet doesn't make git svn silent, so use run_silent to redirect output
        self.assertRaises(ScriptError, run_silent, ['git', 'svn', '--quiet', 'rebase'])  # Will fail due to a conflict leaving us mid-rebase.
        scm = detect_scm_system(self.git_checkout_path)
        self.assertTrue(scm.rebase_in_progress())
        # Make sure our cleanup works.
        scm.clean_working_directory()
        self.assertFalse(scm.rebase_in_progress())
        # Make sure cleanup doesn't throw when no rebase is in progress.
        scm.clean_working_directory()

    def test_commitish_parsing(self):
        scm = detect_scm_system(self.git_checkout_path)
        # Multiple revisions are cherry-picked.
        self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD~2'])), 1)
        self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD', 'HEAD~2'])), 2)
        # ... is an invalid range specifier
        self.assertRaises(ScriptError, scm.commit_ids_from_commitish_arguments, ['trunk...HEAD'])

    def test_commitish_order(self):
        scm = detect_scm_system(self.git_checkout_path)
        commit_range = 'HEAD~3..HEAD'
        actual_commits = scm.commit_ids_from_commitish_arguments([commit_range])
        expected_commits = []
        expected_commits += reversed(run_command(['git', 'rev-list', commit_range]).splitlines())
        self.assertEqual(actual_commits, expected_commits)

    def test_apply_git_patch(self):
        scm = detect_scm_system(self.git_checkout_path)
        # We carefully pick a diff which does not have a directory addition
        # as currently svn-apply will error out when trying to remove directories
        # in Git: https://bugs.webkit.org/show_bug.cgi?id=34871
        patch = self._create_patch(_git_diff('HEAD..HEAD^'))
        self._setup_webkittools_scripts_symlink(scm)
        Checkout(scm).apply_patch(patch)

    def test_apply_git_patch_force(self):
        scm = detect_scm_system(self.git_checkout_path)
        patch = self._create_patch(_git_diff('HEAD~2..HEAD'))
        self._setup_webkittools_scripts_symlink(scm)
        self.assertRaises(ScriptError, Checkout(scm).apply_patch, patch, force=True)

    def test_commit_text_parsing(self):
        write_into_file_at_path('test_file', 'more test content')
        commit_text = self.scm.commit_with_message("another test commit")
        self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
        # In dryrun mode nothing is committed, so the parsed revision is '0'.
        self.scm.dryrun = True
        write_into_file_at_path('test_file', 'still more test content')
        commit_text = self.scm.commit_with_message("yet another test commit")
        self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '0')

    def test_commit_with_message_working_copy_only(self):
        write_into_file_at_path('test_file_commit1', 'more test content')
        run_command(['git', 'add', 'test_file_commit1'])
        scm = detect_scm_system(self.git_checkout_path)
        commit_text = scm.commit_with_message("yet another test commit")
        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
        self.assertTrue(re.search(r'test_file_commit1', svn_log))

    # Helpers for building local-commit / working-copy fixtures.

    def _local_commit(self, filename, contents, message):
        write_into_file_at_path(filename, contents)
        run_command(['git', 'add', filename])
        self.scm.commit_locally_with_message(message)

    def _one_local_commit(self):
        self._local_commit('test_file_commit1', 'more test content', 'another test commit')

    def _one_local_commit_plus_working_copy_changes(self):
        self._one_local_commit()
        write_into_file_at_path('test_file_commit2', 'still more test content')
        run_command(['git', 'add', 'test_file_commit2'])

    def _two_local_commits(self):
        self._one_local_commit()
        self._local_commit('test_file_commit2', 'still more test content', 'yet another test commit')

    def _three_local_commits(self):
        self._local_commit('test_file_commit0', 'more test content', 'another test commit')
        self._two_local_commits()

    def test_revisions_changing_files_with_local_commit(self):
        # A local-only commit has no SVN revision to report.
        self._one_local_commit()
        self.assertEqual(self.scm.revisions_changing_file('test_file_commit1'), [])

    def test_commit_with_message(self):
        self._one_local_commit_plus_working_copy_changes()
        scm = detect_scm_system(self.git_checkout_path)
        self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "yet another test commit")
        commit_text = scm.commit_with_message("yet another test commit", force_squash=True)
        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
        self.assertTrue(re.search(r'test_file_commit2', svn_log))
        self.assertTrue(re.search(r'test_file_commit1', svn_log))

    def test_commit_with_message_git_commit(self):
        self._two_local_commits()
        scm = detect_scm_system(self.git_checkout_path)
        commit_text = scm.commit_with_message("another test commit", git_commit="HEAD^")
        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
        self.assertTrue(re.search(r'test_file_commit1', svn_log))
        self.assertFalse(re.search(r'test_file_commit2', svn_log))

    def test_commit_with_message_git_commit_range(self):
        self._three_local_commits()
        scm = detect_scm_system(self.git_checkout_path)
        commit_text = scm.commit_with_message("another test commit", git_commit="HEAD~2..HEAD")
        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
        self.assertFalse(re.search(r'test_file_commit0', svn_log))
        self.assertTrue(re.search(r'test_file_commit1', svn_log))
        self.assertTrue(re.search(r'test_file_commit2', svn_log))

    def test_commit_with_message_only_local_commit(self):
        self._one_local_commit()
        scm = detect_scm_system(self.git_checkout_path)
        scm.commit_with_message("another test commit")
        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
        self.assertTrue(re.search(r'test_file_commit1', svn_log))

    def test_commit_with_message_multiple_local_commits_and_working_copy(self):
        self._two_local_commits()
        write_into_file_at_path('test_file_commit1', 'working copy change')
        scm = detect_scm_system(self.git_checkout_path)
        self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "another test commit")
        commit_text = scm.commit_with_message("another test commit", force_squash=True)
        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
        self.assertTrue(re.search(r'test_file_commit2', svn_log))
        self.assertTrue(re.search(r'test_file_commit1', svn_log))

    def test_commit_with_message_git_commit_and_working_copy(self):
        self._two_local_commits()
        write_into_file_at_path('test_file_commit1', 'working copy change')
        scm = detect_scm_system(self.git_checkout_path)
        self.assertRaises(ScriptError, scm.commit_with_message, "another test commit", git_commit="HEAD^")

    def test_commit_with_message_multiple_local_commits_always_squash(self):
        self._two_local_commits()
        scm = detect_scm_system(self.git_checkout_path)
        scm._assert_can_squash = lambda working_directory_is_clean: True
        commit_text = scm.commit_with_message("yet another test commit")
        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
        self.assertTrue(re.search(r'test_file_commit2', svn_log))
        self.assertTrue(re.search(r'test_file_commit1', svn_log))

    def test_commit_with_message_multiple_local_commits(self):
        self._two_local_commits()
        scm = detect_scm_system(self.git_checkout_path)
        self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "yet another test commit")
        commit_text = scm.commit_with_message("yet another test commit", force_squash=True)
        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
        self.assertTrue(re.search(r'test_file_commit2', svn_log))
        self.assertTrue(re.search(r'test_file_commit1', svn_log))

    def test_commit_with_message_not_synced(self):
        run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
        self._two_local_commits()
        scm = detect_scm_system(self.git_checkout_path)
        self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "another test commit")
        commit_text = scm.commit_with_message("another test commit", force_squash=True)
        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
        self.assertFalse(re.search(r'test_file2', svn_log))
        self.assertTrue(re.search(r'test_file_commit2', svn_log))
        self.assertTrue(re.search(r'test_file_commit1', svn_log))

    def test_commit_with_message_not_synced_with_conflict(self):
        run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
        self._local_commit('test_file2', 'asdf', 'asdf commit')
        scm = detect_scm_system(self.git_checkout_path)
        # There's a conflict between trunk and the test_file2 modification.
        self.assertRaises(ScriptError, scm.commit_with_message, "another test commit", force_squash=True)

    def test_remote_branch_ref(self):
        self.assertEqual(self.scm.remote_branch_ref(), 'refs/remotes/trunk')

    def test_reverse_diff(self):
        self._shared_test_reverse_diff()

    def test_diff_for_revision(self):
        self._shared_test_diff_for_revision()

    def test_svn_apply_git_patch(self):
        self._shared_test_svn_apply_git_patch()

    def test_create_patch_local_plus_working_copy(self):
        self._one_local_commit_plus_working_copy_changes()
        scm = detect_scm_system(self.git_checkout_path)
        patch = scm.create_patch()
        self.assertTrue(re.search(r'test_file_commit1', patch))
        self.assertTrue(re.search(r'test_file_commit2', patch))

    def test_create_patch(self):
        self._one_local_commit_plus_working_copy_changes()
        scm = detect_scm_system(self.git_checkout_path)
        patch = scm.create_patch()
        self.assertTrue(re.search(r'test_file_commit2', patch))
        self.assertTrue(re.search(r'test_file_commit1', patch))
        self.assertTrue(re.search(r'Subversion Revision: 5', patch))

    def test_create_patch_after_merge(self):
        run_command(['git', 'checkout', '-b', 'dummy-branch', 'trunk~3'])
        self._one_local_commit()
        run_command(['git', 'merge', 'trunk'])
        scm = detect_scm_system(self.git_checkout_path)
        patch = scm.create_patch()
        self.assertTrue(re.search(r'test_file_commit1', patch))
        self.assertTrue(re.search(r'Subversion Revision: 5', patch))

    def test_create_patch_with_changed_files(self):
        self._one_local_commit_plus_working_copy_changes()
        scm = detect_scm_system(self.git_checkout_path)
        patch = scm.create_patch(changed_files=['test_file_commit2'])
        self.assertTrue(re.search(r'test_file_commit2', patch))

    def test_create_patch_with_rm_and_changed_files(self):
        self._one_local_commit_plus_working_copy_changes()
        scm = detect_scm_system(self.git_checkout_path)
        os.remove('test_file_commit1')
        patch = scm.create_patch()
        patch_with_changed_files = scm.create_patch(changed_files=['test_file_commit1', 'test_file_commit2'])
        self.assertEqual(patch, patch_with_changed_files)

    def test_create_patch_git_commit(self):
        self._two_local_commits()
        scm = detect_scm_system(self.git_checkout_path)
        patch = scm.create_patch(git_commit="HEAD^")
        self.assertTrue(re.search(r'test_file_commit1', patch))
        self.assertFalse(re.search(r'test_file_commit2', patch))

    def test_create_patch_git_commit_range(self):
        self._three_local_commits()
        scm = detect_scm_system(self.git_checkout_path)
        patch = scm.create_patch(git_commit="HEAD~2..HEAD")
        self.assertFalse(re.search(r'test_file_commit0', patch))
        self.assertTrue(re.search(r'test_file_commit2', patch))
        self.assertTrue(re.search(r'test_file_commit1', patch))

    def test_create_patch_working_copy_only(self):
        self._one_local_commit_plus_working_copy_changes()
        scm = detect_scm_system(self.git_checkout_path)
        patch = scm.create_patch(git_commit="HEAD..")
        self.assertFalse(re.search(r'test_file_commit1', patch))
        self.assertTrue(re.search(r'test_file_commit2', patch))

    def test_create_patch_multiple_local_commits(self):
        self._two_local_commits()
        scm = detect_scm_system(self.git_checkout_path)
        patch = scm.create_patch()
        self.assertTrue(re.search(r'test_file_commit2', patch))
        self.assertTrue(re.search(r'test_file_commit1', patch))

    def test_create_patch_not_synced(self):
        run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
        self._two_local_commits()
        scm = detect_scm_system(self.git_checkout_path)
        patch = scm.create_patch()
        self.assertFalse(re.search(r'test_file2', patch))
        self.assertTrue(re.search(r'test_file_commit2', patch))
        self.assertTrue(re.search(r'test_file_commit1', patch))

    def test_create_binary_patch(self):
        # Create a git binary patch and check the contents.
        scm = detect_scm_system(self.git_checkout_path)
        test_file_name = 'binary_file'
        test_file_path = os.path.join(self.git_checkout_path, test_file_name)
        file_contents = ''.join(map(chr, range(256)))
        write_into_file_at_path(test_file_path, file_contents, encoding=None)
        run_command(['git', 'add', test_file_name])
        patch = scm.create_patch()
        self.assertTrue(re.search(r'\nliteral 0\n', patch))
        self.assertTrue(re.search(r'\nliteral 256\n', patch))
        # Check if we can apply the created patch.
        run_command(['git', 'rm', '-f', test_file_name])
        self._setup_webkittools_scripts_symlink(scm)
        self.checkout.apply_patch(self._create_patch(patch))
        self.assertEqual(file_contents, read_from_path(test_file_path, encoding=None))
        # Check if we can create a patch from a local commit.
        write_into_file_at_path(test_file_path, file_contents, encoding=None)
        run_command(['git', 'add', test_file_name])
        run_command(['git', 'commit', '-m', 'binary diff'])
        patch_from_local_commit = scm.create_patch('HEAD')
        self.assertTrue(re.search(r'\nliteral 0\n', patch_from_local_commit))
        self.assertTrue(re.search(r'\nliteral 256\n', patch_from_local_commit))

    def test_changed_files_local_plus_working_copy(self):
        self._one_local_commit_plus_working_copy_changes()
        scm = detect_scm_system(self.git_checkout_path)
        files = scm.changed_files()
        self.assertTrue('test_file_commit1' in files)
        self.assertTrue('test_file_commit2' in files)

    def test_changed_files_git_commit(self):
        self._two_local_commits()
        scm = detect_scm_system(self.git_checkout_path)
        files = scm.changed_files(git_commit="HEAD^")
        self.assertTrue('test_file_commit1' in files)
        self.assertFalse('test_file_commit2' in files)

    def test_changed_files_git_commit_range(self):
        self._three_local_commits()
        scm = detect_scm_system(self.git_checkout_path)
        files = scm.changed_files(git_commit="HEAD~2..HEAD")
        self.assertTrue('test_file_commit0' not in files)
        self.assertTrue('test_file_commit1' in files)
        self.assertTrue('test_file_commit2' in files)

    def test_changed_files_working_copy_only(self):
        self._one_local_commit_plus_working_copy_changes()
        scm = detect_scm_system(self.git_checkout_path)
        files = scm.changed_files(git_commit="HEAD..")
        self.assertFalse('test_file_commit1' in files)
        self.assertTrue('test_file_commit2' in files)

    def test_changed_files_multiple_local_commits(self):
        self._two_local_commits()
        scm = detect_scm_system(self.git_checkout_path)
        files = scm.changed_files()
        self.assertTrue('test_file_commit2' in files)
        self.assertTrue('test_file_commit1' in files)

    def test_changed_files_not_synced(self):
        run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
        self._two_local_commits()
        scm = detect_scm_system(self.git_checkout_path)
        files = scm.changed_files()
        self.assertFalse('test_file2' in files)
        self.assertTrue('test_file_commit2' in files)
        self.assertTrue('test_file_commit1' in files)

    def test_changed_files(self):
        self._shared_test_changed_files()

    def test_changed_files_for_revision(self):
        self._shared_test_changed_files_for_revision()

    def test_contents_at_revision(self):
        self._shared_test_contents_at_revision()

    def test_revisions_changing_file(self):
        self._shared_test_revisions_changing_file()

    def test_added_files(self):
        self._shared_test_added_files()

    def test_committer_email_for_revision(self):
        self._shared_test_committer_email_for_revision()

    def test_add_recursively(self):
        self._shared_test_add_recursively()

    def test_delete(self):
        self._two_local_commits()
        self.scm.delete('test_file_commit1')
        self.assertTrue("test_file_commit1" in self.scm.deleted_files())

    def test_delete_recursively(self):
        self._shared_test_delete_recursively()

    def test_delete_recursively_or_not(self):
        self._shared_test_delete_recursively_or_not()

    def test_head_svn_revision(self):
        self._shared_test_head_svn_revision()

    def test_to_object_name(self):
        relpath = 'test_file_commit1'
        fullpath = os.path.join(self.git_checkout_path, relpath)
        self._two_local_commits()
        self.assertEqual(relpath, self.scm.to_object_name(fullpath))

    def test_show_head(self):
        self._two_local_commits()
        self.assertEqual("more test content", self.scm.show_head('test_file_commit1'))

    def test_show_head_binary(self):
        self._two_local_commits()
        data = "\244"
        write_into_file_at_path("binary_file", data, encoding=None)
        self.scm.add("binary_file")
        self.scm.commit_locally_with_message("a test commit")
        self.assertEqual(data, self.scm.show_head('binary_file'))

    def test_diff_for_file(self):
        self._two_local_commits()
        write_into_file_at_path('test_file_commit1', "Updated", encoding=None)
        diff = self.scm.diff_for_file('test_file_commit1')
        cached_diff = self.scm.diff_for_file('test_file_commit1')
        self.assertTrue("+Updated" in diff)
        self.assertTrue("-more test content" in diff)
        # The diff should also be visible once the change is staged (cached).
        self.scm.add('test_file_commit1')
        cached_diff = self.scm.diff_for_file('test_file_commit1')
        self.assertTrue("+Updated" in cached_diff)
        self.assertTrue("-more test content" in cached_diff)

    def test_exists(self):
        scm = detect_scm_system(self.git_checkout_path)
        self._shared_test_exists(scm, scm.commit_locally_with_message)
# We need to split off more of these SCM tests to use mocks instead of the filesystem.
# This class is the first part of that.
class GitTestWithMock(unittest.TestCase):
    """Git SCM tests that use a MockExecutive instead of real subprocesses."""
    def make_scm(self, logging_executive=False):
        # We do this should_log dance to avoid logging when Git.__init__ runs sysctl on mac to check for 64-bit support.
        scm = Git(cwd=None, executive=MockExecutive())
        scm._executive._should_log = logging_executive
        return scm
    def test_create_patch(self):
        # Verify the exact sequence of git commands create_patch() issues,
        # as logged by the mock executive.
        scm = self.make_scm(logging_executive=True)
        expected_stderr = "MOCK run_command: ['git', 'merge-base', u'refs/remotes/origin/master', 'HEAD'], cwd=%(checkout)s\nMOCK run_command: ['git', 'diff', '--binary', '--no-ext-diff', '--full-index', '-M', 'MOCK output of child process', '--'], cwd=%(checkout)s\nMOCK run_command: ['git', 'log', '-25'], cwd=None\n" % {'checkout': scm.checkout_root}
        OutputCapture().assert_outputs(self, scm.create_patch, expected_stderr=expected_stderr)
    def test_push_local_commits_to_server_with_username_and_password(self):
        self.assertEquals(self.make_scm().push_local_commits_to_server(username='dbates@webkit.org', password='blah'), "MOCK output of child process")
    def test_push_local_commits_to_server_without_username_and_password(self):
        # Credentials are required in all four combinations below except both-present.
        self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server)
    def test_push_local_commits_to_server_with_username_and_without_password(self):
        self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server, {'username': 'dbates@webkit.org'})
    def test_push_local_commits_to_server_without_username_and_with_password(self):
        self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server, {'password': 'blah'})
# Allow running this test file directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 43.813249 | 353 | 0.70536 |
from __future__ import with_statement
import atexit
import base64
import codecs
import getpass
import os
import os.path
import re
import stat
import sys
import subprocess
import tempfile
import time
import unittest
import urllib
import shutil
from datetime import date
from webkitpy.common.checkout.checkout import Checkout
from webkitpy.common.config.committers import Committer
from webkitpy.common.net.bugzilla import Attachment
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockExecutive
from .detection import find_checkout_root, default_scm, detect_scm_system
from .git import Git, AmbiguousCommitError
from .scm import SCM, CheckoutNeedsUpdate, commit_error_handler, AuthenticationError
from .svn import SVN
# We store it in a global variable so that we can delete this cached repo on exit(3).
# FIXME: Remove this once we migrate to Python 2.7. Unittest in Python 2.7 supports module-specific setup and teardown functions.
# Path to the lazily-built mock SVN repository shared across tests; set by
# SVNTestRepository.setup() on first use and deleted at interpreter exit.
cached_svn_repo_path = None
def remove_dir(path):
    """Recursively delete *path*.

    Hops to the filesystem root first so the current working directory is
    guaranteed not to be inside the tree being removed.
    """
    os.chdir('/')
    shutil.rmtree(path)
@atexit.register
def delete_cached_mock_repo_at_exit():
    # Remove the shared mock SVN repository (if one was ever built) when the
    # interpreter exits.
    if cached_svn_repo_path:
        remove_dir(cached_svn_repo_path)
def run_command(*args, **kwargs):
    # Convenience wrapper: forward everything to a fresh Executive instance.
    return Executive().run_command(*args, **kwargs)
def run_silent(args, cwd=None):
    """Run *args* with stdout and stderr swallowed.

    Raises ScriptError if the command exits with a non-zero status.
    """
    child = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
    child.communicate()  # Drain the pipes so the child can terminate.
    exit_code = child.wait()
    if exit_code:
        raise ScriptError('Failed to run "%s" exit_code: %d cwd: %s' % (args, exit_code, cwd))
def write_into_file_at_path(file_path, contents, encoding="utf-8"):
    """Write *contents* to *file_path*.

    When *encoding* is truthy the file is opened through codecs with that
    encoding; pass a falsy encoding (e.g. None) to write raw, unencoded data.
    """
    if not encoding:
        with open(file_path, "w") as file_object:
            file_object.write(contents)
    else:
        with codecs.open(file_path, "w", encoding) as file_object:
            file_object.write(contents)
def read_from_path(file_path, encoding="utf-8"):
    """Return the full contents of *file_path*, decoded using *encoding*."""
    with codecs.open(file_path, "r", encoding) as file_object:
        return file_object.read()
def _make_diff(command, *args):
    # Run "<command> diff <args...>" and return the raw (undecoded) output;
    # diffs can contain binary data, so decoding is skipped.
    return run_command([command, "diff"] + list(args), decode_output=False)
def _svn_diff(*args):
    # Shorthand for "svn diff <args...>" returning raw output.
    return _make_diff("svn", *args)
def _git_diff(*args):
    # Shorthand for "git diff <args...>" returning raw output.
    return _make_diff("git", *args)
class SVNTestRepository:
    """Builds and tears down the mock SVN repository used by the SCM tests.

    The repository (trunk plus four canned commits) is expensive to create,
    so it is built once, cached in the module-global cached_svn_repo_path,
    and copied into a fresh temp directory for each test.
    """
    @classmethod
    def _svn_add(cls, path):
        run_command(["svn", "add", path])
    @classmethod
    def _svn_commit(cls, message):
        run_command(["svn", "commit", "--quiet", "--message", message])
    @classmethod
    def _setup_test_commits(cls, svn_repo_url):
        # Check out the repo, lay down four commits (including latin1/utf-8
        # encoded content for encoding tests), then discard the checkout.
        svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
        run_command(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
        os.chdir(svn_checkout_path)
        write_into_file_at_path("test_file", "test1")
        cls._svn_add("test_file")
        cls._svn_commit("initial commit")
        write_into_file_at_path("test_file", "test1test2")
        os.mkdir("test_dir")
        test_file3_path = "test_dir/test_file3"
        write_into_file_at_path(test_file3_path, "third file")
        cls._svn_add("test_dir")
        cls._svn_commit("second commit")
        write_into_file_at_path("test_file", "test1test2test3\n")
        write_into_file_at_path("test_file2", "second file")
        cls._svn_add("test_file2")
        cls._svn_commit("third commit")
        write_into_file_at_path("test_file", u"latin1 test: \u00A0\n", "latin1")
        write_into_file_at_path("test_file2", u"utf-8 test: \u00A0\n", "utf-8")
        cls._svn_commit("fourth commit")
        # svn does not seem to update after commit as I would expect.
        run_command(['svn', 'update'])
        remove_dir(svn_checkout_path)
    # This is a hot function since it's invoked by unittest before calling each test_ method in SVNTest and
    # GitSVNTest; to keep it fast, the mock repository is built only once and cached in
    # cached_svn_repo_path, then copied for each individual test.
    @classmethod
    def setup(cls, test_object):
        global cached_svn_repo_path
        if not cached_svn_repo_path:
            cached_svn_repo_path = cls._setup_mock_repo()
        test_object.temp_directory = tempfile.mkdtemp(suffix="svn_test")
        test_object.svn_repo_path = os.path.join(test_object.temp_directory, "repo")
        test_object.svn_repo_url = "file://%s" % test_object.svn_repo_path
        test_object.svn_checkout_path = os.path.join(test_object.temp_directory, "checkout")
        shutil.copytree(cached_svn_repo_path, test_object.svn_repo_path)
        run_command(['svn', 'checkout', '--quiet', test_object.svn_repo_url + "/trunk", test_object.svn_checkout_path])
    @classmethod
    def _setup_mock_repo(cls):
        # Create an test SVN repository
        svn_repo_path = tempfile.mkdtemp(suffix="svn_test_repo")
        svn_repo_url = "file://%s" % svn_repo_path # Not sure this will work on windows
        # git svn complains if we don't pass --pre-1.5-compatible, not sure why:
        run_command(['svnadmin', 'create', '--pre-1.5-compatible', svn_repo_path])
        svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
        run_command(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
        os.chdir(svn_checkout_path)
        os.mkdir('trunk')
        cls._svn_add('trunk')
        # We can add tags and branches as well if we ever need to test those.
        cls._svn_commit('add trunk')
        # Change directory out of the svn checkout so we can delete the checkout directory.
        remove_dir(svn_checkout_path)
        cls._setup_test_commits(svn_repo_url + "/trunk")
        return svn_repo_path
    @classmethod
    def tear_down(cls, test_object):
        remove_dir(test_object.temp_directory)
        # Now that we've deleted the checkout paths, cwddir may be invalid
        # Change back to a valid directory (this file's checkout root).
        if os.path.isabs(__file__):
            path = os.path.dirname(__file__)
        else:
            path = sys.path[0]
        os.chdir(detect_scm_system(path).checkout_root)
class StandaloneFunctionsTest(unittest.TestCase):
    """Tests for the module-level detection helpers find_checkout_root and
    default_scm, including their exit behavior when no checkout is found."""
    def setUp(self):
        # Save cwd and the real os.path.abspath; tests below monkeypatch the
        # latter to simulate "no checkout found anywhere up the tree".
        self.orig_cwd = os.path.abspath(os.getcwd())
        self.orig_abspath = os.path.abspath
        self.output = OutputCapture()
        self.output.capture_output()
    def tearDown(self):
        os.chdir(self.orig_cwd)
        os.path.abspath = self.orig_abspath
        self.output.restore_output()
    def test_find_checkout_root(self):
        # From inside the checkout, the root should be found and exist.
        os.chdir(sys.path[0])
        dir = find_checkout_root()
        self.assertNotEqual(dir, None)
        self.assertTrue(os.path.exists(dir))
        os.chdir(os.path.expanduser("~"))
        dir = find_checkout_root()
        self.assertNotEqual(dir, None)
        self.assertTrue(os.path.exists(dir))
        # With abspath stubbed to "/", detection can never succeed and the
        # function is expected to exit the process.
        os.path.abspath = lambda x: "/"
        self.assertRaises(SystemExit, find_checkout_root)
        os.path.abspath = self.orig_abspath
    def test_default_scm(self):
        os.chdir(sys.path[0])
        scm = default_scm()
        self.assertNotEqual(scm, None)
        os.chdir(os.path.expanduser("~"))
        dir = find_checkout_root()
        self.assertNotEqual(dir, None)
        # Same stub trick as above: default_scm() should exit when no
        # checkout can be located.
        os.path.abspath = lambda x: "/"
        self.assertRaises(SystemExit, default_scm)
        os.path.abspath = self.orig_abspath
class SCMClassTests(unittest.TestCase):
    """Tests for run_command piping and for the SCM error handlers."""
    def setUp(self):
        self.dev_null = open(os.devnull, "w")
    def tearDown(self):
        self.dev_null.close()
    def test_run_command_with_pipe(self):
        # run_command accepts either a file-like object or a string as input.
        input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null)
        self.assertEqual(run_command(['grep', 'bar'], input=input_process.stdout), "bar\n")
        self.assertEqual(run_command(['grep', 'bar'], input="foo\nbar"), "bar\n")
        # Test the non-pipe case too
        # A failing upstream process should not mask grep's own failure.
        command_returns_non_zero = ['/bin/sh', '--invalid-option']
        input_process = subprocess.Popen(command_returns_non_zero, stdout=subprocess.PIPE, stderr=self.dev_null)
        self.assertTrue(input_process.poll() != 0)
        self.assertRaises(ScriptError, run_command, ['grep', 'bar'], input=input_process.stdout)
        input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null)
        self.assertRaises(ScriptError, run_command, command_returns_non_zero, input=input_process.stdout)
    def test_error_handlers(self):
        # commit_error_handler must translate out-of-date errors (from either
        # git-svn or plain svn) into CheckoutNeedsUpdate, and re-raise others.
        git_failure_message="Merge conflict during commit: Your file or directory 'WebCore/ChangeLog' is probably out-of-date: resource out of date; try updating at /usr/local/libexec/git-core//git-svn line 469"
        svn_failure_message="""svn: Commit failed (details follow):
svn: File or directory 'ChangeLog' is out of date; try updating
svn: resource out of date; try updating
"""
        command_does_not_exist = ['does_not_exist', 'invalid_option']
        self.assertRaises(OSError, run_command, command_does_not_exist)
        self.assertRaises(OSError, run_command, command_does_not_exist, error_handler=Executive.ignore_error)
        command_returns_non_zero = ['/bin/sh', '--invalid-option']
        self.assertRaises(ScriptError, run_command, command_returns_non_zero)
        # Check if returns error text:
        self.assertTrue(run_command(command_returns_non_zero, error_handler=Executive.ignore_error))
        self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=git_failure_message))
        self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=svn_failure_message))
        self.assertRaises(ScriptError, commit_error_handler, ScriptError(output='blah blah blah'))
class SCMTest(unittest.TestCase):
    """Abstract base for the SVN and Git tests.

    The _shared_test_* methods encode behavior every SCM implementation must
    satisfy; concrete subclasses (SVNTest, GitSVNTest) set self.scm (rooted
    in a scratch repository) and self.checkout, then call them from their own
    test_* methods.
    """

    def _create_patch(self, patch_contents):
        # Build a minimal Attachment double whose contents() and reviewer()
        # return canned values, so Checkout.apply_patch can be exercised
        # without any bug-tracker access.
        attachment = Attachment({"bug_id": 12345}, None)
        attachment.contents = lambda: patch_contents
        joe_cool = Committer("Joe Cool", "joe@cool.com")
        attachment.reviewer = lambda: joe_cool
        return attachment

    def _setup_webkittools_scripts_symlink(self, local_scm):
        # apply_patch shells out to helper scripts under the checkout's
        # scripts directory; symlink the real ones into the scratch checkout.
        webkit_scm = detect_scm_system(os.path.dirname(os.path.abspath(__file__)))
        webkit_scripts_directory = webkit_scm.scripts_directory()
        local_scripts_directory = local_scm.scripts_directory()
        os.mkdir(os.path.dirname(local_scripts_directory))
        os.symlink(webkit_scripts_directory, local_scripts_directory)

    def _shared_test_changed_files(self):
        write_into_file_at_path("test_file", "changed content")
        self.assertEqual(self.scm.changed_files(), ["test_file"])
        write_into_file_at_path("test_dir/test_file3", "new stuff")
        self.assertEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
        old_cwd = os.getcwd()
        os.chdir("test_dir")
        # Paths must stay relative to the checkout root, not to our cwd.
        self.assertEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
        os.chdir(old_cwd)

    def _shared_test_added_files(self):
        # Modifying an existing file is not an "add".
        write_into_file_at_path("test_file", "changed content")
        self.assertEqual(self.scm.added_files(), [])
        write_into_file_at_path("added_file", "new stuff")
        self.scm.add("added_file")
        os.mkdir("added_dir")
        write_into_file_at_path("added_dir/added_file2", "new stuff")
        self.scm.add("added_dir")
        # SVN also reports the added directory itself while Git lists only
        # files; normalize before comparing.
        added_files = self.scm.added_files()
        if "added_dir" in added_files:
            added_files.remove("added_dir")
        self.assertEqual(added_files, ["added_dir/added_file2", "added_file"])
        # clean_working_directory must un-add AND remove the added paths.
        self.scm.clean_working_directory()
        self.assertEqual(self.scm.added_files(), [])
        self.assertFalse(os.path.exists("added_file"))
        self.assertFalse(os.path.exists("added_dir"))

    def _shared_test_changed_files_for_revision(self):
        # SVN reports the changed directory as well; normalize as above.
        changed_files = self.scm.changed_files_for_revision(3)
        if "test_dir" in changed_files:
            changed_files.remove("test_dir")
        self.assertEqual(changed_files, ["test_dir/test_file3", "test_file"])
        self.assertEqual(sorted(self.scm.changed_files_for_revision(4)), sorted(["test_file", "test_file2"]))
        self.assertEqual(self.scm.changed_files_for_revision(2), ["test_file"])

    def _shared_test_contents_at_revision(self):
        self.assertEqual(self.scm.contents_at_revision("test_file", 3), "test1test2")
        self.assertEqual(self.scm.contents_at_revision("test_file", 4), "test1test2test3\n")
        # Contents must come back as the raw committed bytes, not decoded
        # text (the files below were committed as latin1 and utf-8).
        self.assertEqual(self.scm.contents_at_revision("test_file", 5), u"latin1 test: \u00A0\n".encode("latin1"))
        self.assertEqual(self.scm.contents_at_revision("test_file2", 5), u"utf-8 test: \u00A0\n".encode("utf-8"))
        self.assertEqual(self.scm.contents_at_revision("test_file2", 4), "second file")
        # Currently we raise instead of returning None because detecting the difference between
        # "file not found" and any other error seems impossible with svn (git seems to expose such through the return code).
        self.assertRaises(ScriptError, self.scm.contents_at_revision, "test_file2", 2)
        self.assertRaises(ScriptError, self.scm.contents_at_revision, "does_not_exist", 2)

    def _shared_test_revisions_changing_file(self):
        # Revisions are reported newest-first.
        self.assertEqual(self.scm.revisions_changing_file("test_file"), [5, 4, 3, 2])
        self.assertRaises(ScriptError, self.scm.revisions_changing_file, "non_existent_file")

    def _shared_test_committer_email_for_revision(self):
        self.assertEqual(self.scm.committer_email_for_revision(3), getpass.getuser())  # Committer "email" will be the current user

    def _shared_test_reverse_diff(self):
        self._setup_webkittools_scripts_symlink(self.scm)  # Git's apply_reverse_diff uses resolve-ChangeLogs
        # Rolling back r5 should leave the r4 contents in place.
        self.scm.apply_reverse_diff('5')
        self.assertEqual(read_from_path('test_file'), "test1test2test3\n")

    def _shared_test_diff_for_revision(self):
        # The diff for r4 must contain that revision's content (and context)
        # but nothing from later revisions.
        r3_patch = self.scm.diff_for_revision(4)
        self.assertTrue(re.search('test3', r3_patch))
        self.assertFalse(re.search('test4', r3_patch))
        self.assertTrue(re.search('test2', r3_patch))
        self.assertTrue(re.search('test2', self.scm.diff_for_revision(3)))

    def _shared_test_svn_apply_git_patch(self):
        # Round-trips git binary patches through Checkout.apply_patch:
        # add, then modify, then delete a GIF — and verify each patch
        # refuses to apply a second time.
        self._setup_webkittools_scripts_symlink(self.scm)
        git_binary_addition = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
new file mode 100644
index 0000000000000000000000000000000000000000..64a9532e7794fcd791f6f12157406d90
60151690
GIT binary patch
literal 512
zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
literal 0
HcmV?d00001
"""
        self.checkout.apply_patch(self._create_patch(git_binary_addition))
        added = read_from_path('fizzbuzz7.gif', encoding=None)
        self.assertEqual(512, len(added))
        self.assertTrue(added.startswith('GIF89a'))
        self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files())
        # Re-applying the same addition must fail, not silently succeed.
        self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_addition))
        git_binary_modification = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
index 64a9532e7794fcd791f6f12157406d9060151690..323fae03f4606ea9991df8befbb2fca7
GIT binary patch
literal 7
OcmYex&reD$;sO8*F9L)B
literal 512
zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
"""
        self.checkout.apply_patch(self._create_patch(git_binary_modification))
        modified = read_from_path('fizzbuzz7.gif', encoding=None)
        self.assertEqual('foobar\n', modified)
        self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files())
        self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_modification))
        git_binary_deletion = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
deleted file mode 100644
index 323fae0..0000000
GIT binary patch
literal 0
HcmV?d00001
literal 7
OcmYex&reD$;sO8*F9L)B
"""
        self.checkout.apply_patch(self._create_patch(git_binary_deletion))
        self.assertFalse(os.path.exists('fizzbuzz7.gif'))
        self.assertFalse('fizzbuzz7.gif' in self.scm.changed_files())
        self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_deletion))

    def _shared_test_add_recursively(self):
        # Adding a file in an untracked directory must add the directory too.
        os.mkdir("added_dir")
        write_into_file_at_path("added_dir/added_file", "new stuff")
        self.scm.add("added_dir/added_file")
        self.assertTrue("added_dir/added_file" in self.scm.added_files())

    def _shared_test_delete_recursively(self):
        os.mkdir("added_dir")
        write_into_file_at_path("added_dir/added_file", "new stuff")
        self.scm.add("added_dir/added_file")
        self.assertTrue("added_dir/added_file" in self.scm.added_files())
        # Deleting the only file should also drop the directory.
        self.scm.delete("added_dir/added_file")
        self.assertFalse("added_dir" in self.scm.added_files())

    def _shared_test_delete_recursively_or_not(self):
        # ...but when a sibling file remains, the sibling must stay staged.
        os.mkdir("added_dir")
        write_into_file_at_path("added_dir/added_file", "new stuff")
        write_into_file_at_path("added_dir/another_added_file", "more new stuff")
        self.scm.add("added_dir/added_file")
        self.scm.add("added_dir/another_added_file")
        self.assertTrue("added_dir/added_file" in self.scm.added_files())
        self.assertTrue("added_dir/another_added_file" in self.scm.added_files())
        self.scm.delete("added_dir/added_file")
        self.assertTrue("added_dir/another_added_file" in self.scm.added_files())

    def _shared_test_exists(self, scm, commit_function):
        """exists() must be true exactly while the file is present at HEAD."""
        os.chdir(scm.checkout_root)
        self.assertFalse(scm.exists('foo.txt'))
        write_into_file_at_path('foo.txt', 'some stuff')
        # Written but not committed: still absent from the repository.
        self.assertFalse(scm.exists('foo.txt'))
        scm.add('foo.txt')
        commit_function('adding foo')
        self.assertTrue(scm.exists('foo.txt'))
        scm.delete('foo.txt')
        commit_function('deleting foo')
        self.assertFalse(scm.exists('foo.txt'))

    def _shared_test_head_svn_revision(self):
        # The scratch repository ends at r5 (see the revision numbers used
        # throughout the shared tests above).
        self.assertEqual(self.scm.head_svn_revision(), '5')
class TimezoneOverride(object):
    """Context manager that temporarily forces the TZ environment variable.

    On platforms without time.tzset() (e.g. Windows), entering and exiting
    are no-ops.  An unset-or-empty saved TZ is removed on exit rather than
    restored.
    """

    def __init__(self, timezone_string):
        self._timezone_string = timezone_string

    def __enter__(self):
        if not hasattr(time, 'tzset'):
            return
        self._saved_timezone = os.environ.get('TZ', None)
        os.environ['TZ'] = self._timezone_string
        time.tzset()

    def __exit__(self, type, value, traceback):
        if not hasattr(time, 'tzset'):
            return
        if self._saved_timezone:
            os.environ['TZ'] = self._saved_timezone
        else:
            del os.environ['TZ']
        time.tzset()
class SVNTest(SCMTest):
    """Runs the shared SCM tests (plus SVN-specific ones) against a scratch
    SVN repository created by SVNTestRepository.setup()."""

    @staticmethod
    def _set_date_and_reviewer(changelog_entry):
        # Fill in the placeholders that landing a ChangeLog entry replaces.
        changelog_entry = changelog_entry.replace('REVIEWER_HERE', 'Joe Cool')
        # Pin the timezone so date.today() here matches the date computed by
        # the tooling that applied the patch.
        with TimezoneOverride('PST8PDT'):
            return changelog_entry.replace('DATE_HERE', date.today().isoformat())

    def test_svn_apply(self):
        """Tests that svn-apply can merge overlapping ChangeLog entries
        (one- and two-line overlap) using fuzzy matching, prepending the new
        entry instead of corrupting the file."""
        first_entry = """2009-10-26 Eric Seidel <eric@webkit.org>
Reviewed by Foo Bar.
Most awesome change ever.
* scm_unittest.py:
"""
        intermediate_entry = """2009-10-27 Eric Seidel <eric@webkit.org>
Reviewed by Baz Bar.
A more awesomer change yet!
* scm_unittest.py:
"""
        one_line_overlap_patch = """Index: ChangeLog
===================================================================
--- ChangeLog (revision 5)
+++ ChangeLog (working copy)
@@ -1,5 +1,13 @@
2009-10-26 Eric Seidel <eric@webkit.org>
%(whitespace)s
+ Reviewed by NOBODY (OOPS!).
+
+ Second most awesome change ever.
+
+ * scm_unittest.py:
+
+2009-10-26 Eric Seidel <eric@webkit.org>
+
Reviewed by Foo Bar.
%(whitespace)s
Most awesome change ever.
""" % {'whitespace': ' '}
        one_line_overlap_entry = """DATE_HERE Eric Seidel <eric@webkit.org>
Reviewed by REVIEWER_HERE.
Second most awesome change ever.
* scm_unittest.py:
"""
        two_line_overlap_patch = """Index: ChangeLog
===================================================================
--- ChangeLog (revision 5)
+++ ChangeLog (working copy)
@@ -2,6 +2,14 @@
%(whitespace)s
Reviewed by Foo Bar.
%(whitespace)s
+ Second most awesome change ever.
+
+ * scm_unittest.py:
+
+2009-10-26 Eric Seidel <eric@webkit.org>
+
+ Reviewed by Foo Bar.
+
Most awesome change ever.
%(whitespace)s
* scm_unittest.py:
""" % {'whitespace': ' '}
        two_line_overlap_entry = """DATE_HERE Eric Seidel <eric@webkit.org>
Reviewed by Foo Bar.
Second most awesome change ever.
* scm_unittest.py:
"""
        write_into_file_at_path('ChangeLog', first_entry)
        run_command(['svn', 'add', 'ChangeLog'])
        run_command(['svn', 'commit', '--quiet', '--message', 'ChangeLog commit'])
        # Patch files were created against just 'first_entry'.
        # Add a second commit to make svn-apply have to apply the patches with fuzz.
        changelog_contents = "%s\n%s" % (intermediate_entry, first_entry)
        write_into_file_at_path('ChangeLog', changelog_contents)
        run_command(['svn', 'commit', '--quiet', '--message', 'Intermediate commit'])
        self._setup_webkittools_scripts_symlink(self.scm)
        self.checkout.apply_patch(self._create_patch(one_line_overlap_patch))
        expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(one_line_overlap_entry), changelog_contents)
        self.assertEquals(read_from_path('ChangeLog'), expected_changelog_contents)
        # Revert and try again with the wider overlap.
        self.scm.revert_files(['ChangeLog'])
        self.checkout.apply_patch(self._create_patch(two_line_overlap_patch))
        expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(two_line_overlap_entry), changelog_contents)
        self.assertEquals(read_from_path('ChangeLog'), expected_changelog_contents)

    def setUp(self):
        # SVNTestRepository.setup stores svn_checkout_path (and friends) on self.
        SVNTestRepository.setup(self)
        os.chdir(self.svn_checkout_path)
        self.scm = detect_scm_system(self.svn_checkout_path)
        # For historical reasons, we test some checkout code here too.
        self.checkout = Checkout(self.scm)

    def tearDown(self):
        SVNTestRepository.tear_down(self)

    def test_detect_scm_system_relative_url(self):
        scm = detect_scm_system(".")
        # I wanted to assert that we got the right path, but there was some
        # crazy magic with temp folder names that I couldn't figure out.
        self.assertTrue(scm.checkout_root)

    def test_create_patch_is_full_patch(self):
        """create_patch must run from the checkout root even when invoked
        from a subdirectory (verified via a stub svn-create-patch that just
        echoes its working directory)."""
        test_dir_path = os.path.join(self.svn_checkout_path, "test_dir2")
        os.mkdir(test_dir_path)
        test_file_path = os.path.join(test_dir_path, 'test_file2')
        write_into_file_at_path(test_file_path, 'test content')
        run_command(['svn', 'add', 'test_dir2'])
        # Create a stub svn-create-patch that echoes its cwd.
        scripts_path = os.path.join(self.svn_checkout_path, 'Tools', 'Scripts')
        os.makedirs(scripts_path)
        create_patch_path = os.path.join(scripts_path, 'svn-create-patch')
        write_into_file_at_path(create_patch_path, '#!/bin/sh\necho $PWD')
        os.chmod(create_patch_path, stat.S_IXUSR | stat.S_IRUSR)
        os.chdir(test_dir_path)
        scm = detect_scm_system(test_dir_path)
        self.assertEqual(scm.checkout_root, self.svn_checkout_path)
        patch_contents = scm.create_patch()
        # realpath() because the temp dir may itself be behind a symlink.
        self.assertEqual("%s\n" % os.path.realpath(scm.checkout_root), patch_contents)

    def test_detection(self):
        scm = detect_scm_system(self.svn_checkout_path)
        self.assertEqual(scm.display_name(), "svn")
        self.assertEqual(scm.supports_local_commits(), False)

    def test_apply_small_binary_patch(self):
        """An SVN-style base64 binary patch should decode to the exact bytes."""
        patch_contents = """Index: test_file.swf
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes on: test_file.swf
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
"""
        expected_contents = base64.b64decode("Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==")
        self._setup_webkittools_scripts_symlink(self.scm)
        patch_file = self._create_patch(patch_contents)
        self.checkout.apply_patch(patch_file)
        actual_contents = read_from_path("test_file.swf", encoding=None)
        self.assertEqual(actual_contents, expected_contents)

    def test_apply_svn_patch(self):
        # A reverse diff (-r5:4) applies cleanly on top of r5.
        scm = detect_scm_system(self.svn_checkout_path)
        patch = self._create_patch(_svn_diff("-r5:4"))
        self._setup_webkittools_scripts_symlink(scm)
        Checkout(scm).apply_patch(patch)

    def test_apply_svn_patch_force(self):
        # The -r3:5 diff does not apply to the r5 working copy; even with
        # force=True this must surface as a ScriptError.
        scm = detect_scm_system(self.svn_checkout_path)
        patch = self._create_patch(_svn_diff("-r3:5"))
        self._setup_webkittools_scripts_symlink(scm)
        self.assertRaises(ScriptError, Checkout(scm).apply_patch, patch, force=True)

    def test_commit_logs(self):
        # Commits have dates and usernames, sadly we can't easily test the
        # contents beyond the commit messages themselves.
        self.assertTrue(re.search('fourth commit', self.scm.last_svn_commit_log()))
        self.assertTrue(re.search('second commit', self.scm.svn_commit_log(3)))

    def _shared_test_commit_with_message(self, username=None):
        # A real commit produces r6; a dryrun commit reports revision '0'.
        write_into_file_at_path('test_file', 'more test content')
        commit_text = self.scm.commit_with_message("another test commit", username)
        self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
        self.scm.dryrun = True
        write_into_file_at_path('test_file', 'still more test content')
        commit_text = self.scm.commit_with_message("yet another test commit", username)
        self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '0')

    def test_commit_in_subdir(self, username=None):
        # Committing from a subdirectory must still commit the whole change.
        write_into_file_at_path('test_dir/test_file3', 'more test content')
        os.chdir("test_dir")
        commit_text = self.scm.commit_with_message("another test commit", username)
        os.chdir("..")
        self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')

    def test_commit_text_parsing(self):
        self._shared_test_commit_with_message()

    def test_commit_with_username(self):
        self._shared_test_commit_with_message("dbates@webkit.org")

    def test_commit_without_authorization(self):
        # With no realm authorization, committing must raise rather than
        # prompt or silently fail.
        self.scm.has_authorization_for_realm = lambda realm: False
        self.assertRaises(AuthenticationError, self._shared_test_commit_with_message)

    def test_has_authorization_for_realm_using_credentials_with_passtype(self):
        # A cached credential that stores its secret in the keychain
        # ("passtype") counts as authorization.
        credentials = """
K 8
passtype
V 8
keychain
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
dbates@webkit.org
END
"""
        self.assertTrue(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))

    def test_has_authorization_for_realm_using_credentials_with_password(self):
        # A cached credential with an inline password also counts.
        credentials = """
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
dbates@webkit.org
K 8
password
V 4
blah
END
"""
        self.assertTrue(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))

    def _test_has_authorization_for_realm_using_credentials(self, realm, credentials):
        # Write the given credential blob into a fake ~/.subversion auth
        # store, query has_authorization_for_realm against it, then clean up.
        scm = detect_scm_system(self.svn_checkout_path)
        fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
        svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
        os.mkdir(svn_config_dir_path)
        fake_webkit_auth_file = os.path.join(svn_config_dir_path, "fake_webkit_auth_file")
        write_into_file_at_path(fake_webkit_auth_file, credentials)
        result = scm.has_authorization_for_realm(realm, home_directory=fake_home_dir)
        os.remove(fake_webkit_auth_file)
        os.rmdir(svn_config_dir_path)
        os.rmdir(fake_home_dir)
        return result

    def test_not_have_authorization_for_realm_with_credentials_missing_password_and_passtype(self):
        # A username alone (no password, no passtype) is not authorization.
        credentials = """
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
dbates@webkit.org
END
"""
        self.assertFalse(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))

    def test_not_have_authorization_for_realm_when_missing_credentials_file(self):
        # An empty .subversion directory (no auth files) means no authorization.
        scm = detect_scm_system(self.svn_checkout_path)
        fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
        svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
        os.mkdir(svn_config_dir_path)
        self.assertFalse(scm.has_authorization_for_realm(SVN.svn_server_realm, home_directory=fake_home_dir))
        os.rmdir(svn_config_dir_path)
        os.rmdir(fake_home_dir)

    # The following simply run the shared SCMTest suites against SVN.
    def test_reverse_diff(self):
        self._shared_test_reverse_diff()

    def test_diff_for_revision(self):
        self._shared_test_diff_for_revision()

    def test_svn_apply_git_patch(self):
        self._shared_test_svn_apply_git_patch()

    def test_changed_files(self):
        self._shared_test_changed_files()

    def test_changed_files_for_revision(self):
        self._shared_test_changed_files_for_revision()

    def test_added_files(self):
        self._shared_test_added_files()

    def test_contents_at_revision(self):
        self._shared_test_contents_at_revision()

    def test_revisions_changing_file(self):
        self._shared_test_revisions_changing_file()

    def test_committer_email_for_revision(self):
        self._shared_test_committer_email_for_revision()

    def test_add_recursively(self):
        self._shared_test_add_recursively()

    def test_delete(self):
        os.chdir(self.svn_checkout_path)
        self.scm.delete("test_file")
        self.assertTrue("test_file" in self.scm.deleted_files())

    def test_delete_recursively(self):
        self._shared_test_delete_recursively()

    def test_delete_recursively_or_not(self):
        self._shared_test_delete_recursively_or_not()

    def test_head_svn_revision(self):
        self._shared_test_head_svn_revision()

    def test_propset_propget(self):
        # propset followed by propget must round-trip the value.
        filepath = os.path.join(self.svn_checkout_path, "test_file")
        expected_mime_type = "x-application/foo-bar"
        self.scm.propset("svn:mime-type", expected_mime_type, filepath)
        self.assertEqual(expected_mime_type, self.scm.propget("svn:mime-type", filepath))

    def test_show_head(self):
        write_into_file_at_path("test_file", u"Hello!", "utf-8")
        SVNTestRepository._svn_commit("fourth commit")
        self.assertEqual("Hello!", self.scm.show_head('test_file'))

    def test_show_head_binary(self):
        # show_head must return raw bytes, even non-UTF-8 ones.
        data = "\244"
        write_into_file_at_path("binary_file", data, encoding=None)
        self.scm.add("binary_file")
        self.scm.commit_with_message("a test commit")
        self.assertEqual(data, self.scm.show_head('binary_file'))

    def do_test_diff_for_file(self):
        # Shared body for the two bogus-dir tests below.
        write_into_file_at_path('test_file', 'some content')
        self.scm.commit_with_message("a test commit")
        diff = self.scm.diff_for_file('test_file')
        self.assertEqual(diff, "")
        write_into_file_at_path("test_file", "changed content")
        diff = self.scm.diff_for_file('test_file')
        self.assertTrue("-some content" in diff)
        self.assertTrue("+changed content" in diff)

    def clean_bogus_dir(self):
        # diff_for_file uses a scratch "bogus" directory internally; make
        # sure we start each test without a leftover one.
        self.bogus_dir = self.scm._bogus_dir_name()
        if os.path.exists(self.bogus_dir):
            shutil.rmtree(self.bogus_dir)

    def test_diff_for_file_with_existing_bogus_dir(self):
        # A pre-existing bogus dir must be left in place afterwards.
        self.clean_bogus_dir()
        os.mkdir(self.bogus_dir)
        self.do_test_diff_for_file()
        self.assertTrue(os.path.exists(self.bogus_dir))
        shutil.rmtree(self.bogus_dir)

    def test_diff_for_file_with_missing_bogus_dir(self):
        # When absent beforehand, no bogus dir should be left behind.
        self.clean_bogus_dir()
        self.do_test_diff_for_file()
        self.assertFalse(os.path.exists(self.bogus_dir))

    def test_svn_lock(self):
        # Simulate a stale SVN lock and check clean_working_directory clears it.
        svn_root_lock_path = ".svn/lock"
        write_into_file_at_path(svn_root_lock_path, "", "utf-8")
        # webkit-patch uses a Checkout object and runs update-webkit, just use svn update here.
        self.assertRaises(ScriptError, run_command, ['svn', 'update'])
        self.scm.clean_working_directory()
        self.assertFalse(os.path.exists(svn_root_lock_path))
        run_command(['svn', 'update'])  # Should succeed and not raise.

    def test_exists(self):
        self._shared_test_exists(self.scm, self.scm.commit_with_message)
class GitTest(SCMTest):
    """Tests the Git SCM wrapper against plain (non-svn-tracking) repositories."""

    def setUp(self):
        """Creates a fresh git repository with one commit, plus a second
        repository cloned from it (so remote-tracking refs exist)."""
        # FIXME: We should instead clone a git repo that is tracking an SVN repo.
        # That better matches what we do with WebKit.
        self.original_dir = os.getcwd()
        self.untracking_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout2")
        run_command(['git', 'init', self.untracking_checkout_path])
        os.chdir(self.untracking_checkout_path)
        write_into_file_at_path('foo_file', 'foo')
        run_command(['git', 'add', 'foo_file'])
        run_command(['git', 'commit', '-am', 'dummy commit'])
        self.untracking_scm = detect_scm_system(self.untracking_checkout_path)
        self.tracking_git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
        run_command(['git', 'clone', '--quiet', self.untracking_checkout_path, self.tracking_git_checkout_path])
        os.chdir(self.tracking_git_checkout_path)
        self.tracking_scm = detect_scm_system(self.tracking_git_checkout_path)

    def tearDown(self):
        # Change back to a valid directory so that later calls to os.getcwd() do not fail.
        os.chdir(self.original_dir)
        run_command(['rm', '-rf', self.tracking_git_checkout_path])
        run_command(['rm', '-rf', self.untracking_checkout_path])

    def test_remote_branch_ref(self):
        # The clone tracks origin/master; the bare 'git init' repo tracks
        # nothing, so remote_branch_ref must raise there.
        self.assertEqual(self.tracking_scm.remote_branch_ref(), 'refs/remotes/origin/master')
        os.chdir(self.untracking_checkout_path)
        self.assertRaises(ScriptError, self.untracking_scm.remote_branch_ref)

    def test_multiple_remotes(self):
        # With several svn-remote fetch entries configured, the first wins.
        run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote1'])
        run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote2'])
        self.assertEqual(self.tracking_scm.remote_branch_ref(), 'remote1')

    def test_create_patch(self):
        write_into_file_at_path('test_file_commit1', 'contents')
        run_command(['git', 'add', 'test_file_commit1'])
        scm = self.tracking_scm
        scm.commit_locally_with_message('message')
        patch = scm.create_patch()
        # A pure-git checkout has no SVN revision to stamp into the patch.
        self.assertFalse(re.search(r'Subversion Revision:', patch))

    def test_exists(self):
        scm = self.untracking_scm
        self._shared_test_exists(scm, scm.commit_locally_with_message)

    def test_head_svn_revision(self):
        scm = detect_scm_system(self.untracking_checkout_path)
        # If we cloned a git repo tracking an SVG repo, this would give the same result as
        # self._shared_test_head_svn_revision().
        self.assertEqual(scm.head_svn_revision(), '')

    def test_rename_files(self):
        # Patches must not contain git rename headers — presumably because
        # downstream patch tooling cannot handle them (TODO confirm).
        scm = self.tracking_scm
        run_command(['git', 'mv', 'foo_file', 'bar_file'])
        scm.commit_locally_with_message('message')
        patch = scm.create_patch()
        self.assertFalse(re.search(r'rename from ', patch))
        self.assertFalse(re.search(r'rename to ', patch))
class GitSVNTest(SCMTest):
def _setup_git_checkout(self):
    # Clone the scratch SVN repository (created by SVNTestRepository.setup)
    # into a fresh git-svn checkout and chdir into it.
    self.git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
    # --quiet doesn't make git svn silent, so we use run_silent to redirect output
    run_silent(['git', 'svn', 'clone', '-T', 'trunk', self.svn_repo_url, self.git_checkout_path])
    os.chdir(self.git_checkout_path)
def _tear_down_git_checkout(self):
    # Change back to a valid directory first so later os.getcwd() calls work.
    os.chdir(self.original_dir)
    run_command(['rm', '-rf', self.git_checkout_path])
def setUp(self):
    """Creates a scratch SVN repository plus a git-svn checkout of it."""
    self.original_dir = os.getcwd()
    SVNTestRepository.setup(self)
    self._setup_git_checkout()
    self.scm = detect_scm_system(self.git_checkout_path)
    # For historical reasons, we test some checkout code here too.
    self.checkout = Checkout(self.scm)
def tearDown(self):
    # Tear down the SVN repository first, then the git checkout.
    SVNTestRepository.tear_down(self)
    self._tear_down_git_checkout()
def test_detection(self):
    # A git-svn checkout is detected as git and supports local commits.
    scm = detect_scm_system(self.git_checkout_path)
    self.assertEqual(scm.display_name(), "git")
    self.assertEqual(scm.supports_local_commits(), True)
def test_read_git_config(self):
    # read_git_config must round-trip a value written with 'git config'.
    key = 'test.git-config'
    value = 'git-config value'
    run_command(['git', 'config', key, value])
    self.assertEqual(self.scm.read_git_config(key), value)
def test_local_commits(self):
    # A commit made in git but not yet sent to SVN counts as a local commit.
    test_file = os.path.join(self.git_checkout_path, 'test_file')
    write_into_file_at_path(test_file, 'foo')
    run_command(['git', 'commit', '-a', '-m', 'local commit'])
    self.assertEqual(len(self.scm.local_commits()), 1)
def test_discard_local_commits(self):
    # discard_local_commits must drop the pending local commit.
    test_file = os.path.join(self.git_checkout_path, 'test_file')
    write_into_file_at_path(test_file, 'foo')
    run_command(['git', 'commit', '-a', '-m', 'local commit'])
    self.assertEqual(len(self.scm.local_commits()), 1)
    self.scm.discard_local_commits()
    self.assertEqual(len(self.scm.local_commits()), 0)
def test_delete_branch(self):
    new_branch = 'foo'
    run_command(['git', 'checkout', '-b', new_branch])
    self.assertEqual(run_command(['git', 'symbolic-ref', 'HEAD']).strip(), 'refs/heads/' + new_branch)
    # Move off the branch first (git won't delete the current branch).
    run_command(['git', 'checkout', '-b', 'bar'])
    self.scm.delete_branch(new_branch)
    self.assertFalse(re.search(r'foo', run_command(['git', 'branch'])))
def test_remote_merge_base(self):
    # NOTE(review): asserts that an uncommitted working-copy edit appears in
    # the diff against remote_merge_base() but not in the diff against
    # remote_branch_ref()'s commit range.
    test_file = os.path.join(self.git_checkout_path, 'test_file')
    write_into_file_at_path(test_file, 'foo')
    diff_to_common_base = _git_diff(self.scm.remote_branch_ref() + '..')
    diff_to_merge_base = _git_diff(self.scm.remote_merge_base())
    self.assertFalse(re.search(r'foo', diff_to_common_base))
    self.assertTrue(re.search(r'foo', diff_to_merge_base))
def test_rebase_in_progress(self):
    # Manufacture conflicting SVN and git commits so 'git svn rebase'
    # stops mid-rebase, then verify detection and cleanup.
    svn_test_file = os.path.join(self.svn_checkout_path, 'test_file')
    write_into_file_at_path(svn_test_file, "svn_checkout")
    run_command(['svn', 'commit', '--message', 'commit to conflict with git commit'], cwd=self.svn_checkout_path)
    git_test_file = os.path.join(self.git_checkout_path, 'test_file')
    write_into_file_at_path(git_test_file, "git_checkout")
    run_command(['git', 'commit', '-a', '-m', 'commit to be thrown away by rebase abort'])
    # --quiet doesn't make git svn silent, so use run_silent to redirect output
    self.assertRaises(ScriptError, run_silent, ['git', 'svn', '--quiet', 'rebase'])
    scm = detect_scm_system(self.git_checkout_path)
    self.assertTrue(scm.rebase_in_progress())
    # clean_working_directory must abort the stuck rebase...
    scm.clean_working_directory()
    self.assertFalse(scm.rebase_in_progress())
    # ...and be a harmless no-op on an already-clean tree.
    scm.clean_working_directory()
def test_commitish_parsing(self):
    scm = detect_scm_system(self.git_checkout_path)
    # Multiple revisions are cherry-picked.
    self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD~2'])), 1)
    self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD', 'HEAD~2'])), 2)
    # ... is an invalid range specifier
    self.assertRaises(ScriptError, scm.commit_ids_from_commitish_arguments, ['trunk...HEAD'])
def test_commitish_order(self):
    """commit_ids_from_commitish_arguments must return commits oldest-first,
    whereas 'git rev-list' prints them newest-first."""
    scm = detect_scm_system(self.git_checkout_path)
    commit_range = 'HEAD~3..HEAD'
    expected_commits = list(reversed(run_command(['git', 'rev-list', commit_range]).splitlines()))
    self.assertEqual(scm.commit_ids_from_commitish_arguments([commit_range]), expected_commits)
def test_apply_git_patch(self):
    scm = detect_scm_system(self.git_checkout_path)
    # We carefullly pick a diff which does not have a directory addition
    # as currently svn-apply will error out when trying to remove directories
    # in Git: https://bugs.webkit.org/show_bug.cgi?id=34871
    patch = self._create_patch(_git_diff('HEAD..HEAD^'))
    self._setup_webkittools_scripts_symlink(scm)
    Checkout(scm).apply_patch(patch)
def test_apply_git_patch_force(self):
    # The HEAD~2..HEAD patch cannot re-apply to the current tree; even
    # with force=True this must raise a ScriptError.
    scm = detect_scm_system(self.git_checkout_path)
    patch = self._create_patch(_git_diff('HEAD~2..HEAD'))
    self._setup_webkittools_scripts_symlink(scm)
    self.assertRaises(ScriptError, Checkout(scm).apply_patch, patch, force=True)
def test_commit_text_parsing(self):
    # A real commit lands as r6; a dryrun commit reports revision '0'.
    write_into_file_at_path('test_file', 'more test content')
    commit_text = self.scm.commit_with_message("another test commit")
    self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
    self.scm.dryrun = True
    write_into_file_at_path('test_file', 'still more test content')
    commit_text = self.scm.commit_with_message("yet another test commit")
    self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '0')
def test_commit_with_message_working_copy_only(self):
    # Staged working-copy changes (no local commits) commit directly as r6
    # and show up in the resulting svn log.
    write_into_file_at_path('test_file_commit1', 'more test content')
    run_command(['git', 'add', 'test_file_commit1'])
    scm = detect_scm_system(self.git_checkout_path)
    commit_text = scm.commit_with_message("yet another test commit")
    self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
    svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
    self.assertTrue(re.search(r'test_file_commit1', svn_log))
def _local_commit(self, filename, contents, message):
    # Write, stage and locally commit a single file.
    write_into_file_at_path(filename, contents)
    run_command(['git', 'add', filename])
    self.scm.commit_locally_with_message(message)
def _one_local_commit(self):
    # Fixture: exactly one local commit (test_file_commit1).
    self._local_commit('test_file_commit1', 'more test content', 'another test commit')
def _one_local_commit_plus_working_copy_changes(self):
    # Fixture: one local commit plus a staged-but-uncommitted second file.
    self._one_local_commit()
    write_into_file_at_path('test_file_commit2', 'still more test content')
    run_command(['git', 'add', 'test_file_commit2'])
def _two_local_commits(self):
    # Fixture: two local commits (test_file_commit1, test_file_commit2).
    self._one_local_commit()
    self._local_commit('test_file_commit2', 'still more test content', 'yet another test commit')
def _three_local_commits(self):
    # Fixture: three local commits (test_file_commit0..test_file_commit2).
    self._local_commit('test_file_commit0', 'more test content', 'another test commit')
    self._two_local_commits()
def test_revisions_changing_files_with_local_commit(self):
    # A local-only commit has no SVN revisions associated with its file.
    self._one_local_commit()
    self.assertEquals(self.scm.revisions_changing_file('test_file_commit1'), [])
def test_commit_with_message(self):
    # With both a local commit and working-copy changes, committing is
    # ambiguous unless force_squash=True squashes everything into one.
    self._one_local_commit_plus_working_copy_changes()
    scm = detect_scm_system(self.git_checkout_path)
    self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "yet another test commit")
    commit_text = scm.commit_with_message("yet another test commit", force_squash=True)
    self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
    svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
    self.assertTrue(re.search(r'test_file_commit2', svn_log))
    self.assertTrue(re.search(r'test_file_commit1', svn_log))
def test_commit_with_message_git_commit(self):
self._two_local_commits()
scm = detect_scm_system(self.git_checkout_path)
commit_text = scm.commit_with_message("another test commit", git_commit="HEAD^")
self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertTrue(re.search(r'test_file_commit1', svn_log))
self.assertFalse(re.search(r'test_file_commit2', svn_log))
def test_commit_with_message_git_commit_range(self):
self._three_local_commits()
scm = detect_scm_system(self.git_checkout_path)
commit_text = scm.commit_with_message("another test commit", git_commit="HEAD~2..HEAD")
self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertFalse(re.search(r'test_file_commit0', svn_log))
self.assertTrue(re.search(r'test_file_commit1', svn_log))
self.assertTrue(re.search(r'test_file_commit2', svn_log))
def test_changed_files_working_copy_only(self):
self._one_local_commit_plus_working_copy_changes()
scm = detect_scm_system(self.git_checkout_path)
commit_text = scm.commit_with_message("another test commit", git_commit="HEAD..")
self.assertFalse(re.search(r'test_file_commit1', svn_log))
self.assertTrue(re.search(r'test_file_commit2', svn_log))
def test_commit_with_message_only_local_commit(self):
self._one_local_commit()
scm = detect_scm_system(self.git_checkout_path)
commit_text = scm.commit_with_message("another test commit")
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertTrue(re.search(r'test_file_commit1', svn_log))
def test_commit_with_message_multiple_local_commits_and_working_copy(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', 'working copy change')
scm = detect_scm_system(self.git_checkout_path)
self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "another test commit")
commit_text = scm.commit_with_message("another test commit", force_squash=True)
self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertTrue(re.search(r'test_file_commit2', svn_log))
self.assertTrue(re.search(r'test_file_commit1', svn_log))
def test_commit_with_message_git_commit_and_working_copy(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', 'working copy change')
scm = detect_scm_system(self.git_checkout_path)
self.assertRaises(ScriptError, scm.commit_with_message, "another test commit", git_commit="HEAD^")
def test_commit_with_message_multiple_local_commits_always_squash(self):
self._two_local_commits()
scm = detect_scm_system(self.git_checkout_path)
scm._assert_can_squash = lambda working_directory_is_clean: True
commit_text = scm.commit_with_message("yet another test commit")
self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertTrue(re.search(r'test_file_commit2', svn_log))
self.assertTrue(re.search(r'test_file_commit1', svn_log))
def test_commit_with_message_multiple_local_commits(self):
self._two_local_commits()
scm = detect_scm_system(self.git_checkout_path)
self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "yet another test commit")
commit_text = scm.commit_with_message("yet another test commit", force_squash=True)
self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertTrue(re.search(r'test_file_commit2', svn_log))
self.assertTrue(re.search(r'test_file_commit1', svn_log))
def test_commit_with_message_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
scm = detect_scm_system(self.git_checkout_path)
self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "another test commit")
commit_text = scm.commit_with_message("another test commit", force_squash=True)
self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertFalse(re.search(r'test_file2', svn_log))
self.assertTrue(re.search(r'test_file_commit2', svn_log))
self.assertTrue(re.search(r'test_file_commit1', svn_log))
def test_commit_with_message_not_synced_with_conflict(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._local_commit('test_file2', 'asdf', 'asdf commit')
scm = detect_scm_system(self.git_checkout_path)
# There's a conflict between trunk and the test_file2 modification.
self.assertRaises(ScriptError, scm.commit_with_message, "another test commit", force_squash=True)
def test_remote_branch_ref(self):
self.assertEqual(self.scm.remote_branch_ref(), 'refs/remotes/trunk')
def test_reverse_diff(self):
self._shared_test_reverse_diff()
def test_diff_for_revision(self):
self._shared_test_diff_for_revision()
def test_svn_apply_git_patch(self):
self._shared_test_svn_apply_git_patch()
def test_create_patch_local_plus_working_copy(self):
self._one_local_commit_plus_working_copy_changes()
scm = detect_scm_system(self.git_checkout_path)
patch = scm.create_patch()
self.assertTrue(re.search(r'test_file_commit1', patch))
self.assertTrue(re.search(r'test_file_commit2', patch))
def test_create_patch(self):
self._one_local_commit_plus_working_copy_changes()
scm = detect_scm_system(self.git_checkout_path)
patch = scm.create_patch()
self.assertTrue(re.search(r'test_file_commit2', patch))
self.assertTrue(re.search(r'test_file_commit1', patch))
self.assertTrue(re.search(r'Subversion Revision: 5', patch))
def test_create_patch_after_merge(self):
run_command(['git', 'checkout', '-b', 'dummy-branch', 'trunk~3'])
self._one_local_commit()
run_command(['git', 'merge', 'trunk'])
scm = detect_scm_system(self.git_checkout_path)
patch = scm.create_patch()
self.assertTrue(re.search(r'test_file_commit1', patch))
self.assertTrue(re.search(r'Subversion Revision: 5', patch))
def test_create_patch_with_changed_files(self):
self._one_local_commit_plus_working_copy_changes()
scm = detect_scm_system(self.git_checkout_path)
patch = scm.create_patch(changed_files=['test_file_commit2'])
self.assertTrue(re.search(r'test_file_commit2', patch))
def test_create_patch_with_rm_and_changed_files(self):
self._one_local_commit_plus_working_copy_changes()
scm = detect_scm_system(self.git_checkout_path)
os.remove('test_file_commit1')
patch = scm.create_patch()
patch_with_changed_files = scm.create_patch(changed_files=['test_file_commit1', 'test_file_commit2'])
self.assertEquals(patch, patch_with_changed_files)
def test_create_patch_git_commit(self):
self._two_local_commits()
scm = detect_scm_system(self.git_checkout_path)
patch = scm.create_patch(git_commit="HEAD^")
self.assertTrue(re.search(r'test_file_commit1', patch))
self.assertFalse(re.search(r'test_file_commit2', patch))
def test_create_patch_git_commit_range(self):
self._three_local_commits()
scm = detect_scm_system(self.git_checkout_path)
patch = scm.create_patch(git_commit="HEAD~2..HEAD")
self.assertFalse(re.search(r'test_file_commit0', patch))
self.assertTrue(re.search(r'test_file_commit2', patch))
self.assertTrue(re.search(r'test_file_commit1', patch))
def test_create_patch_working_copy_only(self):
self._one_local_commit_plus_working_copy_changes()
scm = detect_scm_system(self.git_checkout_path)
patch = scm.create_patch(git_commit="HEAD..")
self.assertFalse(re.search(r'test_file_commit1', patch))
self.assertTrue(re.search(r'test_file_commit2', patch))
def test_create_patch_multiple_local_commits(self):
self._two_local_commits()
scm = detect_scm_system(self.git_checkout_path)
patch = scm.create_patch()
self.assertTrue(re.search(r'test_file_commit2', patch))
self.assertTrue(re.search(r'test_file_commit1', patch))
def test_create_patch_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
scm = detect_scm_system(self.git_checkout_path)
patch = scm.create_patch()
self.assertFalse(re.search(r'test_file2', patch))
self.assertTrue(re.search(r'test_file_commit2', patch))
self.assertTrue(re.search(r'test_file_commit1', patch))
def test_create_binary_patch(self):
scm = detect_scm_system(self.git_checkout_path)
test_file_name = 'binary_file'
test_file_path = os.path.join(self.git_checkout_path, test_file_name)
file_contents = ''.join(map(chr, range(256)))
write_into_file_at_path(test_file_path, file_contents, encoding=None)
run_command(['git', 'add', test_file_name])
patch = scm.create_patch()
self.assertTrue(re.search(r'\nliteral 0\n', patch))
self.assertTrue(re.search(r'\nliteral 256\n', patch))
run_command(['git', 'rm', '-f', test_file_name])
self._setup_webkittools_scripts_symlink(scm)
self.checkout.apply_patch(self._create_patch(patch))
self.assertEqual(file_contents, read_from_path(test_file_path, encoding=None))
write_into_file_at_path(test_file_path, file_contents, encoding=None)
run_command(['git', 'add', test_file_name])
run_command(['git', 'commit', '-m', 'binary diff'])
patch_from_local_commit = scm.create_patch('HEAD')
self.assertTrue(re.search(r'\nliteral 0\n', patch_from_local_commit))
self.assertTrue(re.search(r'\nliteral 256\n', patch_from_local_commit))
def test_changed_files_local_plus_working_copy(self):
self._one_local_commit_plus_working_copy_changes()
scm = detect_scm_system(self.git_checkout_path)
files = scm.changed_files()
self.assertTrue('test_file_commit1' in files)
self.assertTrue('test_file_commit2' in files)
def test_changed_files_git_commit(self):
self._two_local_commits()
scm = detect_scm_system(self.git_checkout_path)
files = scm.changed_files(git_commit="HEAD^")
self.assertTrue('test_file_commit1' in files)
self.assertFalse('test_file_commit2' in files)
def test_changed_files_git_commit_range(self):
self._three_local_commits()
scm = detect_scm_system(self.git_checkout_path)
files = scm.changed_files(git_commit="HEAD~2..HEAD")
self.assertTrue('test_file_commit0' not in files)
self.assertTrue('test_file_commit1' in files)
self.assertTrue('test_file_commit2' in files)
def test_changed_files_working_copy_only(self):
self._one_local_commit_plus_working_copy_changes()
scm = detect_scm_system(self.git_checkout_path)
files = scm.changed_files(git_commit="HEAD..")
self.assertFalse('test_file_commit1' in files)
self.assertTrue('test_file_commit2' in files)
def test_changed_files_multiple_local_commits(self):
self._two_local_commits()
scm = detect_scm_system(self.git_checkout_path)
files = scm.changed_files()
self.assertTrue('test_file_commit2' in files)
self.assertTrue('test_file_commit1' in files)
def test_changed_files_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
scm = detect_scm_system(self.git_checkout_path)
files = scm.changed_files()
self.assertFalse('test_file2' in files)
self.assertTrue('test_file_commit2' in files)
self.assertTrue('test_file_commit1' in files)
def test_changed_files_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
scm = detect_scm_system(self.git_checkout_path)
files = scm.changed_files()
self.assertFalse('test_file2' in files)
self.assertTrue('test_file_commit2' in files)
self.assertTrue('test_file_commit1' in files)
def test_changed_files(self):
self._shared_test_changed_files()
def test_changed_files_for_revision(self):
self._shared_test_changed_files_for_revision()
def test_contents_at_revision(self):
self._shared_test_contents_at_revision()
def test_revisions_changing_file(self):
self._shared_test_revisions_changing_file()
def test_added_files(self):
self._shared_test_added_files()
def test_committer_email_for_revision(self):
self._shared_test_committer_email_for_revision()
def test_add_recursively(self):
self._shared_test_add_recursively()
def test_delete(self):
self._two_local_commits()
self.scm.delete('test_file_commit1')
self.assertTrue("test_file_commit1" in self.scm.deleted_files())
def test_delete_recursively(self):
self._shared_test_delete_recursively()
def test_delete_recursively_or_not(self):
self._shared_test_delete_recursively_or_not()
def test_head_svn_revision(self):
self._shared_test_head_svn_revision()
def test_to_object_name(self):
relpath = 'test_file_commit1'
fullpath = os.path.join(self.git_checkout_path, relpath)
self._two_local_commits()
self.assertEqual(relpath, self.scm.to_object_name(fullpath))
def test_show_head(self):
self._two_local_commits()
self.assertEqual("more test content", self.scm.show_head('test_file_commit1'))
def test_show_head_binary(self):
self._two_local_commits()
data = "\244"
write_into_file_at_path("binary_file", data, encoding=None)
self.scm.add("binary_file")
self.scm.commit_locally_with_message("a test commit")
self.assertEqual(data, self.scm.show_head('binary_file'))
def test_diff_for_file(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', "Updated", encoding=None)
diff = self.scm.diff_for_file('test_file_commit1')
cached_diff = self.scm.diff_for_file('test_file_commit1')
self.assertTrue("+Updated" in diff)
self.assertTrue("-more test content" in diff)
self.scm.add('test_file_commit1')
cached_diff = self.scm.diff_for_file('test_file_commit1')
self.assertTrue("+Updated" in cached_diff)
self.assertTrue("-more test content" in cached_diff)
def test_exists(self):
scm = detect_scm_system(self.git_checkout_path)
self._shared_test_exists(scm, scm.commit_locally_with_message)
class GitTestWithMock(unittest.TestCase):
def make_scm(self, logging_executive=False):
scm = Git(cwd=None, executive=MockExecutive())
scm._executive._should_log = logging_executive
return scm
def test_create_patch(self):
scm = self.make_scm(logging_executive=True)
expected_stderr = "MOCK run_command: ['git', 'merge-base', u'refs/remotes/origin/master', 'HEAD'], cwd=%(checkout)s\nMOCK run_command: ['git', 'diff', '--binary', '--no-ext-diff', '--full-index', '-M', 'MOCK output of child process', '--'], cwd=%(checkout)s\nMOCK run_command: ['git', 'log', '-25'], cwd=None\n" % {'checkout': scm.checkout_root}
OutputCapture().assert_outputs(self, scm.create_patch, expected_stderr=expected_stderr)
def test_push_local_commits_to_server_with_username_and_password(self):
self.assertEquals(self.make_scm().push_local_commits_to_server(username='dbates@webkit.org', password='blah'), "MOCK output of child process")
def test_push_local_commits_to_server_without_username_and_password(self):
self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server)
def test_push_local_commits_to_server_with_username_and_without_password(self):
self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server, {'username': 'dbates@webkit.org'})
def test_push_local_commits_to_server_without_username_and_with_password(self):
self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server, {'password': 'blah'})
if __name__ == '__main__':
unittest.main()
| true | true |
f7f4b5e6412ce2f783b94d79b418c3c22a8b467e | 756 | py | Python | pydantic_aioredis/config.py | estesistech/pydantic-aioredis | facf41d04fb68349ce2e15b5aa30e574e1ba3db3 | [
"MIT"
] | 20 | 2021-08-07T23:17:59.000Z | 2022-02-15T05:08:31.000Z | pydantic_aioredis/config.py | estesistech/pydantic-aioredis | facf41d04fb68349ce2e15b5aa30e574e1ba3db3 | [
"MIT"
] | 28 | 2021-10-08T22:02:29.000Z | 2022-03-30T19:28:49.000Z | pydantic_aioredis/config.py | andrewthetechie/pydantic-aioredis | 0b83d3366e112855892bcb7fe2e460aae8f67d7e | [
"MIT"
] | 7 | 2021-10-09T08:34:02.000Z | 2022-02-20T14:58:44.000Z | """Module containing the main config classes"""
from typing import Optional
from pydantic import BaseModel
class RedisConfig(BaseModel):
"""A config object for connecting to redis"""
host: str = "localhost"
port: int = 6379
db: int = 0
password: Optional[str] = None
ssl: bool = False
encoding: Optional[str] = "utf-8"
@property
def redis_url(self) -> str:
"""Returns a redis url to connect to"""
proto = "rediss" if self.ssl else "redis"
if self.password is None:
return f"{proto}://{self.host}:{self.port}/{self.db}"
return f"{proto}://:{self.password}@{self.host}:{self.port}/{self.db}"
class Config:
"""Pydantic schema config"""
orm_mode = True
| 26.068966 | 78 | 0.611111 | from typing import Optional
from pydantic import BaseModel
class RedisConfig(BaseModel):
host: str = "localhost"
port: int = 6379
db: int = 0
password: Optional[str] = None
ssl: bool = False
encoding: Optional[str] = "utf-8"
@property
def redis_url(self) -> str:
proto = "rediss" if self.ssl else "redis"
if self.password is None:
return f"{proto}://{self.host}:{self.port}/{self.db}"
return f"{proto}://:{self.password}@{self.host}:{self.port}/{self.db}"
class Config:
orm_mode = True
| true | true |
f7f4b5fe3dee9b4abc37bdfcf868b3decfa386b1 | 5,670 | py | Python | src/prms6bmi/prms6bmi/reader.py | nhm-usgs/bmi-test-projects | 9ed065f291f0b33be9a9faeb0a02b3a253f36e9e | [
"MIT"
] | null | null | null | src/prms6bmi/prms6bmi/reader.py | nhm-usgs/bmi-test-projects | 9ed065f291f0b33be9a9faeb0a02b3a253f36e9e | [
"MIT"
] | 1 | 2020-08-14T17:45:15.000Z | 2020-08-14T17:49:00.000Z | src/prms6bmi/prms6bmi/reader.py | nhm-usgs/bmi-test-projects | 9ed065f291f0b33be9a9faeb0a02b3a253f36e9e | [
"MIT"
] | null | null | null | """
Created on Thu Dec 12 08:00:48 2019
@author:rmcd build on pangeo package by Steve Markstrom - USGS
"""
import xarray as xr
import glob
import os
import pandas as pd
import geopandas as gpd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def get_DataSet_prms6(summary, myparam):
# merge spatial locations of hru and segments into summary file
ds = xr.open_dataset(summary)
param = xr.open_dataset(myparam)
hru_lat = param.get("hru_lat")
ds['hru_lat'] = hru_lat
hru_lon = param.get("hru_lon")
ds['hru_lon'] = hru_lon
seg_lat = param.get("seg_lat")
ds['seg_lat'] = seg_lat
seg_lon = param.get("seg_lon")
ds['seg_lon'] = seg_lon
return ds
def bmi_prms6_value_splot(gdf, mbmi, value, tvmin, tvmax, index, timesel, pax = None):
tax = pax or plt.gca()
gdf[value] = mbmi.get_value(value)
divider = make_axes_locatable(tax)
tcax = divider.append_axes(position='right', size='5%', pad=0.1)
gdf.plot(column=value, vmin=tvmin, vmax=tvmax, ax=tax, legend=True, cax=tcax)
tax.set_title(value)
def plot_climate(c_xarray, hru_index, val, start, end, tax=None):
tax = tax or plt.gca()
hru_ids = c_xarray.hru.values
simclimate = c_xarray.sel(time=slice(start, end))
line, = simclimate.sel(hru=hru_ids[hru_index])[val].plot(ax=tax)
tax.set_title(val)
def bmi_prms6_value_plot(data, n_index, val, label, start, end, tax = None):
tax = tax or plt.gca()
#test if val exists in both and get nhru or nsegment
dim_type = None
try:
dim_type = data[val].dims[1]
if dim_type == 'nhru':
data_val = data[val].sel(nhru=n_index, time=slice(start, end)).to_pandas()
# dprms_val = dprms[val].sel(nhru=n_index, time=slice(start, end))
data_val.plot.line(ax=tax, label=label)
tax.legend()
# line1, = dprms_val.plot.line(x='time', ax=tax, add_legend=True)
elif dim_type == 'nsegment':
data_val = data[val].sel(nsegment=n_index, time=slice(start, end)).to_pandas()
# dprms_val = dprms[val].sel(nsegment=n_index, time=slice(start, end)).to_pandas()
data_val.plot(ax=tax, label=label)
tax.legend()
# line1, = dprms_val.plot(label='PRMS6')
tax.set_title(f'{val} {n_index}')
except Exception as err:
print('Error', {err})
def bmi_prms6_residual_plot(dbmi, dprms, n_index, val, label, start, end, tax = None):
tax = tax or plt.gca()
dim_type = dbmi[val].dims[1]
try:
if dim_type == 'nhru':
data_val = dbmi[val] - dprms[val]
data = data_val.sel(nhru=n_index, time=slice(start, end)).to_pandas()
# bmi = dbmi[val]
# prms = dprms.sel(nhru=n_index, time=slice(start, end))[val]
elif dim_type == 'nsegment':
data_val = dbmi[val] - dprms[val]
data = data_val.sel(nsegment=n_index, time=slice(start, end)).to_pandas()
# bmi = dbmi.sel[val]
# prms = dprms.sel(nsegment=n_index, time=slice(start, end))[val]
# res = prms-bmi
data.plot(ax=tax, label=label)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
tax.legend()
tax.set_title('Residual (prms-bmi)')
except Exception as err:
print('Error', {err})
def get_feat_coord(feat, data_set, feat_id):
lat_da = data_set[feat + '_lat']
lat = lat_da[feat_id-1].values
lon_da = data_set[feat + '_lon']
lon = lon_da[feat_id-1].values
return lat,lon
def get_hrus_for_box(ds, lat_min, lat_max, lon_min, lon_max):
sel = ds.hru_lat.sel(hruid=((ds.hru_lat.values >= lat_min)
& (ds.hru_lat.values <= lat_max)))
ids_1 = sel.hruid.values
sel_1 = ds.hru_lon.sel(hruid=ids_1)
sel_2 = sel_1.sel(hruid=((sel_1.values >= lon_min) & (sel_1.values <= lon_max)))
ids_2 = sel_2.hruid.values
return ids_2
def get_segs_for_box(ds, lat_min, lat_max, lon_min, lon_max):
sel = ds.seg_lat.sel(segid=((ds.seg_lat.values >= lat_min)
& (ds.seg_lat.values <= lat_max)))
ids_1 = sel.segid.values
sel_1 = ds.seg_lon.sel(segid=ids_1)
sel_2 = sel_1.sel(segid=((sel_1.values >= lon_min) & (sel_1.values <= lon_max)))
ids_2 = sel_2.segid.values
return ids_2
def get_values_for_DOY(ds, timestamp, hru_ids, var_name):
if (timestamp < pd.Timestamp('1979-10-01') or timestamp > pd.Timestamp('1980-09-30')):
print("The date you provided is outside of range 1979-10-01 to 1980-09-30")
return None
time_range = pd.date_range(timestamp, freq='1Y', periods=40)
dif = timestamp - time_range[0]
time_range = time_range + dif
# print(time_range)
date_list = []
val_list = []
for ts in time_range:
try:
date_str = str(ts.year).zfill(4) + '-' + str(ts.month).zfill(2) + '-' + str(ts.day).zfill(2)
ds_sel = ds[var_name].sel(hruid=hru_ids, time=date_str)
val = ds_sel.values[0][0]
date_list.append(date_str + 'T05:00:00')
val_list.append(val)
except:
pass
val_np = np.asarray(val_list, dtype=np.float64)
val_np = val_np.reshape((1, val_np.shape[0]))
hru_ids_np = np.asarray(hru_ids, dtype=np.int32)
date_np = np.asarray(date_list, dtype='datetime64[ns]')
attrs = ds[var_name].attrs
da_new = xr.DataArray(data=val_np, dims=['hruid','time'],
coords={'hruid':hru_ids_np,'time':date_np},
attrs=attrs)
return da_new
| 35.660377 | 104 | 0.621517 |
import xarray as xr
import glob
import os
import pandas as pd
import geopandas as gpd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def get_DataSet_prms6(summary, myparam):
ds = xr.open_dataset(summary)
param = xr.open_dataset(myparam)
hru_lat = param.get("hru_lat")
ds['hru_lat'] = hru_lat
hru_lon = param.get("hru_lon")
ds['hru_lon'] = hru_lon
seg_lat = param.get("seg_lat")
ds['seg_lat'] = seg_lat
seg_lon = param.get("seg_lon")
ds['seg_lon'] = seg_lon
return ds
def bmi_prms6_value_splot(gdf, mbmi, value, tvmin, tvmax, index, timesel, pax = None):
tax = pax or plt.gca()
gdf[value] = mbmi.get_value(value)
divider = make_axes_locatable(tax)
tcax = divider.append_axes(position='right', size='5%', pad=0.1)
gdf.plot(column=value, vmin=tvmin, vmax=tvmax, ax=tax, legend=True, cax=tcax)
tax.set_title(value)
def plot_climate(c_xarray, hru_index, val, start, end, tax=None):
tax = tax or plt.gca()
hru_ids = c_xarray.hru.values
simclimate = c_xarray.sel(time=slice(start, end))
line, = simclimate.sel(hru=hru_ids[hru_index])[val].plot(ax=tax)
tax.set_title(val)
def bmi_prms6_value_plot(data, n_index, val, label, start, end, tax = None):
tax = tax or plt.gca()
dim_type = None
try:
dim_type = data[val].dims[1]
if dim_type == 'nhru':
data_val = data[val].sel(nhru=n_index, time=slice(start, end)).to_pandas()
data_val.plot.line(ax=tax, label=label)
tax.legend()
elif dim_type == 'nsegment':
data_val = data[val].sel(nsegment=n_index, time=slice(start, end)).to_pandas()
data_val.plot(ax=tax, label=label)
tax.legend()
tax.set_title(f'{val} {n_index}')
except Exception as err:
print('Error', {err})
def bmi_prms6_residual_plot(dbmi, dprms, n_index, val, label, start, end, tax = None):
tax = tax or plt.gca()
dim_type = dbmi[val].dims[1]
try:
if dim_type == 'nhru':
data_val = dbmi[val] - dprms[val]
data = data_val.sel(nhru=n_index, time=slice(start, end)).to_pandas()
elif dim_type == 'nsegment':
data_val = dbmi[val] - dprms[val]
data = data_val.sel(nsegment=n_index, time=slice(start, end)).to_pandas()
data.plot(ax=tax, label=label)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
tax.legend()
tax.set_title('Residual (prms-bmi)')
except Exception as err:
print('Error', {err})
def get_feat_coord(feat, data_set, feat_id):
lat_da = data_set[feat + '_lat']
lat = lat_da[feat_id-1].values
lon_da = data_set[feat + '_lon']
lon = lon_da[feat_id-1].values
return lat,lon
def get_hrus_for_box(ds, lat_min, lat_max, lon_min, lon_max):
sel = ds.hru_lat.sel(hruid=((ds.hru_lat.values >= lat_min)
& (ds.hru_lat.values <= lat_max)))
ids_1 = sel.hruid.values
sel_1 = ds.hru_lon.sel(hruid=ids_1)
sel_2 = sel_1.sel(hruid=((sel_1.values >= lon_min) & (sel_1.values <= lon_max)))
ids_2 = sel_2.hruid.values
return ids_2
def get_segs_for_box(ds, lat_min, lat_max, lon_min, lon_max):
sel = ds.seg_lat.sel(segid=((ds.seg_lat.values >= lat_min)
& (ds.seg_lat.values <= lat_max)))
ids_1 = sel.segid.values
sel_1 = ds.seg_lon.sel(segid=ids_1)
sel_2 = sel_1.sel(segid=((sel_1.values >= lon_min) & (sel_1.values <= lon_max)))
ids_2 = sel_2.segid.values
return ids_2
def get_values_for_DOY(ds, timestamp, hru_ids, var_name):
if (timestamp < pd.Timestamp('1979-10-01') or timestamp > pd.Timestamp('1980-09-30')):
print("The date you provided is outside of range 1979-10-01 to 1980-09-30")
return None
time_range = pd.date_range(timestamp, freq='1Y', periods=40)
dif = timestamp - time_range[0]
time_range = time_range + dif
date_list = []
val_list = []
for ts in time_range:
try:
date_str = str(ts.year).zfill(4) + '-' + str(ts.month).zfill(2) + '-' + str(ts.day).zfill(2)
ds_sel = ds[var_name].sel(hruid=hru_ids, time=date_str)
val = ds_sel.values[0][0]
date_list.append(date_str + 'T05:00:00')
val_list.append(val)
except:
pass
val_np = np.asarray(val_list, dtype=np.float64)
val_np = val_np.reshape((1, val_np.shape[0]))
hru_ids_np = np.asarray(hru_ids, dtype=np.int32)
date_np = np.asarray(date_list, dtype='datetime64[ns]')
attrs = ds[var_name].attrs
da_new = xr.DataArray(data=val_np, dims=['hruid','time'],
coords={'hruid':hru_ids_np,'time':date_np},
attrs=attrs)
return da_new
| true | true |
f7f4b6138cd1f4bfc9502c42c16210e9224ef1ad | 959 | py | Python | zoomus/components/recording.py | appfluence/zoomus | a14e1f08700b9dad89f00b0d5c2a73a24d421c78 | [
"Apache-2.0"
] | 2 | 2020-03-14T14:47:18.000Z | 2020-04-06T23:20:54.000Z | zoomus/components/recording.py | appfluence/zoomus | a14e1f08700b9dad89f00b0d5c2a73a24d421c78 | [
"Apache-2.0"
] | null | null | null | zoomus/components/recording.py | appfluence/zoomus | a14e1f08700b9dad89f00b0d5c2a73a24d421c78 | [
"Apache-2.0"
] | 1 | 2022-03-04T11:54:56.000Z | 2022-03-04T11:54:56.000Z | """Zoom.us REST API Python Client -- Recording component"""
__author__ = "Tomas Garzon"
__email__ = "tomasgarzonhervas@gmail.com"
from zoomus import util
from zoomus.components import base
class RecordingComponent(base.BaseComponent):
"""Component dealing with all recording related matters"""
def list(self, **kwargs):
util.require_keys(kwargs, 'host_id')
start = kwargs.pop('start', None)
if start:
kwargs['from'] = util.date_to_str(start)
end = kwargs.pop('end', None)
if end:
kwargs['to'] = util.date_to_str(end)
return self.post_request("/recording/list", params=kwargs)
def delete(self, **kwargs):
util.require_keys(kwargs, ['meeting_id'])
return self.post_request("/recording/delete", params=kwargs)
def get(self, **kwargs):
util.require_keys(kwargs, ['meeting_id'])
return self.post_request("/recording/get", params=kwargs)
| 31.966667 | 68 | 0.657977 |
__author__ = "Tomas Garzon"
__email__ = "tomasgarzonhervas@gmail.com"
from zoomus import util
from zoomus.components import base
class RecordingComponent(base.BaseComponent):
def list(self, **kwargs):
util.require_keys(kwargs, 'host_id')
start = kwargs.pop('start', None)
if start:
kwargs['from'] = util.date_to_str(start)
end = kwargs.pop('end', None)
if end:
kwargs['to'] = util.date_to_str(end)
return self.post_request("/recording/list", params=kwargs)
def delete(self, **kwargs):
util.require_keys(kwargs, ['meeting_id'])
return self.post_request("/recording/delete", params=kwargs)
def get(self, **kwargs):
util.require_keys(kwargs, ['meeting_id'])
return self.post_request("/recording/get", params=kwargs)
| true | true |
f7f4b70b5e8e3945792755b04b1491ff244620e7 | 1,516 | py | Python | expenses_report/visualizations/transaction_bubbles_visualization.py | kircher-sw/expenses-tracker | afd9550616a79f54dd119d91cec209c7748e9689 | [
"BSD-3-Clause"
] | 2 | 2019-07-24T16:01:12.000Z | 2021-07-21T01:51:33.000Z | expenses_report/visualizations/transaction_bubbles_visualization.py | kircher-sw/expenses-tracker | afd9550616a79f54dd119d91cec209c7748e9689 | [
"BSD-3-Clause"
] | null | null | null | expenses_report/visualizations/transaction_bubbles_visualization.py | kircher-sw/expenses-tracker | afd9550616a79f54dd119d91cec209c7748e9689 | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
from plotly import graph_objects as go
from expenses_report.chart_builder import ChartBuilder
from expenses_report.config import config
from expenses_report.preprocessing.data_provider import DataProvider
from expenses_report.visualizations.i_visualization import IVisualization
class TransactionBubblesVisualization(IVisualization):
_category_values = dict()
def prepare_data(self, data: DataProvider):
"""
Preprocesses each transaction and calculates the relative amount within its category
"""
RATIO = 'ratio'
df_all = data.get_all_transactions()
for category_name in config.categories.keys():
df_category = df_all[df_all[config.CATEGORY_MAIN_COL] == category_name]
category_total = df_category[config.ABSAMOUNT_COL].sum()
df_category.loc[:, RATIO] = df_category[config.ABSAMOUNT_COL] / category_total
x_axis = list(map(lambda datetime: pd.Timestamp(datetime), pd.DatetimeIndex(df_category.index).values))
if x_axis:
self._category_values[category_name] = (x_axis,
df_category[config.ABSAMOUNT_COL].values,
df_category[RATIO].values,
df_category[config.LABEL].values)
def build_visualization(self) -> go.Figure:
return ChartBuilder.create_bubble_chart(self._category_values)
| 45.939394 | 115 | 0.659631 | import pandas as pd
from plotly import graph_objects as go
from expenses_report.chart_builder import ChartBuilder
from expenses_report.config import config
from expenses_report.preprocessing.data_provider import DataProvider
from expenses_report.visualizations.i_visualization import IVisualization
class TransactionBubblesVisualization(IVisualization):
_category_values = dict()
def prepare_data(self, data: DataProvider):
RATIO = 'ratio'
df_all = data.get_all_transactions()
for category_name in config.categories.keys():
df_category = df_all[df_all[config.CATEGORY_MAIN_COL] == category_name]
category_total = df_category[config.ABSAMOUNT_COL].sum()
df_category.loc[:, RATIO] = df_category[config.ABSAMOUNT_COL] / category_total
x_axis = list(map(lambda datetime: pd.Timestamp(datetime), pd.DatetimeIndex(df_category.index).values))
if x_axis:
self._category_values[category_name] = (x_axis,
df_category[config.ABSAMOUNT_COL].values,
df_category[RATIO].values,
df_category[config.LABEL].values)
def build_visualization(self) -> go.Figure:
return ChartBuilder.create_bubble_chart(self._category_values)
| true | true |
f7f4b83c9aee3f00078a498cb0077d33a4ab6da8 | 10,551 | py | Python | python/ray/_private/runtime_env/utils.py | jamesliu/ray | 11ab412db1fa3603a3006e8ed414e80dd1f11c0c | [
"Apache-2.0"
] | 3 | 2021-06-24T17:00:18.000Z | 2021-09-20T15:49:11.000Z | python/ray/_private/runtime_env/utils.py | jamesliu/ray | 11ab412db1fa3603a3006e8ed414e80dd1f11c0c | [
"Apache-2.0"
] | 227 | 2021-10-01T08:00:01.000Z | 2021-12-28T16:47:26.000Z | python/ray/_private/runtime_env/utils.py | gramhagen/ray | c18caa4db36d466718bdbcb2229aa0b2dc03da1f | [
"Apache-2.0"
] | 1 | 2020-12-03T20:36:00.000Z | 2020-12-03T20:36:00.000Z | from typing import Dict, List, Tuple, Any
import json
from ray.core.generated.runtime_env_common_pb2 \
import RuntimeEnv as ProtoRuntimeEnv
from google.protobuf import json_format
def _build_proto_pip_runtime_env(runtime_env_dict: dict,
runtime_env: ProtoRuntimeEnv):
""" Construct pip runtime env protobuf from runtime env dict.
"""
if runtime_env_dict.get("pip"):
runtime_env.pip_runtime_env.config.packages.extend(
runtime_env_dict["pip"])
def _parse_proto_pip_runtime_env(runtime_env: ProtoRuntimeEnv,
runtime_env_dict: dict):
""" Parse pip runtime env protobuf to runtime env dict.
"""
if runtime_env.HasField("pip_runtime_env"):
runtime_env_dict["pip"] = list(
runtime_env.pip_runtime_env.config.packages)
def _build_proto_conda_runtime_env(runtime_env_dict: dict,
runtime_env: ProtoRuntimeEnv):
""" Construct conda runtime env protobuf from runtime env dict.
"""
if runtime_env_dict.get("conda"):
if isinstance(runtime_env_dict["conda"], str):
runtime_env.conda_runtime_env.conda_env_name = runtime_env_dict[
"conda"]
else:
runtime_env.conda_runtime_env.config = json.dumps(
runtime_env_dict["conda"], sort_keys=True)
def _parse_proto_conda_runtime_env(runtime_env: ProtoRuntimeEnv,
runtime_env_dict: dict):
""" Parse conda runtime env protobuf to runtime env dict.
"""
if runtime_env.HasField("conda_runtime_env"):
runtime_env_dict["conda"] = json.loads(
runtime_env.conda_runtime_env.config)
def _build_proto_container_runtime_env(runtime_env_dict: dict,
runtime_env: ProtoRuntimeEnv):
""" Construct container runtime env protobuf from runtime env dict.
"""
if runtime_env_dict.get("container"):
container = runtime_env_dict["container"]
runtime_env.py_container_runtime_env.image = container.get("image", "")
runtime_env.py_container_runtime_env.worker_path = container.get(
"worker_path", "")
runtime_env.py_container_runtime_env.run_options.extend(
container.get("run_options", []))
def _parse_proto_container_runtime_env(runtime_env: ProtoRuntimeEnv,
runtime_env_dict: dict):
""" Parse container runtime env protobuf to runtime env dict.
"""
if runtime_env.HasField("py_container_runtime_env"):
runtime_env_dict["container"][
"image"] = runtime_env.container_runtime_env.image
runtime_env_dict["container"][
"worker_path"] = runtime_env.container_runtime_env.worker_path
runtime_env_dict["container"]["run_options"] = list(
runtime_env.container_runtime_env.run_options)
def _build_proto_plugin_runtime_env(runtime_env_dict: dict,
runtime_env: ProtoRuntimeEnv):
""" Construct plugin runtime env protobuf from runtime env dict.
"""
if runtime_env_dict.get("plugins"):
for class_path, plugin_field in runtime_env_dict["plugins"].items():
plugin = runtime_env.py_plugin_runtime_env.plugins.add()
plugin.class_path = class_path
plugin.config = json.dumps(plugin_field, sort_keys=True)
def _parse_proto_plugin_runtime_env(runtime_env: ProtoRuntimeEnv,
runtime_env_dict: dict):
""" Parse plugin runtime env protobuf to runtime env dict.
"""
if runtime_env.HasField("py_plugin_runtime_env"):
for plugin in runtime_env.py_plugin_runtime_env.plugins:
runtime_env_dict["plugins"][plugin.class_path] = dict(
json.loads(plugin.config))
class RuntimeEnv:
    """
    A wrap class of runtime env protobuf.

    Thin wrapper around the ``ProtoRuntimeEnv`` message: construction from a
    JSON-serialized proto or a ready proto, conversion to/from the plain
    ``dict`` form, and typed accessors for the individual fields.
    """

    def __init__(self,
                 serialized_runtime_env=None,
                 proto_runtime_env: "ProtoRuntimeEnv" = None):
        # Exactly one source wins: a JSON-serialized proto string first,
        # then an already-built proto; otherwise start from an empty proto.
        if serialized_runtime_env:
            self._proto_runtime_env = json_format.Parse(
                serialized_runtime_env, ProtoRuntimeEnv())
        elif proto_runtime_env:
            self._proto_runtime_env = proto_runtime_env
        else:
            self._proto_runtime_env = ProtoRuntimeEnv()

    def to_dict(self) -> Dict:
        """Convert the wrapped proto back into the user-facing dict form.

        Only fields that are actually set are included; the four
        ``_parse_proto_*`` helpers fill in the pip/conda/container/plugin
        sections.
        """
        initialize_dict: Dict[str, Any] = {}
        if self._proto_runtime_env.py_modules:
            initialize_dict["py_modules"] = list(
                self._proto_runtime_env.py_modules)
        if self._proto_runtime_env.working_dir:
            initialize_dict[
                "working_dir"] = self._proto_runtime_env.working_dir
        if self._proto_runtime_env.env_vars:
            initialize_dict["env_vars"] = dict(
                self._proto_runtime_env.env_vars)
        if self._proto_runtime_env.extensions:
            # Extensions (e.g. "_ray_release") are stored flat in the dict.
            initialize_dict.update(dict(self._proto_runtime_env.extensions))
        _parse_proto_pip_runtime_env(self._proto_runtime_env, initialize_dict)
        _parse_proto_conda_runtime_env(self._proto_runtime_env,
                                       initialize_dict)
        _parse_proto_container_runtime_env(self._proto_runtime_env,
                                           initialize_dict)
        _parse_proto_plugin_runtime_env(self._proto_runtime_env,
                                        initialize_dict)
        return initialize_dict

    def has_uris(self) -> bool:
        """Return True if any URI field (working dir, py_modules, conda,
        pip, plugins) is set."""
        uris = self._proto_runtime_env.uris
        if uris.working_dir_uri \
                or uris.py_modules_uris \
                or uris.conda_uri \
                or uris.pip_uri \
                or uris.plugin_uris:
            return True
        return False

    def working_dir_uri(self) -> str:
        """URI of the packaged working directory ("" when unset)."""
        return self._proto_runtime_env.uris.working_dir_uri

    def py_modules_uris(self) -> List[str]:
        """URIs of the packaged py_modules (possibly empty)."""
        return list(self._proto_runtime_env.uris.py_modules_uris)

    def conda_uri(self) -> str:
        """URI of the conda environment resource ("" when unset)."""
        return self._proto_runtime_env.uris.conda_uri

    def pip_uri(self) -> str:
        """URI of the pip requirements resource ("" when unset)."""
        return self._proto_runtime_env.uris.pip_uri

    def plugin_uris(self) -> List[str]:
        """URIs contributed by plugins (possibly empty)."""
        return list(self._proto_runtime_env.uris.plugin_uris)

    def working_dir(self) -> str:
        """The raw working_dir value ("" when unset)."""
        return self._proto_runtime_env.working_dir

    def py_modules(self) -> List[str]:
        """The raw py_modules entries (possibly empty)."""
        return list(self._proto_runtime_env.py_modules)

    def env_vars(self) -> Dict:
        """Environment variables as a plain dict (possibly empty)."""
        return dict(self._proto_runtime_env.env_vars)

    def plugins(self) -> List[Tuple[str, str]]:
        """(class_path, JSON config string) pairs for each plugin."""
        result = list()
        for plugin in self._proto_runtime_env.py_plugin_runtime_env.plugins:
            result.append((plugin.class_path, plugin.config))
        return result

    def has_conda(self) -> bool:
        """True if the conda section is present at all."""
        return self._proto_runtime_env.HasField("conda_runtime_env")

    def conda_env_name(self) -> str:
        """Name of a preexisting conda env, or None.

        None both when there is no conda section and when the section holds
        a full spec (``config``) instead of a name.
        """
        if not self.has_conda():
            return None
        if not self._proto_runtime_env.conda_runtime_env.HasField(
                "conda_env_name"):
            return None
        return self._proto_runtime_env.conda_runtime_env.conda_env_name

    def conda_config(self) -> str:
        """JSON-serialized conda spec, or None (see conda_env_name)."""
        if not self.has_conda():
            return None
        if not self._proto_runtime_env.conda_runtime_env.HasField("config"):
            return None
        return self._proto_runtime_env.conda_runtime_env.config

    def has_pip(self) -> bool:
        """True if the pip section is present."""
        return self._proto_runtime_env.HasField("pip_runtime_env")

    def pip_packages(self) -> List:
        """The pip package list; [] when the pip section is absent."""
        if not self.has_pip():
            return []
        return list(self._proto_runtime_env.pip_runtime_env.config.packages)

    def serialize(self) -> str:
        # Round-trip through json so keys come out sorted: serialized
        # strings of equal runtime envs then compare equal.
        return json.dumps(
            json.loads(json_format.MessageToJson(self._proto_runtime_env)),
            sort_keys=True)

    def get_extension(self, key) -> str:
        """Value of extension *key*, or None when missing."""
        return self._proto_runtime_env.extensions.get(key)

    def has_py_container(self) -> bool:
        """True if the container section is present."""
        return self._proto_runtime_env.HasField("py_container_runtime_env")

    def py_container_image(self) -> str:
        """Container image name, or None when no container section."""
        if not self.has_py_container():
            return None
        return self._proto_runtime_env.py_container_runtime_env.image

    def py_container_run_options(self) -> List:
        """Container run options, or None when no container section."""
        if not self.has_py_container():
            return None
        return list(
            self._proto_runtime_env.py_container_runtime_env.run_options)

    @classmethod
    def from_dict(cls, runtime_env_dict: Dict[str, Any], conda_get_uri_fn,
                  pip_get_uri_fn) -> "RuntimeEnv":
        """Build a RuntimeEnv from the user-facing dict form.

        ``conda_get_uri_fn`` / ``pip_get_uri_fn`` map the dict to resource
        URIs (they may return None, in which case no URI is recorded).
        """
        proto_runtime_env = ProtoRuntimeEnv()
        proto_runtime_env.py_modules.extend(
            runtime_env_dict.get("py_modules", []))
        proto_runtime_env.working_dir = runtime_env_dict.get("working_dir", "")
        # working_dir / py_modules values double as their own URIs.
        if "working_dir" in runtime_env_dict:
            proto_runtime_env.uris.working_dir_uri = runtime_env_dict[
                "working_dir"]
        if "py_modules" in runtime_env_dict:
            for uri in runtime_env_dict["py_modules"]:
                proto_runtime_env.uris.py_modules_uris.append(uri)
        if "conda" in runtime_env_dict:
            uri = conda_get_uri_fn(runtime_env_dict)
            if uri is not None:
                proto_runtime_env.uris.conda_uri = uri
        if "pip" in runtime_env_dict:
            uri = pip_get_uri_fn(runtime_env_dict)
            if uri is not None:
                proto_runtime_env.uris.pip_uri = uri
        env_vars = runtime_env_dict.get("env_vars", {})
        proto_runtime_env.env_vars.update(env_vars.items())
        # Internal "_"-prefixed knobs are stashed in extensions as strings.
        if "_ray_release" in runtime_env_dict:
            proto_runtime_env.extensions["_ray_release"] = str(
                runtime_env_dict["_ray_release"])
        if "_ray_commit" in runtime_env_dict:
            proto_runtime_env.extensions["_ray_commit"] = str(
                runtime_env_dict["_ray_commit"])
        if "_inject_current_ray" in runtime_env_dict:
            proto_runtime_env.extensions["_inject_current_ray"] = str(
                runtime_env_dict["_inject_current_ray"])
        _build_proto_pip_runtime_env(runtime_env_dict, proto_runtime_env)
        _build_proto_conda_runtime_env(runtime_env_dict, proto_runtime_env)
        _build_proto_container_runtime_env(runtime_env_dict, proto_runtime_env)
        _build_proto_plugin_runtime_env(runtime_env_dict, proto_runtime_env)
        return cls(proto_runtime_env=proto_runtime_env)
| 40.737452 | 79 | 0.659084 | from typing import Dict, List, Tuple, Any
import json
from ray.core.generated.runtime_env_common_pb2 \
import RuntimeEnv as ProtoRuntimeEnv
from google.protobuf import json_format
def _build_proto_pip_runtime_env(runtime_env_dict: dict,
runtime_env: ProtoRuntimeEnv):
if runtime_env_dict.get("pip"):
runtime_env.pip_runtime_env.config.packages.extend(
runtime_env_dict["pip"])
def _parse_proto_pip_runtime_env(runtime_env: ProtoRuntimeEnv,
runtime_env_dict: dict):
if runtime_env.HasField("pip_runtime_env"):
runtime_env_dict["pip"] = list(
runtime_env.pip_runtime_env.config.packages)
def _build_proto_conda_runtime_env(runtime_env_dict: dict,
runtime_env: ProtoRuntimeEnv):
if runtime_env_dict.get("conda"):
if isinstance(runtime_env_dict["conda"], str):
runtime_env.conda_runtime_env.conda_env_name = runtime_env_dict[
"conda"]
else:
runtime_env.conda_runtime_env.config = json.dumps(
runtime_env_dict["conda"], sort_keys=True)
def _parse_proto_conda_runtime_env(runtime_env: ProtoRuntimeEnv,
                                  runtime_env_dict: dict):
    # FIXME(review): when the conda section carries only conda_env_name
    # (the string form produced by the build helper), `config` is empty and
    # json.loads("") raises JSONDecodeError. Should branch on which oneof
    # member is set.
    if runtime_env.HasField("conda_runtime_env"):
        runtime_env_dict["conda"] = json.loads(
            runtime_env.conda_runtime_env.config)
def _build_proto_container_runtime_env(runtime_env_dict: dict,
runtime_env: ProtoRuntimeEnv):
if runtime_env_dict.get("container"):
container = runtime_env_dict["container"]
runtime_env.py_container_runtime_env.image = container.get("image", "")
runtime_env.py_container_runtime_env.worker_path = container.get(
"worker_path", "")
runtime_env.py_container_runtime_env.run_options.extend(
container.get("run_options", []))
def _parse_proto_container_runtime_env(runtime_env: ProtoRuntimeEnv,
                                       runtime_env_dict: dict):
    # FIXME(review): the guard checks "py_container_runtime_env" but the
    # body reads runtime_env.container_runtime_env (no such field elsewhere
    # in this module), and runtime_env_dict["container"] is indexed without
    # ever being created — KeyError on a fresh dict.
    if runtime_env.HasField("py_container_runtime_env"):
        runtime_env_dict["container"][
            "image"] = runtime_env.container_runtime_env.image
        runtime_env_dict["container"][
            "worker_path"] = runtime_env.container_runtime_env.worker_path
        runtime_env_dict["container"]["run_options"] = list(
            runtime_env.container_runtime_env.run_options)
def _build_proto_plugin_runtime_env(runtime_env_dict: dict,
runtime_env: ProtoRuntimeEnv):
if runtime_env_dict.get("plugins"):
for class_path, plugin_field in runtime_env_dict["plugins"].items():
plugin = runtime_env.py_plugin_runtime_env.plugins.add()
plugin.class_path = class_path
plugin.config = json.dumps(plugin_field, sort_keys=True)
def _parse_proto_plugin_runtime_env(runtime_env: ProtoRuntimeEnv,
                                    runtime_env_dict: dict):
    # FIXME(review): runtime_env_dict["plugins"] is indexed without being
    # created first — KeyError when parsing into a fresh dict; should use
    # setdefault("plugins", {}).
    if runtime_env.HasField("py_plugin_runtime_env"):
        for plugin in runtime_env.py_plugin_runtime_env.plugins:
            runtime_env_dict["plugins"][plugin.class_path] = dict(
                json.loads(plugin.config))
class RuntimeEnv:
def __init__(self,
serialized_runtime_env=None,
proto_runtime_env: ProtoRuntimeEnv = None):
if serialized_runtime_env:
self._proto_runtime_env = json_format.Parse(
serialized_runtime_env, ProtoRuntimeEnv())
elif proto_runtime_env:
self._proto_runtime_env = proto_runtime_env
else:
self._proto_runtime_env = ProtoRuntimeEnv()
def to_dict(self) -> Dict:
initialize_dict: Dict[str, Any] = {}
if self._proto_runtime_env.py_modules:
initialize_dict["py_modules"] = list(
self._proto_runtime_env.py_modules)
if self._proto_runtime_env.working_dir:
initialize_dict[
"working_dir"] = self._proto_runtime_env.working_dir
if self._proto_runtime_env.env_vars:
initialize_dict["env_vars"] = dict(
self._proto_runtime_env.env_vars)
if self._proto_runtime_env.extensions:
initialize_dict.update(dict(self._proto_runtime_env.extensions))
_parse_proto_pip_runtime_env(self._proto_runtime_env, initialize_dict)
_parse_proto_conda_runtime_env(self._proto_runtime_env,
initialize_dict)
_parse_proto_container_runtime_env(self._proto_runtime_env,
initialize_dict)
_parse_proto_plugin_runtime_env(self._proto_runtime_env,
initialize_dict)
return initialize_dict
def has_uris(self) -> bool:
uris = self._proto_runtime_env.uris
if uris.working_dir_uri \
or uris.py_modules_uris \
or uris.conda_uri \
or uris.pip_uri \
or uris.plugin_uris:
return True
return False
def working_dir_uri(self) -> str:
return self._proto_runtime_env.uris.working_dir_uri
def py_modules_uris(self) -> List[str]:
return list(self._proto_runtime_env.uris.py_modules_uris)
def conda_uri(self) -> str:
return self._proto_runtime_env.uris.conda_uri
def pip_uri(self) -> str:
return self._proto_runtime_env.uris.pip_uri
def plugin_uris(self) -> List[str]:
return list(self._proto_runtime_env.uris.plugin_uris)
def working_dir(self) -> str:
return self._proto_runtime_env.working_dir
def py_modules(self) -> List[str]:
return list(self._proto_runtime_env.py_modules)
def env_vars(self) -> Dict:
return dict(self._proto_runtime_env.env_vars)
def plugins(self) -> List[Tuple[str, str]]:
result = list()
for plugin in self._proto_runtime_env.py_plugin_runtime_env.plugins:
result.append((plugin.class_path, plugin.config))
return result
def has_conda(self) -> str:
return self._proto_runtime_env.HasField("conda_runtime_env")
def conda_env_name(self) -> str:
if not self.has_conda():
return None
if not self._proto_runtime_env.conda_runtime_env.HasField(
"conda_env_name"):
return None
return self._proto_runtime_env.conda_runtime_env.conda_env_name
def conda_config(self) -> str:
if not self.has_conda():
return None
if not self._proto_runtime_env.conda_runtime_env.HasField("config"):
return None
return self._proto_runtime_env.conda_runtime_env.config
def has_pip(self) -> bool:
return self._proto_runtime_env.HasField("pip_runtime_env")
def pip_packages(self) -> List:
if not self.has_pip():
return []
return list(self._proto_runtime_env.pip_runtime_env.config.packages)
def serialize(self) -> str:
return json.dumps(
json.loads(json_format.MessageToJson(self._proto_runtime_env)),
sort_keys=True)
def get_extension(self, key) -> str:
return self._proto_runtime_env.extensions.get(key)
def has_py_container(self) -> bool:
return self._proto_runtime_env.HasField("py_container_runtime_env")
def py_container_image(self) -> str:
if not self.has_py_container():
return None
return self._proto_runtime_env.py_container_runtime_env.image
def py_container_run_options(self) -> List:
if not self.has_py_container():
return None
return list(
self._proto_runtime_env.py_container_runtime_env.run_options)
@classmethod
def from_dict(cls, runtime_env_dict: Dict[str, Any], conda_get_uri_fn,
pip_get_uri_fn) -> "RuntimeEnv":
proto_runtime_env = ProtoRuntimeEnv()
proto_runtime_env.py_modules.extend(
runtime_env_dict.get("py_modules", []))
proto_runtime_env.working_dir = runtime_env_dict.get("working_dir", "")
if "working_dir" in runtime_env_dict:
proto_runtime_env.uris.working_dir_uri = runtime_env_dict[
"working_dir"]
if "py_modules" in runtime_env_dict:
for uri in runtime_env_dict["py_modules"]:
proto_runtime_env.uris.py_modules_uris.append(uri)
if "conda" in runtime_env_dict:
uri = conda_get_uri_fn(runtime_env_dict)
if uri is not None:
proto_runtime_env.uris.conda_uri = uri
if "pip" in runtime_env_dict:
uri = pip_get_uri_fn(runtime_env_dict)
if uri is not None:
proto_runtime_env.uris.pip_uri = uri
env_vars = runtime_env_dict.get("env_vars", {})
proto_runtime_env.env_vars.update(env_vars.items())
if "_ray_release" in runtime_env_dict:
proto_runtime_env.extensions["_ray_release"] = str(
runtime_env_dict["_ray_release"])
if "_ray_commit" in runtime_env_dict:
proto_runtime_env.extensions["_ray_commit"] = str(
runtime_env_dict["_ray_commit"])
if "_inject_current_ray" in runtime_env_dict:
proto_runtime_env.extensions["_inject_current_ray"] = str(
runtime_env_dict["_inject_current_ray"])
_build_proto_pip_runtime_env(runtime_env_dict, proto_runtime_env)
_build_proto_conda_runtime_env(runtime_env_dict, proto_runtime_env)
_build_proto_container_runtime_env(runtime_env_dict, proto_runtime_env)
_build_proto_plugin_runtime_env(runtime_env_dict, proto_runtime_env)
return cls(proto_runtime_env=proto_runtime_env)
| true | true |
f7f4b847b74d74dfe53484e104e3962c9bfcfcd6 | 10,548 | py | Python | django_project/django_project/settings.py | jsolly/shower-thought-blog | b1f61b50f20b6cc20a10f87bbd6d9532dc0b06c5 | [
"MIT"
] | null | null | null | django_project/django_project/settings.py | jsolly/shower-thought-blog | b1f61b50f20b6cc20a10f87bbd6d9532dc0b06c5 | [
"MIT"
] | null | null | null | django_project/django_project/settings.py | jsolly/shower-thought-blog | b1f61b50f20b6cc20a10f87bbd6d9532dc0b06c5 | [
"MIT"
] | null | null | null | """
Django settings for django_project project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from dotenv import load_dotenv
load_dotenv()
GIT_TOKEN = os.environ["GIT_TOKEN"]
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ["SECRET_KEY"]
SITE_ID = 1 # blogthedata.com (django.contrib.sites id of the production site)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
CAPTCHA_TEST_MODE = False
USE_SRI = True
# HTTPS SETTINGS — cookies only over TLS, force HTTPS, legacy XSS/sniffing
# headers.
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = True
SECURE_SSL_REDIRECT = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
# HSTS SETTINGS (31557600 s = 1 year; preload + subdomains are
# hard to roll back once browsers cache them)
SECURE_HSTS_SECONDS = 31557600
SECURE_HSTS_PRELOAD = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
# Content Security Policy (django-csp): deny by default, allow-list below.
CSP_DEFAULT_SRC = ("'none'",)
# NOTE(review): 'unsafe-inline' for styles weakens the CSP — consider
# nonces/hashes if inline styles can be removed.
CSP_STYLE_SRC = ("'self'", "https://cdn.jsdelivr.net", "'unsafe-inline'")
CSP_SCRIPT_SRC = (
    "'self'",
    "https://cdn.jsdelivr.net",
)
CSP_IMG_SRC = ("'self'", "data:")
CSP_FONT_SRC = ("'self'",)
CSP_CONNECT_SRC = ("'self'",)
CSP_FRAME_SRC = ("*",)
CSP_FRAME_ANCESTORS = ("'none'",)
CSP_BASE_URI = ("'none'",)
CSP_FORM_ACTION = ("'self'", "https://blogthedata.us14.list-manage.com")
CSP_OBJECT_SRC = ("'none'",)
CSP_REQUIRE_TRUSTED_TYPES_FOR = ("'script'",)
# Local-development overrides: relax TLS/cookie hardening when DEBUG=True.
# (KeyError if the DEBUG env var is unset — fail-fast by design here, since
# every other env read in this file also uses os.environ[...].)
if os.environ["DEBUG"] == "True":
    SITE_ID = 2
    DEBUG = True
    CAPTCHA_TEST_MODE = True
    # HTTPS SETTINGS
    SESSION_COOKIE_SECURE = False
    CSRF_COOKIE_SECURE = False
    SECURE_SSL_REDIRECT = False
    SESSION_COOKIE_HTTPONLY = False
    SECURE_BROWSER_XSS_FILTER = False
    SECURE_CONTENT_TYPE_NOSNIFF = False
    # NOTE(review): set only in the debug branch — production never enables
    # CSRF_COOKIE_HTTPONLY; confirm that asymmetry is intentional.
    CSRF_COOKIE_HTTPONLY = True
    # HSTS SETTINGS
    # NOTE(review): SECURE_HSTS_SECONDS stays at one year even in debug.
    SECURE_HSTS_SECONDS = 31557600
    SECURE_HSTS_PRELOAD = False
    SECURE_HSTS_INCLUDE_SUBDOMAINS = False
ALLOWED_HOSTS = os.environ["ALLOWED_HOSTS"].split(" ")
# Application definition
INSTALLED_APPS = [
"blog.apps.BlogConfig",
"users.apps.UsersConfig",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.sites",
"django.contrib.staticfiles",
"django.contrib.sitemaps",
"captcha",
"django_ckeditor_5",
"admin_honeypot",
"robots",
"sri",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.utils.deprecation.MiddlewareMixin",
"django.contrib.sites.middleware.CurrentSiteMiddleware",
"csp.middleware.CSPMiddleware",
]
ROOT_URLCONF = "django_project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
"debug": True,
},
}
]
WSGI_APPLICATION = "django_project.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Default: local PostgreSQL with the password injected from the environment.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        "NAME": "blogthedata",
        "USER": "postgres",
        "PASSWORD": os.environ["POSTGRES_PASS"],
        "HOST": "localhost",
        "PORT": "5432",
    }
}
# Test / CI runs swap in a throwaway on-disk SQLite database so no
# Postgres server is required.
if os.environ["MODE"] in ("TEST", "GITACTIONS"):
    DATABASES = {
        "default": {
            "ENGINE": "django.db.backends.sqlite3",
            "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
        }
    }
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATIC_URL = "/static/"
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "staticfiles"),
]
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
CKEDITOR_UPLOAD_PATH = "uploads/"
LOGIN_REDIRECT_URL = "blog-home"
LOGIN_URL = "login"
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = os.environ["EMAIL_HOST_USER"]
EMAIL_HOST_PASSWORD = os.environ["EMAIL_HOST_PASSWORD"]
DEFAULT_FROM_EMAIL = os.environ["FROM_EMAIL"]
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# -----FASTDEV-----
FASTDEV_STRICT_IF = True
customColorPalette = [
{"color": "hsl(4, 90%, 58%)", "label": "Red"},
{"color": "hsl(340, 82%, 52%)", "label": "Pink"},
{"color": "hsl(291, 64%, 42%)", "label": "Purple"},
{"color": "hsl(262, 52%, 47%)", "label": "Deep Purple"},
{"color": "hsl(231, 48%, 48%)", "label": "Indigo"},
{"color": "hsl(207, 90%, 54%)", "label": "Blue"},
]
CKEDITOR_5_CONFIGS = {
"default": {
"toolbar": [
"heading",
"|",
"bold",
"italic",
"link",
"bulletedList",
"numberedList",
"blockQuote",
"imageUpload",
"RemoveFormat",
],
},
"extends": {
"link": {"addTargetToExternalLinks": "true"},
"codeBlock": {
"languages": [
{"language": "python", "label": "Python"},
{"language": "css", "label": "CSS"},
{"language": "yaml", "label": "YAML"},
{"language": "json", "label": "JSON"},
{"language": "git", "label": "Git"},
{"language": "sql", "label": "SQL"},
{"language": "html", "label": "HTML"},
{"language": "bash", "label": "BASH"},
{"language": "javascript", "label": "JavaScript"},
{"language": "apacheconf", "label": "ApacheConf"},
]
},
"blockToolbar": [
"paragraph",
"heading1",
"heading2",
"heading3",
"|",
"bulletedList",
"numberedList",
"|",
"blockQuote",
"imageUpload",
],
"toolbar": [
"heading",
"|",
"outdent",
"indent",
"|",
"bold",
"italic",
"link",
"underline",
"strikethrough",
"code",
"subscript",
"superscript",
"highlight",
"|",
"codeBlock",
"bulletedList",
"numberedList",
"todoList",
"|",
"blockQuote",
"imageUpload",
"|",
"fontSize",
"fontFamily",
"fontColor",
"fontBackgroundColor",
"mediaEmbed",
"removeFormat",
"insertTable",
],
"image": {
"toolbar": [
"imageTextAlternative",
"|",
"imageStyle:alignLeft",
"imageStyle:alignRight",
"imageStyle:alignCenter",
"imageStyle:side",
"|",
],
"styles": [
"full",
"side",
"alignLeft",
"alignRight",
"alignCenter",
],
},
"table": {
"contentToolbar": [
"tableColumn",
"tableRow",
"mergeTableCells",
"tableProperties",
"tableCellProperties",
],
"tableProperties": {
"borderColors": customColorPalette,
"backgroundColors": customColorPalette,
},
"tableCellProperties": {
"borderColors": customColorPalette,
"backgroundColors": customColorPalette,
},
},
"heading": {
"options": [
{
"model": "paragraph",
"title": "Paragraph",
"class": "ck-heading_paragraph",
},
{
"model": "heading1",
"view": "h1",
"title": "Heading 1",
"class": "ck-heading_heading1",
},
{
"model": "heading2",
"view": "h2",
"title": "Heading 2",
"class": "ck-heading_heading2",
},
{
"model": "heading3",
"view": "h3",
"title": "Heading 3",
"class": "ck-heading_heading3",
},
]
},
},
"list": {
"properties": {
"styles": "true",
"startIndex": "true",
"reversed": "true",
}
},
}
CKEDITOR_5_FILE_STORAGE = "blog.storage.CustomStorage"
| 28.203209 | 91 | 0.552522 | import os
from dotenv import load_dotenv
load_dotenv()
GIT_TOKEN = os.environ["GIT_TOKEN"]
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.environ["SECRET_KEY"]
SITE_ID = 1
DEBUG = False
CAPTCHA_TEST_MODE = False
USE_SRI = True
# HTTPS SETTINGS
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = True
SECURE_SSL_REDIRECT = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
# HSTS SETTINGS
SECURE_HSTS_SECONDS = 31557600
SECURE_HSTS_PRELOAD = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
# Content Security Policy
CSP_DEFAULT_SRC = ("'none'",)
CSP_STYLE_SRC = ("'self'", "https://cdn.jsdelivr.net", "'unsafe-inline'")
CSP_SCRIPT_SRC = (
"'self'",
"https://cdn.jsdelivr.net",
)
CSP_IMG_SRC = ("'self'", "data:")
CSP_FONT_SRC = ("'self'",)
CSP_CONNECT_SRC = ("'self'",)
CSP_FRAME_SRC = ("*",)
CSP_FRAME_ANCESTORS = ("'none'",)
CSP_BASE_URI = ("'none'",)
CSP_FORM_ACTION = ("'self'", "https://blogthedata.us14.list-manage.com")
CSP_OBJECT_SRC = ("'none'",)
CSP_REQUIRE_TRUSTED_TYPES_FOR = ("'script'",)
if os.environ["DEBUG"] == "True":
SITE_ID = 2
DEBUG = True
CAPTCHA_TEST_MODE = True
# HTTPS SETTINGS
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
SECURE_SSL_REDIRECT = False
SESSION_COOKIE_HTTPONLY = False
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
CSRF_COOKIE_HTTPONLY = True
# HSTS SETTINGS
SECURE_HSTS_SECONDS = 31557600
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
ALLOWED_HOSTS = os.environ["ALLOWED_HOSTS"].split(" ")
# Application definition
INSTALLED_APPS = [
"blog.apps.BlogConfig",
"users.apps.UsersConfig",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.sites",
"django.contrib.staticfiles",
"django.contrib.sitemaps",
"captcha",
"django_ckeditor_5",
"admin_honeypot",
"robots",
"sri",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.utils.deprecation.MiddlewareMixin",
"django.contrib.sites.middleware.CurrentSiteMiddleware",
"csp.middleware.CSPMiddleware",
]
ROOT_URLCONF = "django_project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
"debug": True,
},
}
]
WSGI_APPLICATION = "django_project.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "blogthedata",
"USER": "postgres",
"PASSWORD": os.environ["POSTGRES_PASS"],
"HOST": "localhost",
"PORT": "5432",
}
}
if os.environ["MODE"] in ("TEST", "GITACTIONS"):
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATIC_URL = "/static/"
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "staticfiles"),
]
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
CKEDITOR_UPLOAD_PATH = "uploads/"
LOGIN_REDIRECT_URL = "blog-home"
LOGIN_URL = "login"
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = os.environ["EMAIL_HOST_USER"]
EMAIL_HOST_PASSWORD = os.environ["EMAIL_HOST_PASSWORD"]
DEFAULT_FROM_EMAIL = os.environ["FROM_EMAIL"]
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# -----FASTDEV-----
FASTDEV_STRICT_IF = True
customColorPalette = [
{"color": "hsl(4, 90%, 58%)", "label": "Red"},
{"color": "hsl(340, 82%, 52%)", "label": "Pink"},
{"color": "hsl(291, 64%, 42%)", "label": "Purple"},
{"color": "hsl(262, 52%, 47%)", "label": "Deep Purple"},
{"color": "hsl(231, 48%, 48%)", "label": "Indigo"},
{"color": "hsl(207, 90%, 54%)", "label": "Blue"},
]
CKEDITOR_5_CONFIGS = {
"default": {
"toolbar": [
"heading",
"|",
"bold",
"italic",
"link",
"bulletedList",
"numberedList",
"blockQuote",
"imageUpload",
"RemoveFormat",
],
},
"extends": {
"link": {"addTargetToExternalLinks": "true"},
"codeBlock": {
"languages": [
{"language": "python", "label": "Python"},
{"language": "css", "label": "CSS"},
{"language": "yaml", "label": "YAML"},
{"language": "json", "label": "JSON"},
{"language": "git", "label": "Git"},
{"language": "sql", "label": "SQL"},
{"language": "html", "label": "HTML"},
{"language": "bash", "label": "BASH"},
{"language": "javascript", "label": "JavaScript"},
{"language": "apacheconf", "label": "ApacheConf"},
]
},
"blockToolbar": [
"paragraph",
"heading1",
"heading2",
"heading3",
"|",
"bulletedList",
"numberedList",
"|",
"blockQuote",
"imageUpload",
],
"toolbar": [
"heading",
"|",
"outdent",
"indent",
"|",
"bold",
"italic",
"link",
"underline",
"strikethrough",
"code",
"subscript",
"superscript",
"highlight",
"|",
"codeBlock",
"bulletedList",
"numberedList",
"todoList",
"|",
"blockQuote",
"imageUpload",
"|",
"fontSize",
"fontFamily",
"fontColor",
"fontBackgroundColor",
"mediaEmbed",
"removeFormat",
"insertTable",
],
"image": {
"toolbar": [
"imageTextAlternative",
"|",
"imageStyle:alignLeft",
"imageStyle:alignRight",
"imageStyle:alignCenter",
"imageStyle:side",
"|",
],
"styles": [
"full",
"side",
"alignLeft",
"alignRight",
"alignCenter",
],
},
"table": {
"contentToolbar": [
"tableColumn",
"tableRow",
"mergeTableCells",
"tableProperties",
"tableCellProperties",
],
"tableProperties": {
"borderColors": customColorPalette,
"backgroundColors": customColorPalette,
},
"tableCellProperties": {
"borderColors": customColorPalette,
"backgroundColors": customColorPalette,
},
},
"heading": {
"options": [
{
"model": "paragraph",
"title": "Paragraph",
"class": "ck-heading_paragraph",
},
{
"model": "heading1",
"view": "h1",
"title": "Heading 1",
"class": "ck-heading_heading1",
},
{
"model": "heading2",
"view": "h2",
"title": "Heading 2",
"class": "ck-heading_heading2",
},
{
"model": "heading3",
"view": "h3",
"title": "Heading 3",
"class": "ck-heading_heading3",
},
]
},
},
"list": {
"properties": {
"styles": "true",
"startIndex": "true",
"reversed": "true",
}
},
}
CKEDITOR_5_FILE_STORAGE = "blog.storage.CustomStorage"
| true | true |
f7f4b9084c9040ca98af3b4ad1cca8b7c7bfbca9 | 431 | py | Python | venv/Scripts/pip3.7-script.py | kaduprasad/Face-recognition-system | febb3022a38a32c55dc1f21b7c278f5463376a6a | [
"MIT"
] | null | null | null | venv/Scripts/pip3.7-script.py | kaduprasad/Face-recognition-system | febb3022a38a32c55dc1f21b7c278f5463376a6a | [
"MIT"
] | null | null | null | venv/Scripts/pip3.7-script.py | kaduprasad/Face-recognition-system | febb3022a38a32c55dc1f21b7c278f5463376a6a | [
"MIT"
] | null | null | null | #!C:\Users\prasad\PycharmProjects\project1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalise argv[0]: drop a trailing "-script.py(w)" or ".exe" suffix so
    # pip reports its program name the same way a native console script would.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Resolve pip's console-script entry point and hand it the process,
    # propagating its return code as the exit status.
    pip_main = load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')
    sys.exit(pip_main())
| 33.153846 | 71 | 0.651972 |
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| true | true |
f7f4b952fefbc69760335d13669cb73b4ae63bb8 | 4,737 | py | Python | babel-updater/updater.py | BookOps-CAT/babel | 47c8102bfbad8466185cd0e70501a931dd79ef29 | [
"CC0-1.0",
"CC-BY-4.0"
] | null | null | null | babel-updater/updater.py | BookOps-CAT/babel | 47c8102bfbad8466185cd0e70501a931dd79ef29 | [
"CC0-1.0",
"CC-BY-4.0"
] | 125 | 2017-10-12T12:14:23.000Z | 2022-03-11T23:50:19.000Z | babel-updater/updater.py | BookOps-CAT/babel | 47c8102bfbad8466185cd0e70501a931dd79ef29 | [
"CC0-1.0",
"CC-BY-4.0"
] | null | null | null | # updates babel application
import logging
import logging.config
import logging.handlers
import os
import shutil
import sys
import time
from distutils.dir_util import copy_tree

import psutil
def run_update(src_directory, dst_directory):
    """Replace an installed Babel app with a new version and relaunch it.

    Steps, in order:
      1. configure a rotating-file logger (``updater_log.out`` in the CWD),
      2. kill any running ``babel.exe`` process,
      3. delete everything in ``dst_directory`` except the updater itself,
         remembering entries the OS refuses to delete ("untouchables"),
      4. copy the new release from ``src_directory``, skipping untouchables,
      5. start the freshly copied ``babel.exe``.

    Args:
        src_directory: directory holding the unpacked new release.
        dst_directory: Babel's installation directory; must contain
            ``babel.exe``, otherwise the update is aborted.

    Returns:
        None.  Progress and failures are reported via the log file.
    """
    # Set up logging to a rotating file.
    LOG_FILE = 'updater_log.out'
    ulogger = logging.getLogger('updater_logger')
    ulogger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)-15s: %(levelname)-8s %(message)s')
    handler = logging.handlers.RotatingFileHandler(
        LOG_FILE, maxBytes=1024 * 1024, backupCount=5)
    handler.setFormatter(formatter)
    ulogger.addHandler(handler)

    ulogger.info('Initiating update...')
    ulogger.debug(
        f'Update source: {src_directory}, destination: {dst_directory}')

    # Entries that could not be deleted (locked by the OS); they are skipped
    # again during the copy phase so the rest of the update can still finish.
    untouchables = []

    if os.path.isfile(os.path.join(dst_directory, 'babel.exe')):
        ulogger.debug('Located succesfully Babel2 directory.')

        # Kill the running application so its files can be replaced.
        try:
            ulogger.debug('CWD: {}'.format(os.getcwd()))
            killed = False
            for proc in psutil.process_iter():
                if proc.name() == 'babel.exe':
                    proc.kill()
                    killed = True
                    ulogger.debug('Process babel.exe has been killed.')
            if not killed:
                ulogger.error('Unable to find & kill babel.exe process.')
            time.sleep(1)
        except Exception as e:
            ulogger.error('Unable to kill babel.exe. Error: {}'.format(e))

        ulogger.info('Removing old files...')
        # Delete the content of the main folder except the updater itself.
        entries = [
            f for f in os.listdir(dst_directory) if 'updater' not in f]
        for f in entries:
            target = os.path.join(dst_directory, f)
            if os.path.isdir(target):
                # Fixed: a locked directory used to crash the whole update;
                # now it is recorded as untouchable, mirroring the file branch.
                try:
                    shutil.rmtree(target)
                    ulogger.debug(
                        'Deleted directory: {}'.format(target))
                except PermissionError:
                    untouchables.append(f)
                    ulogger.debug(f'PermissionError on {f}')
                except WindowsError:
                    untouchables.append(f)
                    ulogger.debug(f'WindowsError on {f}')
            elif os.path.isfile(target):
                try:
                    os.remove(target)
                    ulogger.debug(
                        'Deleted file: {}'.format(target))
                except FileNotFoundError:
                    ulogger.error(
                        'Unable to find file: {}'.format(target))
                except PermissionError:
                    untouchables.append(f)
                    ulogger.debug(f'PermissionError on {f}')
                except WindowsError:
                    untouchables.append(f)
                    ulogger.debug(f'WindowsError on {f}')
            else:
                # NOTE(review): this reports to stdout, not the log file —
                # probably unintentional, but kept to preserve behaviour.
                print('Unrecognized entry: {}'.format(target))
            # Per-entry pause — presumably to let Windows release handles
            # between deletions; confirm before removing.
            time.sleep(1)

        ulogger.info(f'Found following untouchable files: {untouchables}')
        ulogger.debug('Copying new files')
        # Copy updated files, skipping anything we could not delete above.
        entries = [
            f for f in os.listdir(src_directory) if 'updater' not in f]
        for f in entries:
            try:
                if f not in untouchables:
                    if os.path.isdir(os.path.join(src_directory, f)):
                        copy_tree(
                            os.path.join(src_directory, f),
                            os.path.join(dst_directory, f))
                        ulogger.debug(
                            'Copied directory: {}'.format(
                                os.path.join(dst_directory, f)))
                    elif os.path.isfile(os.path.join(src_directory, f)):
                        shutil.copy2(
                            os.path.join(src_directory, f), dst_directory)
                        ulogger.debug(
                            'Copied file: {}'.format(
                                os.path.join(dst_directory, f)))
                    else:
                        ulogger.error(f'Unable to copy entry: {f}')
                else:
                    ulogger.debug(f'Skipping untouchable file {f}')
            except PermissionError:
                ulogger.error(f'PermissionError on {f}')

        ulogger.info('Copying complete...')
        time.sleep(1)
        ulogger.debug(f'CWD: {os.getcwd()}')
        # NOTE(review): relative path — relies on the process CWD being the
        # Babel installation directory; confirm how the updater is launched.
        os.startfile('babel.exe')
        ulogger.info('Complete. Launching Babel...')
    else:
        ulogger.error('Unable to locate Babel2 main directory.')
if __name__ == '__main__':
    # CLI entry point: updater <source_directory> <destination_directory>
    run_update(sys.argv[1], sys.argv[2])
| 37.007813 | 74 | 0.527127 |
from distutils.dir_util import copy_tree
import logging
import logging.config
import os
import psutil
import shutil
import sys
import time
def run_update(src_directory, dst_directory):
LOG_FILE = 'updater_log.out'
ulogger = logging.getLogger('updater_logger')
ulogger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)-15s: %(levelname)-8s %(message)s')
handler = logging.handlers.RotatingFileHandler(
LOG_FILE, maxBytes=1024 * 1024, backupCount=5)
handler.setFormatter(formatter)
ulogger.addHandler(handler)
ulogger.info('Initiating update...')
ulogger.debug(
f'Update source: {src_directory}, destination: {dst_directory}')
untouchables = []
if os.path.isfile(os.path.join(dst_directory, 'babel.exe')):
ulogger.debug('Located succesfully Babel2 directory.')
try:
ulogger.debug('CWD: {}'.format(os.getcwd()))
killed = False
for proc in psutil.process_iter():
if proc.name() == 'babel.exe':
proc.kill()
killed = True
ulogger.debug('Process babel.exe has been killed.')
if not killed:
ulogger.error('Unable to find & kill babel.exe process.')
time.sleep(1)
except Exception as e:
ulogger.error('Unable to kill babel.exe. Error: {}'.format(e))
ulogger.info('Removing old files...')
entries = [
f for f in os.listdir(dst_directory) if 'updater' not in f]
for f in entries:
if os.path.isdir(os.path.join(dst_directory, f)):
shutil.rmtree(os.path.join(dst_directory, f))
ulogger.debug(
'Deleted directory: {}'.format(
os.path.join(dst_directory, f)))
elif os.path.isfile(os.path.join(dst_directory, f)):
try:
os.remove(os.path.join(dst_directory, f))
ulogger.debug(
'Deleted file: {}'.format(
os.path.join(dst_directory, f)))
except FileNotFoundError:
ulogger.error(
'Unable to find file: {}'.format(
os.path.join(dst_directory, f)))
except PermissionError:
untouchables.append(f)
ulogger.debug(f'PermissionError on {f}')
except WindowsError:
untouchables.append(f)
ulogger.debug(f'WindowsError on {f}')
else:
print('Unrecognized entry: {}'.format(
os.path.join(dst_directory, f)))
time.sleep(1)
ulogger.info(f'Found following untouchable files: {untouchables}')
ulogger.debug('Copying new files')
entries = [
f for f in os.listdir(src_directory) if 'updater' not in f]
for f in entries:
try:
if f not in untouchables:
if os.path.isdir(os.path.join(src_directory, f)):
copy_tree(
os.path.join(src_directory, f),
os.path.join(dst_directory, f))
ulogger.debug(
'Copied directory: {}'.format(
os.path.join(dst_directory, f)))
elif os.path.isfile(os.path.join(src_directory, f)):
shutil.copy2(
os.path.join(src_directory, f), dst_directory)
ulogger.debug(
'Copied file: {}'.format(
os.path.join(dst_directory, f)))
else:
ulogger.error(f'Unable to copy entry: {f}')
else:
ulogger.debug(f'Skipping untouchable file {f}')
except PermissionError:
ulogger.error(f'PermissionError on {f}')
ulogger.info('Copying complete...')
time.sleep(1)
ulogger.debug(f'CWD: {os.getcwd()}')
os.startfile('babel.exe')
ulogger.info('Complete. Launching Babel...')
else:
ulogger.error('Unable to locate Babel2 main directory.')
if __name__ == '__main__':
run_update(sys.argv[1], sys.argv[2])
| true | true |
f7f4bb7ec66c59ecf9c383e0854a106e9832d927 | 18,952 | py | Python | roc_auc.py | wentaozhu/deep-mil-for-whole-mammogram-classification | 8c046bbd77d268499849319cf57254015778549c | [
"MIT"
] | 106 | 2017-03-12T17:26:49.000Z | 2022-02-12T01:37:17.000Z | roc_auc.py | huhansan666666/deep-mil-for-whole-mammogram-classification | 8c046bbd77d268499849319cf57254015778549c | [
"MIT"
] | 17 | 2017-04-11T14:49:34.000Z | 2022-03-19T07:57:37.000Z | roc_auc.py | huhansan666666/deep-mil-for-whole-mammogram-classification | 8c046bbd77d268499849319cf57254015778549c | [
"MIT"
] | 41 | 2017-03-21T09:48:39.000Z | 2021-11-29T06:51:16.000Z | """
TrainExtension subclass for calculating ROC AUC scores on monitoring
dataset(s), reported via monitor channels.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import numpy as np
try:
from sklearn.metrics import roc_auc_score, roc_curve
except ImportError:
roc_auc_score = None
import logging
import theano
from theano import gof, config
from theano import tensor as T
from keras.callbacks import Callback
import os
#from pylearn2.train_extensions import TrainExtension
class AUCEpoch(Callback):
  """Keras callback: track ROC AUC on a validation set, checkpoint on improvement.

  Every ``interval`` epochs the model is evaluated on ``validation_data``;
  when the AUC beats the best value seen so far, older ``<filepath>auc*``
  checkpoints are deleted and the model is saved as
  ``<filepath>auc<score>ep<epoch>.hdf5``.
  """

  def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    # Fixed: initialise the Callback base class.  The original called
    # ``super(Callback, self).__init__()``, which skips Callback.__init__
    # and runs object.__init__ instead.
    super(AUCEpoch, self).__init__()
    self.interval = interval
    self.auc = 0  # best AUC observed so far
    self.X_val, self.y_val = validation_data
    self.filepath = filepath
    self.mymil = mymil

  def on_epoch_end(self, epoch, logs=None):  # fixed: mutable default removed
    if epoch % self.interval == 0:
      y_pred = self.model.predict(self.X_val, verbose=0)
      if self.mymil:
        # Multi-instance mode: score a bag by its highest instance score.
        score = roc_auc_score(self.y_val.max(axis=1), y_pred.max(axis=1))
      else:
        score = roc_auc_score(self.y_val[:, 1], y_pred[:, 1])
      print("interval evaluation - epoch: {:d} - auc: {:.2f}".format(epoch, score))
      if score > self.auc:
        self.auc = score
        # NOTE(review): cleanup matches names from os.listdir('./'), so it
        # only works when self.filepath has no directory component.
        for f in os.listdir('./'):
          if f.startswith(self.filepath + 'auc'):
            os.remove(f)
        self.model.save(self.filepath + 'auc' + str(score) + 'ep' + str(epoch) + '.hdf5')
class RocAucScoreOp(gof.Op):
    """Theano Op that evaluates ``sklearn.metrics.roc_auc_score``.

    Parameters
    ----------
    name : str, optional (default 'roc_auc')
        Name given to this Op's output variable.
    use_c_code : str
        Compiler passed through to the parent ``gof.Op``.
    """

    def __init__(self, name='roc_auc', use_c_code=theano.config.cxx):
        super(RocAucScoreOp, self).__init__(use_c_code)
        self.name = name

    def make_node(self, y_true, y_score):
        """Build the Apply node computing ROC AUC from labels and scores.

        Parameters
        ----------
        y_true : tensor_like
            Target class labels.
        y_score : tensor_like
            Predicted class labels or probabilities for the positive class.
        """
        targets = T.as_tensor_variable(y_true)
        scores = T.as_tensor_variable(y_score)
        result = T.vector(name=self.name, dtype=config.floatX)
        return gof.Apply(self, [targets, scores], [result])

    def perform(self, node, inputs, output_storage):
        """Numerically evaluate ROC AUC; NaN when the score is undefined."""
        if roc_auc_score is None:
            raise RuntimeError("Could not import from sklearn.")
        targets, scores = inputs
        try:
            value = roc_auc_score(targets, scores)
        except ValueError:
            # roc_auc_score raises when y_true contains a single class.
            value = np.nan
        output_storage[0][0] = theano._asarray(value, dtype=config.floatX)
class PrecisionEpoch(Callback):
  """Keras callback: track precision on a validation set, checkpoint on improvement.

  Every ``interval`` epochs the model is evaluated on ``validation_data``;
  when precision exceeds the best seen so far, older ``<filepath>prec*``
  checkpoints are removed and the model is saved.
  """

  def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    # Fixed: initialise the Callback base class (was ``super(Callback, self)``,
    # which skipped Callback.__init__ and ran object.__init__ instead).
    super(PrecisionEpoch, self).__init__()
    self.interval = interval
    self.prec = 0  # best precision observed so far
    self.X_val, self.y_val = validation_data
    self.filepath = filepath
    self.mymil = mymil

  def on_epoch_end(self, epoch, logs=None):  # fixed: mutable default removed
    if epoch % self.interval == 0:
      y_pred = self.model.predict(self.X_val, verbose=0)
      if self.mymil:
        # Multi-instance mode: a bag is positive if any instance is.
        y_true = self.y_val.max(axis=1)
        y_score = y_pred.max(axis=1) > 0.5
      else:
        y_true = np.argmax(self.y_val, axis=1)
        y_score = np.argmax(y_pred, axis=1)
      TP = np.sum(y_true[y_score == 1] == 1) * 1.
      FP = np.sum(y_true[y_score == 1] == 0) * 1.
      prec = TP / (TP + FP + 1e-6)  # epsilon avoids 0/0
      print("interval evaluation - epoch: {:d} - prec: {:.2f}".format(epoch, prec))
      if prec > self.prec:
        self.prec = prec
        # NOTE(review): cleanup only matches when self.filepath has no
        # directory component (names come from os.listdir('./')).
        for f in os.listdir('./'):
          if f.startswith(self.filepath + 'prec'):
            os.remove(f)
        self.model.save(self.filepath + 'prec' + str(prec) + 'ep' + str(epoch) + '.hdf5')
class PrecisionOp(gof.Op):
    """Theano Op computing precision (TP / (TP + FP)) from one-hot inputs.

    Parameters
    ----------
    name : str, optional (default 'precision')
        Name given to this Op's output variable.
    use_c_code : str
        Compiler passed through to the parent ``gof.Op``.
    """

    def __init__(self, name='precision', use_c_code=theano.config.cxx):
        super(PrecisionOp, self).__init__(use_c_code)
        self.name = name

    def make_node(self, y_true, y_score):
        """Build the Apply node computing precision.

        Parameters
        ----------
        y_true : tensor_like
            One-hot target class labels.
        y_score : tensor_like
            One-hot predicted class labels or probabilities.
        """
        y_true = T.as_tensor_variable(y_true)
        y_score = T.as_tensor_variable(y_score)
        output = [T.vector(name=self.name, dtype=config.floatX)]
        return gof.Apply(self, [y_true, y_score], output)

    def perform(self, node, inputs, output_storage):
        """Numerically evaluate precision on one-hot encoded inputs.

        Fixed: removed a leftover debug ``print(y_true.shape)``.
        """
        if roc_auc_score is None:
            raise RuntimeError("Could not import from sklearn.")
        y_true, y_score = inputs
        y_true = np.argmax(y_true, axis=1)
        y_score = np.argmax(y_score, axis=1)
        try:
            TP = np.sum(y_true[y_score == 1] == 1) * 1.
            FP = np.sum(y_true[y_score == 1] == 0) * 1.
            prec = TP / (TP + FP + 1e-6)  # epsilon avoids 0/0
        except ValueError:
            prec = np.nan
        output_storage[0][0] = theano._asarray(prec, dtype=config.floatX)
class RecallEpoch(Callback):
  """Keras callback: track recall on a validation set, checkpoint on improvement.

  Every ``interval`` epochs the model is evaluated on ``validation_data``;
  when recall exceeds the best seen so far, older ``<filepath>reca*``
  checkpoints are removed and the model is saved.
  """

  def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    # Fixed: initialise the Callback base class (was ``super(Callback, self)``,
    # which skipped Callback.__init__ and ran object.__init__ instead).
    super(RecallEpoch, self).__init__()
    self.interval = interval
    self.filepath = filepath
    self.reca = 0  # best recall observed so far
    self.X_val, self.y_val = validation_data
    self.mymil = mymil

  def on_epoch_end(self, epoch, logs=None):  # fixed: mutable default removed
    if epoch % self.interval == 0:
      y_pred = self.model.predict(self.X_val, verbose=0)
      if self.mymil:
        # Multi-instance mode: a bag is positive if any instance is.
        y_true = self.y_val.max(axis=1)
        y_score = y_pred.max(axis=1) > 0.5
      else:
        y_true = np.argmax(self.y_val, axis=1)
        y_score = np.argmax(y_pred, axis=1)
      TP = np.sum(y_true[y_score == 1] == 1) * 1.
      FN = np.sum(y_true[y_score == 0] == 1) * 1.
      reca = TP / (TP + FN + 1e-6)  # epsilon avoids 0/0
      print("interval evaluation - epoch: {:d} - reca: {:.2f}".format(epoch, reca))
      if reca > self.reca:
        self.reca = reca
        # NOTE(review): cleanup only matches when self.filepath has no
        # directory component (names come from os.listdir('./')).
        for f in os.listdir('./'):
          if f.startswith(self.filepath + 'reca'):
            os.remove(f)
        self.model.save(self.filepath + 'reca' + str(reca) + 'ep' + str(epoch) + '.hdf5')
class RecallOp(gof.Op):
    """Theano Op computing recall (TP / (TP + FN)) from one-hot inputs.

    Parameters
    ----------
    name : str, optional (default 'recall')
        Name given to this Op's output variable.
    use_c_code : str
        Compiler passed through to the parent ``gof.Op``.
    """

    def __init__(self, name='recall', use_c_code=theano.config.cxx):
        super(RecallOp, self).__init__(use_c_code)
        self.name = name

    def make_node(self, y_true, y_score):
        """Build the Apply node computing recall.

        Parameters
        ----------
        y_true : tensor_like
            One-hot target class labels.
        y_score : tensor_like
            One-hot predicted class labels or probabilities.
        """
        targets = T.as_tensor_variable(y_true)
        predictions = T.as_tensor_variable(y_score)
        result = T.vector(name=self.name, dtype=config.floatX)
        return gof.Apply(self, [targets, predictions], [result])

    def perform(self, node, inputs, output_storage):
        """Numerically evaluate recall on one-hot encoded inputs."""
        if roc_auc_score is None:
            raise RuntimeError("Could not import from sklearn.")
        targets, predictions = inputs
        labels = np.argmax(targets, axis=1)
        predicted = np.argmax(predictions, axis=1)
        try:
            true_pos = np.sum(labels[predicted == 1] == 1) * 1.
            false_neg = np.sum(labels[predicted == 0] == 1) * 1.
            value = true_pos / (true_pos + false_neg + 1e-6)
        except ValueError:
            value = np.nan
        output_storage[0][0] = theano._asarray(value, dtype=config.floatX)
class F1Epoch(Callback):
  """Keras callback: track F1 on a validation set, checkpoint on improvement.

  Every ``interval`` epochs the model is evaluated on ``validation_data``;
  when F1 exceeds the best seen so far, older ``<filepath>f1*``
  checkpoints are removed and the model is saved.
  """

  def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    # Fixed: initialise the Callback base class (was ``super(Callback, self)``,
    # which skipped Callback.__init__ and ran object.__init__ instead).
    super(F1Epoch, self).__init__()
    self.interval = interval
    self.filepath = filepath
    self.f1 = 0  # best F1 observed so far
    self.X_val, self.y_val = validation_data
    self.mymil = mymil

  def on_epoch_end(self, epoch, logs=None):  # fixed: mutable default removed
    if epoch % self.interval == 0:
      y_pred = self.model.predict(self.X_val, verbose=0)
      if self.mymil:
        # Multi-instance mode: a bag is positive if any instance is.
        y_true = self.y_val.max(axis=1)
        y_score = y_pred.max(axis=1) > 0.5
      else:
        y_true = np.argmax(self.y_val, axis=1)
        y_score = np.argmax(y_pred, axis=1)
      TP = np.sum(y_true[y_score == 1] == 1) * 1.
      FP = np.sum(y_true[y_score == 1] == 0) * 1.
      FN = np.sum(y_true[y_score == 0] == 1) * 1.
      f1 = 2 * TP / (2 * TP + FP + FN + 1e-6)  # epsilon avoids 0/0
      print("interval evaluation - epoch: {:d} - f1: {:.2f}".format(epoch, f1))
      if f1 > self.f1:
        self.f1 = f1
        # NOTE(review): cleanup only matches when self.filepath has no
        # directory component (names come from os.listdir('./')).
        for f in os.listdir('./'):
          if f.startswith(self.filepath + 'f1'):
            os.remove(f)
        self.model.save(self.filepath + 'f1' + str(f1) + 'ep' + str(epoch) + '.hdf5')
class ACCEpoch(Callback):
  """Keras callback: track best-threshold accuracy, checkpoint on improvement.

  Every ``interval`` epochs the callback searches for the decision
  threshold that maximises accuracy on ``validation_data``; when that
  accuracy improves on the best seen so far, the previous
  ``<filepath>acc*`` checkpoint is replaced.
  """

  def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    # Fixed: initialise the Callback base class (was ``super(Callback, self)``,
    # which skipped Callback.__init__ and ran object.__init__ instead).
    super(ACCEpoch, self).__init__()
    self.interval = interval
    self.filepath = filepath
    self.acc = 0  # best accuracy observed so far
    self.X_val, self.y_val = validation_data
    self.mymil = mymil

  def on_epoch_end(self, epoch, logs=None):  # fixed: mutable default removed
    if epoch % self.interval == 0:
      y_pred = self.model.predict(self.X_val, verbose=0)
      if self.mymil:
        y_true = self.y_val.max(axis=1)
        y_score = y_pred.max(axis=1)
      else:
        y_true = self.y_val[:, 1]
        y_score = y_pred[:, 1]
      # Sort by score, then try every observed score as a threshold.
      # NOTE(review): this scan is O(n^2) in the validation-set size —
      # acceptable for small sets, worth vectorising for large ones.
      sortindex = np.argsort(y_score)
      y_score = y_score[sortindex]
      y_true = y_true[sortindex]
      # Baseline: threshold below the minimum score, i.e. predict all positive.
      bestacc, bestthresh = np.mean(y_true == np.ones_like(y_true)), y_score[0] - 0.001
      for thresh in y_score:
        acc = np.mean(y_true == (y_score > thresh))
        if acc > bestacc:
          bestacc, bestthresh = acc, thresh
      y_score = y_score > bestthresh
      acc = np.mean(y_true == y_score)
      assert (acc == bestacc)  # sanity check: recomputation matches the scan
      print("interval evaluation - epoch: {:d} - acc: {:.2f}".format(epoch, acc))
      if acc > self.acc:
        self.acc = acc
        # NOTE(review): cleanup only matches when self.filepath has no
        # directory component (names come from os.listdir('./')).
        for f in os.listdir('./'):
          if f.startswith(self.filepath + 'acc'):
            os.remove(f)
        self.model.save(self.filepath + 'acc' + str(acc) + 'ep' + str(epoch) + '.hdf5')
class LossEpoch(Callback):
  """Keras callback: track mean negative log-likelihood, checkpoint on improvement.

  Every ``interval`` epochs the model is evaluated on ``validation_data``;
  when the loss drops below the best seen so far, older ``<filepath>loss*``
  checkpoints are removed and the model is saved.
  """

  def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    # Fixed: initialise the Callback base class (was ``super(Callback, self)``,
    # which skipped Callback.__init__ and ran object.__init__ instead).
    super(LossEpoch, self).__init__()
    self.interval = interval
    self.filepath = filepath
    self.loss = 1e6  # best (lowest) loss observed so far
    self.X_val, self.y_val = validation_data
    self.mymil = mymil

  def on_epoch_end(self, epoch, logs=None):  # fixed: mutable default removed
    if epoch % self.interval == 0:
      y_pred = self.model.predict(self.X_val, verbose=0)
      if self.mymil:
        # NOTE(review): in MIL mode y_score is boolean, so the log below
        # collapses to log(1e-6) for negatives — confirm this is intended.
        y_true = self.y_val.max(axis=1)
        y_score = y_pred.max(axis=1) > 0.5
      else:
        y_true = np.argmax(self.y_val, axis=1)
        # Probability the model assigned to each sample's true class.
        y_score = y_pred[np.arange(len(y_true)), y_true]
      loss = -np.mean(np.log(y_score + 1e-6))
      print('')
      print("interval evaluation - epoch: {:d} - loss: {:.2f}".format(epoch, loss))
      if loss < self.loss:
        self.loss = loss
        # NOTE(review): cleanup only matches when self.filepath has no
        # directory component (names come from os.listdir('./')).
        for f in os.listdir('./'):
          if f.startswith(self.filepath + 'loss'):
            os.remove(f)
        self.model.save(self.filepath + 'loss' + str(loss) + 'ep' + str(epoch) + '.hdf5')
class F1Op(gof.Op):
    """Theano Op computing the F1 score (2TP / (2TP + FP + FN)) from one-hot inputs.

    Parameters
    ----------
    name : str, optional (default 'f1')
        Name given to this Op's output variable.
    use_c_code : str
        Compiler passed through to the parent ``gof.Op``.
    """

    def __init__(self, name='f1', use_c_code=theano.config.cxx):
        super(F1Op, self).__init__(use_c_code)
        self.name = name

    def make_node(self, y_true, y_score):
        """Build the Apply node computing F1.

        Parameters
        ----------
        y_true : tensor_like
            One-hot target class labels.
        y_score : tensor_like
            One-hot predicted class labels or probabilities.
        """
        y_true = T.as_tensor_variable(y_true)
        y_score = T.as_tensor_variable(y_score)
        output = [T.vector(name=self.name, dtype=config.floatX)]
        return gof.Apply(self, [y_true, y_score], output)

    def perform(self, node, inputs, output_storage):
        """Numerically evaluate F1 on one-hot encoded inputs."""
        if roc_auc_score is None:
            raise RuntimeError("Could not import from sklearn.")
        y_true, y_score = inputs
        y_true = np.argmax(y_true, axis=1)
        y_score = np.argmax(y_score, axis=1)
        try:
            TP = np.sum(y_true[y_score == 1] == 1) * 1.
            FP = np.sum(y_true[y_score == 1] == 0) * 1.
            FN = np.sum(y_true[y_score == 0] == 1) * 1.
            # Fixed: added the 1e-6 epsilon used by F1Epoch; without it the
            # all-zero case produced NaN (0.0/0.0 on numpy floats) with a
            # RuntimeWarning instead of 0.
            f1 = 2 * TP / (2 * TP + FP + FN + 1e-6)
        except ValueError:
            f1 = np.nan
        output_storage[0][0] = theano._asarray(f1, dtype=config.floatX)
'''class RocAucChannel(TrainExtension):
"""
Adds a ROC AUC channel to the monitor for each monitoring dataset.
This monitor will return nan unless both classes are represented in
y_true. For this reason, it is recommended to set monitoring_batches
to 1, especially when using unbalanced datasets.
Parameters
----------
channel_name_suffix : str, optional (default 'roc_auc')
Channel name suffix.
positive_class_index : int, optional (default 1)
Index of positive class in predicted values.
negative_class_index : int or None, optional (default None)
Index of negative class in predicted values for calculation of
one vs. one performance. If None, uses all examples not in the
positive class (one vs. the rest).
"""
def __init__(self, channel_name_suffix='roc_auc', positive_class_index=1,
negative_class_index=None):
self.channel_name_suffix = channel_name_suffix
self.positive_class_index = positive_class_index
self.negative_class_index = negative_class_index
def setup(self, model, dataset, algorithm):
"""
Add ROC AUC channels for monitoring dataset(s) to model.monitor.
Parameters
----------
model : object
The model being trained.
dataset : object
Training dataset.
algorithm : object
Training algorithm.
"""
m_space, m_source = model.get_monitoring_data_specs()
state, target = m_space.make_theano_batch()
y = T.argmax(target, axis=1)
y_hat = model.fprop(state)[:, self.positive_class_index]
# one vs. the rest
if self.negative_class_index is None:
y = T.eq(y, self.positive_class_index)
# one vs. one
else:
pos = T.eq(y, self.positive_class_index)
neg = T.eq(y, self.negative_class_index)
keep = T.add(pos, neg).nonzero()
y = T.eq(y[keep], self.positive_class_index)
y_hat = y_hat[keep]
roc_auc = RocAucScoreOp(self.channel_name_suffix)(y, y_hat)
roc_auc = T.cast(roc_auc, config.floatX)
for dataset_name, dataset in algorithm.monitoring_dataset.items():
if dataset_name:
channel_name = '{0}_{1}'.format(dataset_name,
self.channel_name_suffix)
else:
channel_name = self.channel_name_suffix
model.monitor.add_channel(name=channel_name,
ipt=(state, target),
val=roc_auc,
data_specs=(m_space, m_source),
dataset=dataset)''' | 37.088063 | 119 | 0.575981 |
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import numpy as np
try:
from sklearn.metrics import roc_auc_score, roc_curve
except ImportError:
roc_auc_score = None
import logging
import theano
from theano import gof, config
from theano import tensor as T
from keras.callbacks import Callback
import os
class AUCEpoch(Callback):
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
super(Callback, self).__init__()
self.interval = interval
self.auc = 0
self.X_val, self.y_val = validation_data
self.filepath = filepath
self.mymil = mymil
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
y_pred = self.model.predict(self.X_val, verbose=0)
if self.mymil:
score = roc_auc_score(self.y_val.max(axis=1), y_pred.max(axis=1))
else: score = roc_auc_score(self.y_val[:,1], y_pred[:,1])
print("interval evaluation - epoch: {:d} - auc: {:.2f}".format(epoch, score))
if score > self.auc:
self.auc = score
for f in os.listdir('./'):
if f.startswith(self.filepath+'auc'):
os.remove(f)
self.model.save(self.filepath+'auc'+str(score)+'ep'+str(epoch)+'.hdf5')
class RocAucScoreOp(gof.Op):
def __init__(self, name='roc_auc', use_c_code=theano.config.cxx):
super(RocAucScoreOp, self).__init__(use_c_code)
self.name = name
def make_node(self, y_true, y_score):
y_true = T.as_tensor_variable(y_true)
y_score = T.as_tensor_variable(y_score)
output = [T.vector(name=self.name, dtype=config.floatX)]
return gof.Apply(self, [y_true, y_score], output)
def perform(self, node, inputs, output_storage):
if roc_auc_score is None:
raise RuntimeError("Could not import from sklearn.")
y_true, y_score = inputs
try:
roc_auc = roc_auc_score(y_true, y_score)
except ValueError:
roc_auc = np.nan
output_storage[0][0] = theano._asarray(roc_auc, dtype=config.floatX)
class PrecisionEpoch(Callback):
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
super(Callback, self).__init__()
self.interval = interval
self.prec = 0
self.X_val, self.y_val = validation_data
self.filepath = filepath
self.mymil = mymil
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
y_pred = self.model.predict(self.X_val, verbose=0)
if self.mymil:
y_true = self.y_val.max(axis=1)
y_score = y_pred.max(axis=1)>0.5
else:
y_true = np.argmax(self.y_val, axis=1)
y_score = np.argmax(y_pred, axis=1)
TP = np.sum(y_true[y_score==1]==1)*1.
FP = np.sum(y_true[y_score==1]==0)*1.
prec = TP / (TP+FP+1e-6)
print("interval evaluation - epoch: {:d} - prec: {:.2f}".format(epoch, prec))
if prec > self.prec:
self.prec = prec
for f in os.listdir('./'):
if f.startswith(self.filepath+'prec'):
os.remove(f)
self.model.save(self.filepath+'prec'+str(prec)+'ep'+str(epoch)+'.hdf5')
class PrecisionOp(gof.Op):
def __init__(self, name='precision', use_c_code=theano.config.cxx):
super(PrecisionOp, self).__init__(use_c_code)
self.name = name
def make_node(self, y_true, y_score):
y_true = T.as_tensor_variable(y_true)
y_score = T.as_tensor_variable(y_score)
output = [T.vector(name=self.name, dtype=config.floatX)]
return gof.Apply(self, [y_true, y_score], output)
def perform(self, node, inputs, output_storage):
if roc_auc_score is None:
raise RuntimeError("Could not import from sklearn.")
y_true, y_score = inputs
print(y_true.shape)
y_true = np.argmax(y_true, axis=1)
y_score = np.argmax(y_score, axis=1)
try:
TP = np.sum(y_true[y_score==1]==1)*1.
FP = np.sum(y_true[y_score==1]==0)*1.
prec = TP / (TP+FP+1e-6)
except ValueError:
prec = np.nan
output_storage[0][0] = theano._asarray(prec, dtype=config.floatX)
class RecallEpoch(Callback):
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
super(Callback, self).__init__()
self.interval = interval
self.filepath = filepath
self.reca = 0
self.X_val, self.y_val = validation_data
self.mymil = mymil
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
y_pred = self.model.predict(self.X_val, verbose=0)
if self.mymil:
y_true = self.y_val.max(axis=1)
y_score = y_pred.max(axis=1)>0.5
else:
y_true = np.argmax(self.y_val, axis=1)
y_score = np.argmax(y_pred, axis=1)
TP = np.sum(y_true[y_score==1]==1)*1.
FN = np.sum(y_true[y_score==0]==1)*1.
reca = TP / (TP+FN+1e-6)
print("interval evaluation - epoch: {:d} - reca: {:.2f}".format(epoch, reca))
if reca > self.reca:
self.reca = reca
for f in os.listdir('./'):
if f.startswith(self.filepath+'reca'):
os.remove(f)
self.model.save(self.filepath+'reca'+str(reca)+'ep'+str(epoch)+'.hdf5')
class RecallOp(gof.Op):
def __init__(self, name='recall', use_c_code=theano.config.cxx):
super(RecallOp, self).__init__(use_c_code)
self.name = name
def make_node(self, y_true, y_score):
y_true = T.as_tensor_variable(y_true)
y_score = T.as_tensor_variable(y_score)
output = [T.vector(name=self.name, dtype=config.floatX)]
return gof.Apply(self, [y_true, y_score], output)
def perform(self, node, inputs, output_storage):
if roc_auc_score is None:
raise RuntimeError("Could not import from sklearn.")
y_true, y_score = inputs
y_true = np.argmax(y_true, axis=1)
y_score = np.argmax(y_score, axis=1)
try:
TP = np.sum(y_true[y_score==1]==1)*1.
FN = np.sum(y_true[y_score==0]==1)*1.
reca = TP / (TP+FN+1e-6)
except ValueError:
reca = np.nan
output_storage[0][0] = theano._asarray(reca, dtype=config.floatX)
class F1Epoch(Callback):
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
super(Callback, self).__init__()
self.interval = interval
self.filepath = filepath
self.f1 = 0
self.X_val, self.y_val = validation_data
self.mymil = mymil
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
y_pred = self.model.predict(self.X_val, verbose=0)
if self.mymil:
y_true = self.y_val.max(axis=1)
y_score = y_pred.max(axis=1)>0.5
else:
y_true = np.argmax(self.y_val, axis=1)
y_score = np.argmax(y_pred, axis=1)
TP = np.sum(y_true[y_score==1]==1)*1.
FP = np.sum(y_true[y_score==1]==0)*1.
FN = np.sum(y_true[y_score==0]==1)*1.
f1 = 2*TP / (2*TP + FP + FN+1e-6)
print("interval evaluation - epoch: {:d} - f1: {:.2f}".format(epoch, f1))
if f1 > self.f1:
self.f1 = f1
for f in os.listdir('./'):
if f.startswith(self.filepath+'f1'):
os.remove(f)
self.model.save(self.filepath+'f1'+str(f1)+'ep'+str(epoch)+'.hdf5')
class ACCEpoch(Callback):
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
super(Callback, self).__init__()
self.interval = interval
self.filepath = filepath
self.acc = 0
self.X_val, self.y_val = validation_data
self.mymil = mymil
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
y_pred = self.model.predict(self.X_val, verbose=0)
if self.mymil:
y_true = self.y_val.max(axis=1)
y_score = y_pred.max(axis=1)
else:
y_true = self.y_val[:,1]
y_score = y_pred[:,1]
sortindex = np.argsort(y_score)
y_score = y_score[sortindex]
y_true = y_true[sortindex]
bestacc, bestthresh = np.mean(y_true == np.ones_like(y_true)), y_score[0]-0.001
for thresh in y_score:
acc = np.mean(y_true == (y_score>thresh))
if acc > bestacc:
bestacc, bestthresh = acc, thresh
y_score = y_score>bestthresh
acc = np.mean(y_true == y_score)
assert(acc == bestacc)
print("interval evaluation - epoch: {:d} - acc: {:.2f}".format(epoch, acc))
if acc > self.acc:
self.acc = acc
for f in os.listdir('./'):
if f.startswith(self.filepath+'acc'):
os.remove(f)
self.model.save(self.filepath+'acc'+str(acc)+'ep'+str(epoch)+'.hdf5')
class LossEpoch(Callback):
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
super(Callback, self).__init__()
self.interval = interval
self.filepath = filepath
self.loss = 1e6
self.X_val, self.y_val = validation_data
self.mymil = mymil
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
y_pred = self.model.predict(self.X_val, verbose=0)
if self.mymil:
y_true = self.y_val.max(axis=1)
y_score = y_pred.max(axis=1)>0.5
else:
y_true = np.argmax(self.y_val, axis=1)
y_score = y_pred[np.arange(len(y_true)), y_true] og(y_score+1e-6))
print('')
print("interval evaluation - epoch: {:d} - loss: {:.2f}".format(epoch, loss))
if loss < self.loss:
self.loss = loss
for f in os.listdir('./'):
if f.startswith(self.filepath+'loss'):
os.remove(f)
self.model.save(self.filepath+'loss'+str(loss)+'ep'+str(epoch)+'.hdf5')
class F1Op(gof.Op):
    """Theano Op computing the (binary) F1 score of argmax predictions."""
    def __init__(self, name='f1', use_c_code=theano.config.cxx):
        super(F1Op, self).__init__(use_c_code)
        self.name = name
    def make_node(self, y_true, y_score):
        """Build the symbolic graph node: two tensor inputs, one vector output."""
        y_true = T.as_tensor_variable(y_true)
        y_score = T.as_tensor_variable(y_score)
        output = [T.vector(name=self.name, dtype=config.floatX)]
        return gof.Apply(self, [y_true, y_score], output)
    def perform(self, node, inputs, output_storage):
        """Numeric evaluation: F1 of argmax(y_score) vs argmax(y_true)."""
        # roc_auc_score is presumably imported at module level from sklearn
        # and left as None when that import fails — TODO confirm.
        if roc_auc_score is None:
            raise RuntimeError("Could not import from sklearn.")
        y_true, y_score = inputs
        # Reduce one-hot / probability matrices to class indices.
        y_true = np.argmax(y_true, axis=1)
        y_score = np.argmax(y_score, axis=1)
        try:
            # Multiply by 1. to force float arithmetic in the ratio below.
            TP = np.sum(y_true[y_score==1]==1)*1.
            FP = np.sum(y_true[y_score==1]==0)*1.
            FN = np.sum(y_true[y_score==0]==1)*1.
            f1 = 2*TP / (2*TP +FP +FN)
        except ValueError:
            f1 = np.nan
        output_storage[0][0] = theano._asarray(f1, dtype=config.floatX)
| true | true |
f7f4bbfb2c849749f466e7fe364a7d1fb17bc7dc | 217 | py | Python | loader/modules/model.py | dmvieira/ETL-example | 0734f0190ad3af57e6e55636bc75ded533537cfe | [
"MIT"
] | 1 | 2021-02-15T23:43:46.000Z | 2021-02-15T23:43:46.000Z | loader/modules/model.py | dmvieira/ETL-example | 0734f0190ad3af57e6e55636bc75ded533537cfe | [
"MIT"
] | null | null | null | loader/modules/model.py | dmvieira/ETL-example | 0734f0190ad3af57e6e55636bc75ded533537cfe | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
def get_db():
    """Return a handle to the project database (not implemented yet).

    The eventual connection object is expected to expose ``uri``,
    ``name``, ``site`` and ``kind`` fields.
    """
    # TODO: create the actual database connection and interface here.
    return None
| 21.7 | 54 | 0.589862 |
def get_db():
pass
| true | true |
f7f4bc93c34de5e87243892cad9a7c4099e1a21d | 7,604 | py | Python | tests/unit/test_pure_translators.py | yusdacra/dream2nix | 7e31c966fb0f6511115e4a128a0343c85817991d | [
"MIT"
] | 57 | 2021-11-16T13:17:47.000Z | 2022-03-30T07:19:37.000Z | tests/unit/test_pure_translators.py | yusdacra/dream2nix | 7e31c966fb0f6511115e4a128a0343c85817991d | [
"MIT"
] | 35 | 2021-11-16T18:02:33.000Z | 2022-03-30T21:10:49.000Z | tests/unit/test_pure_translators.py | yusdacra/dream2nix | 7e31c966fb0f6511115e4a128a0343c85817991d | [
"MIT"
] | 15 | 2021-11-16T21:54:16.000Z | 2022-03-23T00:26:03.000Z | import nix_ffi
import os
import pytest
def get_projects_to_test():
    """Collect one test case per (translator, example project) pair.

    Asks dream2nix, through the nix FFI, for every translator's
    ``generateUnitTestsForProjects`` and flattens the answers into plain
    dicts suitable for ``pytest.mark.parametrize``.  Translators of type
    ``all`` are excluded.
    """
    tests = nix_ffi.eval(
        'subsystems.allTranslators',
        wrapper_code = '''
        {result}: let
          lib = (import <nixpkgs> {}).lib;
          l = lib // builtins;
        in
          l.flatten (
            l.map
            (
              translator:
              l.map
                (source: {
                  source = l.toString source;
                  translator = translator.name;
                  inherit (translator) subsystem type;
                })
                (translator.generateUnitTestsForProjects or [])
            )
            result
          )
        ''',
    )
    return [
        dict(
            project=dict(
                name="test",
                relPath="",
                translator=case['translator'],
                subsystemInfo={},
            ),
            translator=case['translator'],
            source=case['source'],
            subsystem=case['subsystem'],
            type=case['type'],
        )
        for case in tests
        if case['type'] != 'all'
    ]
projects = get_projects_to_test()
def check_format_dependencies(dependencies):
    """Assert *dependencies* is a list of {name, version} dicts whose
    values are non-empty strings."""
    assert isinstance(dependencies, list)
    for entry in dependencies:
        assert set(entry) == {'name', 'version'}
        for field in ('name', 'version'):
            value = entry[field]
            assert isinstance(value, str)
            assert value
def check_format_sourceSpec(sourceSpec):
    """Assert *sourceSpec* is a dict carrying at least a 'type' key."""
    assert isinstance(sourceSpec, dict) and 'type' in sourceSpec
@pytest.mark.parametrize("p", projects)
def test_packageName(p):
    """The translator must report a non-empty default package name."""
    defaultPackage = nix_ffi.eval(
        f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
        params=dict(
            project=p['project'],
            source=p['source'],
        ),
        wrapper_code = '''
        {result}:
        result.inputs.defaultPackage
        ''',
    )
    assert isinstance(defaultPackage, str)
    assert len(defaultPackage) > 0
@pytest.mark.parametrize("p", projects)
def test_exportedPackages(p):
    """The translator must expose a non-empty attrset of exported packages."""
    exportedPackages = nix_ffi.eval(
        f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
        params=dict(
            project=p['project'],
            source=p['source'],
        ),
        wrapper_code = '''
        {result}:
        result.inputs.exportedPackages
        ''',
    )
    assert isinstance(exportedPackages, dict)
    assert len(exportedPackages) > 0
@pytest.mark.parametrize("p", projects)
def test_extraObjects(p):
    """Every extra object must be fully specified: name, version,
    dependencies and sourceSpec, all well-formed."""
    extraObjects = nix_ffi.eval(
        f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
        params=dict(
            project=p['project'],
            source=p['source'],
        ),
        wrapper_code = '''
        {result}:
        result.inputs.extraObjects
        ''',
    )
    assert isinstance(extraObjects, list)
    for extra_obj in extraObjects:
        assert set(extra_obj.keys()) == \
            {'name', 'version', 'dependencies', 'sourceSpec'}
        assert isinstance(extra_obj['name'], str)
        assert len(extra_obj['name']) > 0
        assert isinstance(extra_obj['version'], str)
        assert len(extra_obj['version']) > 0
        check_format_dependencies(extra_obj['dependencies'])
        check_format_sourceSpec(extra_obj['sourceSpec'])
@pytest.mark.parametrize("p", projects)
def test_location(p):
    """The translator must report its source location as a string
    (possibly empty for the project root)."""
    location = nix_ffi.eval(
        f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
        params=dict(
            project=p['project'],
            source=p['source'],
        ),
        wrapper_code = '''
        {result}:
        result.inputs.location
        ''',
    )
    assert isinstance(location, str)
@pytest.mark.parametrize("p", projects)
def test_serializedRawObjects(p):
    """Raw objects must serialize to a list of plain dicts."""
    serializedRawObjects = nix_ffi.eval(
        f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
        params=dict(
            project=p['project'],
            source=p['source'],
        ),
        wrapper_code = '''
        {result}:
        result.inputs.serializedRawObjects
        ''',
    )
    assert isinstance(serializedRawObjects, list)
    for raw_obj in serializedRawObjects:
        assert isinstance(raw_obj, dict)
@pytest.mark.parametrize("p", projects)
def test_subsystemName(p):
    """The translator must report a non-empty subsystem name."""
    subsystemName = nix_ffi.eval(
        f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
        params=dict(
            project=p['project'],
            source=p['source'],
        ),
        wrapper_code = '''
        {result}:
        result.inputs.subsystemName
        ''',
    )
    assert isinstance(subsystemName, str)
    assert len(subsystemName) > 0
@pytest.mark.parametrize("p", projects)
def test_subsystemAttrs(p):
    """Subsystem attributes must evaluate to a dict (may be empty)."""
    subsystemAttrs = nix_ffi.eval(
        f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
        params=dict(
            project=p['project'],
            source=p['source'],
        ),
        wrapper_code = '''
        {result}:
        result.inputs.subsystemAttrs
        ''',
    )
    assert isinstance(subsystemAttrs, dict)
@pytest.mark.parametrize("p", projects)
def test_translatorName(p):
    """The translator must report its own non-empty name."""
    translatorName = nix_ffi.eval(
        f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
        params=dict(
            project=p['project'],
            source=p['source'],
        ),
        wrapper_code = '''
        {result}:
        result.inputs.translatorName
        ''',
    )
    assert isinstance(translatorName, str)
    assert len(translatorName) > 0
@pytest.mark.parametrize("p", projects)
def test_extractors(p):
    """Applying the extractors to every raw object (plus the extra
    objects) must yield fully specified dependency objects."""
    finalObjects = nix_ffi.eval(
        f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
        params=dict(
            project=p['project'],
            source=p['source'],
        ),
        wrapper_code = '''
        {result}:
        let
          l = builtins;
          inputs = result.inputs;
          rawObjects = inputs.serializedRawObjects;
          finalObjects =
            l.map
            (rawObj: let
              finalObj =
                l.mapAttrs
                (key: extractFunc: extractFunc rawObj finalObj)
                inputs.extractors;
            in
              finalObj)
            rawObjects;
        in
          finalObjects ++ (inputs.extraObjects or [])
        ''',
    )
    assert isinstance(finalObjects, list)
    assert len(finalObjects) > 0
    for finalObj in finalObjects:
        assert set(finalObj.keys()) == \
            {'name', 'version', 'sourceSpec', 'dependencies'}
        check_format_dependencies(finalObj['dependencies'])
        check_format_sourceSpec(finalObj['sourceSpec'])
@pytest.mark.parametrize("p", projects)
def test_keys(p):
    """Each declared key function must index the extracted objects into
    well-formed final objects."""
    objectsByKey = nix_ffi.eval(
        f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
        params=dict(
            project=p['project'],
            source=p['source'],
        ),
        wrapper_code = '''
        {result}:
        let
          l = builtins;
          inputs = result.inputs;
          rawObjects = inputs.serializedRawObjects;
          finalObjects =
            l.map
            (rawObj: let
              finalObj =
                {inherit rawObj;}
                // l.mapAttrs
                (key: extractFunc: extractFunc rawObj finalObj)
                inputs.extractors;
            in
              finalObj)
            rawObjects;
          objectsByKey =
            l.mapAttrs
            (key: keyFunc:
              l.foldl'
              (merged: finalObj:
                merged
                // {"${keyFunc finalObj.rawObj finalObj}" = finalObj;})
              {}
              (finalObjects))
            inputs.keys;
        in
          objectsByKey
        ''',
    )
    assert isinstance(objectsByKey, dict)
    for key_name, objects in objectsByKey.items():
        for finalObj in objects.values():
            assert set(finalObj.keys()) == \
                {'name', 'version', 'sourceSpec', 'dependencies', 'rawObj'}
            check_format_dependencies(finalObj['dependencies'])
            check_format_sourceSpec(finalObj['sourceSpec'])
| 26.587413 | 75 | 0.603761 | import nix_ffi
import os
import pytest
def get_projects_to_test():
tests = nix_ffi.eval(
'subsystems.allTranslators',
wrapper_code = '''
{result}: let
lib = (import <nixpkgs> {}).lib;
l = lib // builtins;
in
l.flatten (
l.map
(
translator:
l.map
(source: {
source = l.toString source;
translator = translator.name;
inherit (translator) subsystem type;
})
(translator.generateUnitTestsForProjects or [])
)
result
)
''',
)
result = []
for test in tests:
if test['type'] == 'all':
continue
result.append(dict(
project = dict(
name="test",
relPath="",
translator=test['translator'],
subsystemInfo={},
),
translator=test['translator'],
source = test['source'],
subsystem = test['subsystem'],
type = test['type'],
))
return result
projects = get_projects_to_test()
def check_format_dependencies(dependencies):
assert isinstance(dependencies, list)
for dep in dependencies:
assert set(dep.keys()) == {'name', 'version'}
assert isinstance(dep['name'], str)
assert len(dep['name']) > 0
assert isinstance(dep['version'], str)
assert len(dep['version']) > 0
def check_format_sourceSpec(sourceSpec):
assert isinstance(sourceSpec, dict)
assert 'type' in sourceSpec
@pytest.mark.parametrize("p", projects)
def test_packageName(p):
defaultPackage = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.defaultPackage
''',
)
assert isinstance(defaultPackage, str)
assert len(defaultPackage) > 0
@pytest.mark.parametrize("p", projects)
def test_exportedPackages(p):
exportedPackages = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.exportedPackages
''',
)
assert isinstance(exportedPackages, dict)
assert len(exportedPackages) > 0
@pytest.mark.parametrize("p", projects)
def test_extraObjects(p):
extraObjects = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.extraObjects
''',
)
assert isinstance(extraObjects, list)
for extra_obj in extraObjects:
assert set(extra_obj.keys()) == \
{'name', 'version', 'dependencies', 'sourceSpec'}
assert isinstance(extra_obj['name'], str)
assert len(extra_obj['name']) > 0
assert isinstance(extra_obj['version'], str)
assert len(extra_obj['version']) > 0
check_format_dependencies(extra_obj['dependencies'])
check_format_sourceSpec(extra_obj['sourceSpec'])
@pytest.mark.parametrize("p", projects)
def test_location(p):
location = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.location
''',
)
assert isinstance(location, str)
@pytest.mark.parametrize("p", projects)
def test_serializedRawObjects(p):
serializedRawObjects = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.serializedRawObjects
''',
)
assert isinstance(serializedRawObjects, list)
for raw_obj in serializedRawObjects:
assert isinstance(raw_obj, dict)
@pytest.mark.parametrize("p", projects)
def test_subsystemName(p):
subsystemName = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.subsystemName
''',
)
assert isinstance(subsystemName, str)
assert len(subsystemName) > 0
@pytest.mark.parametrize("p", projects)
def test_subsystemAttrs(p):
subsystemAttrs = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.subsystemAttrs
''',
)
assert isinstance(subsystemAttrs, dict)
@pytest.mark.parametrize("p", projects)
def test_translatorName(p):
translatorName = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
result.inputs.translatorName
''',
)
assert isinstance(translatorName, str)
assert len(translatorName) > 0
@pytest.mark.parametrize("p", projects)
def test_extractors(p):
finalObjects = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
let
l = builtins;
inputs = result.inputs;
rawObjects = inputs.serializedRawObjects;
finalObjects =
l.map
(rawObj: let
finalObj =
l.mapAttrs
(key: extractFunc: extractFunc rawObj finalObj)
inputs.extractors;
in
finalObj)
rawObjects;
in
finalObjects ++ (inputs.extraObjects or [])
''',
)
assert isinstance(finalObjects, list)
assert len(finalObjects) > 0
for finalObj in finalObjects:
assert set(finalObj.keys()) == \
{'name', 'version', 'sourceSpec', 'dependencies'}
check_format_dependencies(finalObj['dependencies'])
check_format_sourceSpec(finalObj['sourceSpec'])
@pytest.mark.parametrize("p", projects)
def test_keys(p):
objectsByKey = nix_ffi.eval(
f"subsystems.{p['subsystem']}.translators.{p['translator']}.translate",
params=dict(
project=p['project'],
source=p['source'],
),
wrapper_code = '''
{result}:
let
l = builtins;
inputs = result.inputs;
rawObjects = inputs.serializedRawObjects;
finalObjects =
l.map
(rawObj: let
finalObj =
{inherit rawObj;}
// l.mapAttrs
(key: extractFunc: extractFunc rawObj finalObj)
inputs.extractors;
in
finalObj)
rawObjects;
objectsByKey =
l.mapAttrs
(key: keyFunc:
l.foldl'
(merged: finalObj:
merged
// {"${keyFunc finalObj.rawObj finalObj}" = finalObj;})
{}
(finalObjects))
inputs.keys;
in
objectsByKey
''',
)
assert isinstance(objectsByKey, dict)
for key_name, objects in objectsByKey.items():
for finalObj in objects.values():
assert set(finalObj.keys()) == \
{'name', 'version', 'sourceSpec', 'dependencies', 'rawObj'}
check_format_dependencies(finalObj['dependencies'])
check_format_sourceSpec(finalObj['sourceSpec'])
| true | true |
f7f4bc9a39586ce7163d6739eadf68232b36739a | 12,398 | py | Python | busca-jogos/buscaccold.py | IvanBrasilico/AI-NanoDegree | 531e63d99ae906b5908e064e9b716ebe22e48c8f | [
"MIT"
] | null | null | null | busca-jogos/buscaccold.py | IvanBrasilico/AI-NanoDegree | 531e63d99ae906b5908e064e9b716ebe22e48c8f | [
"MIT"
] | null | null | null | busca-jogos/buscaccold.py | IvanBrasilico/AI-NanoDegree | 531e63d99ae906b5908e064e9b716ebe22e48c8f | [
"MIT"
] | null | null | null | import random
from collections import OrderedDict
COLUNAS = 'ABCDEF'
PILHAS = 'abcde'
ALTURAS = '12345'
def busca_acima(posicoes, stack, posicao=None, conteiner=None):
    """Return the stack slots at and above a position, bottom to top.

    :param posicoes: dict mapping container number -> position string
    :param stack: dict mapping position string -> container (or None)
    :param posicao: position string 'coluna'+'pilha'+'altura', e.g. 'Eb1';
        if None it is looked up in *posicoes* via *conteiner*
    :param conteiner: container number used when *posicao* is None
    :return: list of (position, content) tuples from the given height
        up to the top (height 5)
    """
    if posicao is None:
        posicao = posicoes[conteiner]
    coluna = posicao[0]
    pilha = posicao[1]
    altura = int(posicao[2])
    return [(coluna + pilha + str(ind),
             stack[coluna + pilha + str(ind)]) for ind in range(altura, 6)]
class Container():
    """A container box identified by its number string."""

    def __init__(self, numero):
        self._numero = numero

    def time_to_leave(self):
        """Estimated time until the container leaves the yard."""
        # TODO: replace this placeholder with the trained time regressor.
        return 5

    def __str__(self):
        return self._numero

    __repr__ = __str__
class Pilha():
    """Define uma pilha (stack block) of columns [A-F] and heights [1-5].

    Each slot holds a Container or None.  Positions are addressed by a
    two-character string 'coluna'+'altura', e.g. 'B3'.
    """

    def __init__(self, nome):
        self._pilha = OrderedDict()
        self._nome = nome
        for coluna in COLUNAS:
            for altura in ALTURAS:
                if self._pilha.get(coluna) is None:
                    self._pilha[coluna] = OrderedDict()
                self._pilha[coluna][altura] = None

    def mean(self):
        """Average time_to_leave() over the stacked containers (0.0 if empty)."""
        soma = 0
        qtde = 0
        # Fix: this used ``self.pilha`` (no such attribute) and iterated
        # the inner dict's keys (strings) instead of its containers.
        for coluna in self._pilha.values():
            for container in coluna.values():
                if container:
                    soma += container.time_to_leave()
                    qtde += 1
        if qtde == 0:
            return 0.0  # avoid ZeroDivisionError on an empty pile
        return soma / qtde

    def position_totuple(self, position):
        """Split a position string into its (coluna, altura) characters."""
        coluna = position[0]
        altura = position[1]
        return coluna, altura

    def get_containerinposition(self, position):
        """Return whatever occupies *position* (a Container or None)."""
        coluna, altura = self.position_totuple(position)
        return self._pilha[coluna][altura]

    def side_locked(self, pcoluna, paltura):
        """True if any later column holds a container at this height or above."""
        firstcol = COLUNAS.find(pcoluna)
        firstheight = ALTURAS.find(paltura)
        for coluna in COLUNAS[firstcol + 1:]:
            for altura in ALTURAS[firstheight:]:
                if self._pilha[coluna][altura] is not None:
                    return True
        return False

    def up_locked(self, pcoluna, paltura):
        """True if a container sits above the given slot in the same column."""
        firstheight = ALTURAS.find(paltura)
        for altura in ALTURAS[firstheight + 1:]:
            if self._pilha[pcoluna][altura] is not None:
                return True
        return False

    def is_position_locked(self, position):
        """Return (coluna, altura) if the slot is occupied and freely
        reachable (nothing above or blocking sideways), else (False, False).
        """
        coluna, altura = self.position_totuple(position)
        if self._pilha[coluna][altura] is not None:
            if not (self.up_locked(coluna, altura) or
                    self.side_locked(coluna, altura)):
                return coluna, altura
        return False, False

    def remove(self, position, container):
        """Remove *container* from *position* if reachable; True on success."""
        coluna, altura = self.is_position_locked(position)
        if coluna:
            stacked_container = self._pilha[coluna][altura]
            if stacked_container == container:
                self._pilha[coluna][altura] = None
                return True
        return False

    def first_free_position(self):
        """First empty slot in column-major order, or (False, False) if full."""
        for coluna in COLUNAS:
            for altura in ALTURAS:
                if self._pilha[coluna][altura] is None:
                    return coluna, altura
        return False, False

    def is_position_free(self, position=None):
        """Return (coluna, altura) if the slot is free, else (False, False).

        :param position: String 'coluna'+'altura'.  When omitted, the
            first free slot in the pile is returned instead.
        """
        if position:
            coluna, altura = self.position_totuple(position)
            if self._pilha[coluna][altura] is None:
                return coluna, altura
            # Fix: an occupied slot used to fall through and implicitly
            # return None, which crashed stack() when unpacking.
            return False, False
        return self.first_free_position()

    def stack(self, container, posicao):
        """Place *container* at *posicao* (or the first free slot when
        posicao is falsy); return the position string or False if full."""
        coluna, altura = self.is_position_free(posicao)
        if coluna:
            self._pilha[coluna][altura] = container
            return coluna + altura
        return False
class Patio():
    """A container yard: a collection of named Pilha stacks plus an
    index from container number to its current location."""
    def __init__(self, nome=''):
        self._nome = nome
        self._pilhas = OrderedDict()      # pile name -> Pilha
        self._containers = OrderedDict()  # numero -> (pile name, position, Container)
        self._history = OrderedDict()     # numero -> last location of removed containers
    def add_pilha(self, nome_pilha=None):
        """Create a new empty pile under *nome_pilha*."""
        self._pilhas[nome_pilha] = Pilha(nome_pilha)
    def stack(self, container, nome_pilha, posicao=None):
        """Place *container* in the named pile and index it; return the
        resulting position string or False if the pile refused it."""
        pilha = self._pilhas[nome_pilha]
        posicao = pilha.stack(container, posicao)
        if posicao:
            self._containers[container._numero] = (nome_pilha, posicao, container)
        return posicao
    def unstack(self, nome_pilha, position, container):
        """Remove *container* from the pile; move its index entry to the
        history on success.  Returns True/False."""
        pilha = self._pilhas.get(nome_pilha)
        if pilha:
            sucess = pilha.remove(position, container)
            if sucess:
                self._history[container._numero] = \
                    self._containers.pop(container._numero)
                return True
        return False
    def add_container(self, container, nome_pilha=None, posicao=None):
        """Add a container to the given pile, or to the yard.
        Returns None if the pile or the yard is full.
        :param container: Container object
        :param nome_pilha: Name of the pile to use.
        If not given, every pile is tried in order
        :param posicao: String 'B5' ('coluna'+'altura')
        :return: None if pile/yard is full, otherwise the position
        """
        if nome_pilha is None:
            for pilha in self._pilhas.values():
                posicao = self.add_container(container, pilha._nome, posicao)
                if posicao is not None:
                    break
        else:
            posicao = self.stack(container, nome_pilha, posicao)
        return posicao
    def get_container_tuple(self, numero):
        """Look up a container by number: (pile name, position, Container),
        or (None, None, None) when unknown."""
        nome_pilha, position, container = self._containers.get(numero, (None, None, None))
        return nome_pilha, position, container
    def get_container_numero(self, numero):
        """Return the Container stored under *numero*, or None."""
        nome_pilha, position, container = self.get_container_tuple(numero)
        if nome_pilha:
            return container
        return None
    def remove_container(self, container):
        """Remove a Container object from the yard; False if it is not
        a Container, not indexed, or cannot be reached."""
        if container is None or not (isinstance(container, Container)):
            return False
        nome_pilha, position, container = self.get_container_tuple(container._numero)
        if position is None:
            return False
        return self.remove_position(nome_pilha, position, container)
    def remove_position(self, nome_pilha, position, container):
        """Thin wrapper around unstack()."""
        return self.unstack(nome_pilha, position, container)
class GerenteRemocao:
    """Removal manager: plans and executes container removals on a Patio."""
    def __init__(self, patio: Patio):
        self._patio = patio
    def add_container(self, container, nome_pilha=None, posicao=None):
        """Delegate to Patio.add_container."""
        return self._patio.add_container(container, nome_pilha, posicao)
    def monta_caminho_remocao(self, numero: str) -> list:
        """Compute the minimal removal path for the container.

        Returns the containers that must be lifted (in removal order,
        top-most and outer-most first) to reach *numero*, including the
        target itself; empty list when the number is unknown.
        """
        nome_pilha, position, container = self._patio.get_container_tuple(numero)
        pilha = self._patio._pilhas.get(nome_pilha)
        caminho = []
        if pilha:
            if numero == container._numero:
                coluna = position[0]
                altura = position[1]
                firstcol = COLUNAS.find(coluna)
                firstheight = ALTURAS.find(altura)
                # Walk from the outermost/top slot back to the target.
                for coluna in reversed(COLUNAS[firstcol:]):
                    for altura in reversed(ALTURAS[firstheight:]):
                        if pilha._pilha[coluna][altura] is not None:
                            caminho.append(pilha._pilha[coluna][altura])
        return caminho
    def remove_caminho(self, numero: str) -> list:
        """Physically remove every container on the path to *numero*;
        return the path that was removed."""
        caminho = self.monta_caminho_remocao(numero)
        for container in caminho:
            self._patio.remove_container(container)
        return caminho
"""
lista_containers = ['{0:05d}'.format(num) for num in range(10000)]
stack = OrderedDict()
for coluna in COLUNAS:
for pilha in PILHAS:
for altura in ALTURAS:
stack[coluna + pilha + altura] = None
print(lista_containers[1:10])
print(lista_containers[-9:])
print(stack)
print(len(stack))
posicoes = OrderedDict()
for posicao in stack.keys():
conteiner = choice(lista_containers)
while conteiner in posicoes:
conteiner = choice(lista_containers)
posicoes[conteiner] = posicao
stack[posicao] = conteiner
print(posicoes)
print(stack)
print(busca_acima(posicoes, stack, 'Eb1'))
"""
# --- Manual smoke test of the yard model (runs on import). ---
patio = Patio()
patio.add_pilha('TESTE')
print(patio._pilhas['TESTE']._pilha)
# Fill the pile with 32 containers (more than its 30 slots hold).
for r in range(1, 33):
    container = Container('{0:03d}'.format(r))
    patio.add_container(container)
print(patio._pilhas['TESTE']._pilha)
container30 = Container('030')
print(patio.add_container(container30))
print(patio._pilhas['TESTE']._pilha)
container31 = Container('031')
print(patio.add_container(container31))
print(patio._pilhas['TESTE']._pilha)
print(patio._containers)
print(patio.remove_container(container30))
print(patio.add_container(container31))
print(patio._pilhas['TESTE']._pilha)
print(patio._containers)
# '20' is not a valid number (they are zero-padded); expect None/False.
container20 = patio.get_container_numero('20')
print(container20)
print(patio.remove_container(container20))
if not container20:
    container20 = patio.get_container_numero('020')
    print(container20)
    print(patio.remove_container(container20))
print(patio._pilhas['TESTE']._pilha)
print(patio._containers)
print(patio.remove_container(container30))
print(patio._pilhas['TESTE']._pilha)
print(patio._containers)
print(patio.remove_container(container31))
print(patio._pilhas['TESTE']._pilha)
print(patio._containers)
print('history: ', patio._history)
gerente = GerenteRemocao(patio)
print(gerente.monta_caminho_remocao('020'))
container003 = patio.get_container_numero('003')
print(gerente.monta_caminho_remocao('003'))
print(gerente.remove_caminho('020'))
print(gerente.remove_caminho('003'))
print(patio._history)
# Pool of unique 5-digit container numbers drawn from in the simulations below.
lista_containers = ['{0:05d}'.format(num) for num in range(10000)]
# Chaotic strategy: serve removal requests in random arrival order.
# Monte-Carlo baseline: remove containers in random request order and
# count how many physical moves a 10-turn run needs.
totalgeral = 0
for turn in range(10):
    patio_carlo = Patio()
    patio_carlo.add_pilha('TESTE')
    gerente = GerenteRemocao(patio_carlo)
    # Load 20 freshly drawn containers into the pile.
    for add_cc in range(20):
        ind = random.randint(0, len(lista_containers) - 1)
        numero = lista_containers.pop(ind)
        posicao = gerente.add_container(Container(numero))
    totalremocoes = 0
    for remove_cc in range(20):
        numeros = [k for k in patio_carlo._containers.keys()]
        # TODO: re-stock the pile so each run keeps a constant load.
        if len(numeros) == 0:
            break
        numero = random.choice(numeros)
        caminho = gerente.remove_caminho(numero)
        totalremocoes += len(caminho)
        # Containers dug up on the way to the target go back into the pile.
        for container in caminho:
            if container._numero != numero:
                gerente.add_container(container)
    print('Turn: %s Remoções: %s' % (turn, totalremocoes))
    totalgeral += totalremocoes
# Fix: average over the number of turns (10), not the last loop index (9).
print(totalgeral / (turn + 1))
# Ordered strategy: plan the removals first, then serve them
# shortest-path-first, to compare move counts with the chaotic run.
totalgeral = 0
for turn in range(10):
    patio_carlo = Patio()
    patio_carlo.add_pilha('TESTE')
    gerente = GerenteRemocao(patio_carlo)
    for add_cc in range(20):
        ind = random.randint(0, len(lista_containers) - 1)
        numero = lista_containers.pop(ind)
        posicao = gerente.add_container(Container(numero))
    totalremocoes = 0
    caminhos = []
    # Plan: estimate the path length of 20 random removal requests.
    # NOTE(review): random.choice may pick the same container twice and
    # the estimates go stale as containers are re-stacked — kept as-is.
    for remove_cc in range(20):
        numeros = [k for k in patio_carlo._containers.keys()]
        numero = random.choice(numeros)
        caminho = gerente.monta_caminho_remocao(numero)
        caminhos.append((len(caminho), numero))
    # Execute: cheapest removals first.
    for _, numero in sorted(caminhos, key=lambda x: x[0]):
        caminho = gerente.remove_caminho(numero)
        for container in caminho:
            if container._numero != numero:
                gerente.add_container(container)
        totalremocoes += len(caminho)
    print('Turn: %s Remoções: %s' % (turn, totalremocoes))
    totalgeral += totalremocoes
# Fix: average over the number of turns (10), not the last loop index (9).
print(totalgeral / (turn + 1))
from collections import OrderedDict
COLUNAS = 'ABCDEF'
PILHAS = 'abcde'
ALTURAS = '12345'
def busca_acima(posicoes, stack, posicao=None, conteiner=None):
if posicao is None:
posicao = posicoes[conteiner]
coluna = posicao[0]
pilha = posicao[1]
altura = int(posicao[2])
return [(coluna + pilha + str(ind),
stack[coluna + pilha + str(ind)]) for ind in range(altura, 6)]
class Container():
def __init__(self, numero):
self._numero = numero
def time_to_leave(self):
return 5
def __str__(self):
return self._numero
def __repr__(self):
return self._numero
class Pilha():
def __init__(self, nome):
self._pilha = OrderedDict()
self._nome = nome
for coluna in COLUNAS:
for altura in ALTURAS:
if self._pilha.get(coluna) is None:
self._pilha[coluna] = OrderedDict()
self._pilha[coluna][altura] = None
def mean(self):
soma = 0
qtde = 0
for coluna in self.pilha.values():
for container in coluna:
if container:
soma += container.time_to_leave()
qtde += 1
return soma / qtde
def position_totuple(self, position):
coluna = position[0]
altura = position[1]
return coluna, altura
def get_containerinposition(self, position):
coluna, altura = self.position_totuple(position)
return self._pilha[coluna][altura]
def side_locked(self, pcoluna, paltura):
firstcol = COLUNAS.find(pcoluna)
firstheight = ALTURAS.find(paltura)
for coluna in COLUNAS[firstcol + 1:]:
for altura in ALTURAS[firstheight:]:
if self._pilha[coluna][altura] is not None:
return True
return False
def up_locked(self, pcoluna, paltura):
firstheight = ALTURAS.find(paltura)
for altura in ALTURAS[firstheight + 1:]:
if self._pilha[pcoluna][altura] is not None:
return True
return False
def is_position_locked(self, position):
coluna, altura = self.position_totuple(position)
if self._pilha[coluna][altura] is not None:
if not (self.up_locked(coluna, altura) or
self.side_locked(coluna, altura)):
return coluna, altura
return False, False
def remove(self, position, container):
coluna, altura = self.is_position_locked(position)
if coluna:
stacked_container = self._pilha[coluna][altura]
if stacked_container == container:
self._pilha[coluna][altura] = None
return True
return False
def first_free_position(self):
for coluna in COLUNAS:
for altura in ALTURAS:
if self._pilha[coluna][altura] == None:
return coluna, altura
return False, False
def is_position_free(self, position=None):
if position:
coluna, altura = self.position_totuple(position)
if self._pilha[coluna][altura] is None:
return coluna, altura
else:
return self.first_free_position()
def stack(self, container, posicao):
coluna, altura = self.is_position_free(posicao)
if coluna:
self._pilha[coluna][altura] = container
return coluna + altura
return False
class Patio():
def __init__(self, nome=''):
self._nome = nome
self._pilhas = OrderedDict()
self._containers = OrderedDict()
self._history = OrderedDict()
def add_pilha(self, nome_pilha=None):
self._pilhas[nome_pilha] = Pilha(nome_pilha)
def stack(self, container, nome_pilha, posicao=None):
pilha = self._pilhas[nome_pilha]
posicao = pilha.stack(container, posicao)
if posicao:
self._containers[container._numero] = (nome_pilha, posicao, container)
return posicao
def unstack(self, nome_pilha, position, container):
pilha = self._pilhas.get(nome_pilha)
if pilha:
sucess = pilha.remove(position, container)
if sucess:
self._history[container._numero] = \
self._containers.pop(container._numero)
return True
return False
def add_container(self, container, nome_pilha=None, posicao=None):
if nome_pilha is None:
for pilha in self._pilhas.values():
posicao = self.add_container(container, pilha._nome, posicao)
if posicao is not None:
break
else:
posicao = self.stack(container, nome_pilha, posicao)
return posicao
def get_container_tuple(self, numero):
nome_pilha, position, container = self._containers.get(numero, (None, None, None))
return nome_pilha, position, container
def get_container_numero(self, numero):
nome_pilha, position, container = self.get_container_tuple(numero)
if nome_pilha:
return container
return None
def remove_container(self, container):
if container is None or not (isinstance(container, Container)):
return False
nome_pilha, position, container = self.get_container_tuple(container._numero)
if position is None:
return False
return self.remove_position(nome_pilha, position, container)
def remove_position(self, nome_pilha, position, container):
return self.unstack(nome_pilha, position, container)
class GerenteRemocao:
def __init__(self, patio: Patio):
self._patio = patio
def add_container(self, container, nome_pilha=None, posicao=None):
return self._patio.add_container(container, nome_pilha, posicao)
def monta_caminho_remocao(self, numero: str) -> list:
nome_pilha, position, container = self._patio.get_container_tuple(numero)
pilha = self._patio._pilhas.get(nome_pilha)
caminho = []
if pilha:
if numero == container._numero:
coluna = position[0]
altura = position[1]
firstcol = COLUNAS.find(coluna)
firstheight = ALTURAS.find(altura)
for coluna in reversed(COLUNAS[firstcol:]):
for altura in reversed(ALTURAS[firstheight:]):
if pilha._pilha[coluna][altura] is not None:
caminho.append(pilha._pilha[coluna][altura])
return caminho
def remove_caminho(self, numero: str) -> list:
caminho = self.monta_caminho_remocao(numero)
for container in caminho:
self._patio.remove_container(container)
return caminho
patio = Patio()
patio.add_pilha('TESTE')
print(patio._pilhas['TESTE']._pilha)
for r in range(1, 33):
container = Container('{0:03d}'.format(r))
patio.add_container(container)
print(patio._pilhas['TESTE']._pilha)
container30 = Container('030')
print(patio.add_container(container30))
print(patio._pilhas['TESTE']._pilha)
container31 = Container('031')
print(patio.add_container(container31))
print(patio._pilhas['TESTE']._pilha)
print(patio._containers)
print(patio.remove_container(container30))
print(patio.add_container(container31))
print(patio._pilhas['TESTE']._pilha)
print(patio._containers)
container20 = patio.get_container_numero('20')
print(container20)
print(patio.remove_container(container20))
if not container20:
container20 = patio.get_container_numero('020')
print(container20)
print(patio.remove_container(container20))
print(patio._pilhas['TESTE']._pilha)
print(patio._containers)
print(patio.remove_container(container30))
print(patio._pilhas['TESTE']._pilha)
print(patio._containers)
print(patio.remove_container(container31))
print(patio._pilhas['TESTE']._pilha)
print(patio._containers)
print('history: ', patio._history)
gerente = GerenteRemocao(patio)
print(gerente.monta_caminho_remocao('020'))
container003 = patio.get_container_numero('003')
print(gerente.monta_caminho_remocao('003'))
print(gerente.remove_caminho('020'))
print(gerente.remove_caminho('003'))
print(patio._history)
lista_containers = ['{0:05d}'.format(num) for num in range(10000)]
totalgeral = 0
for turn in range(10):
patio_carlo = Patio()
patio_carlo.add_pilha('TESTE')
gerente = GerenteRemocao(patio_carlo)
for add_cc in range(20):
ind = random.randint(0, len(lista_containers) - 1)
numero = lista_containers.pop(ind)
posicao = gerente.add_container(Container(numero))
numeros = [k for k in patio_carlo._containers.keys()]
totalremocoes = 0
for remove_cc in range(20):
numeros = [k for k in patio_carlo._containers.keys()]
if len(numeros) == 0:
break
numero = random.choice(numeros)
caminho = gerente.remove_caminho(numero)
totalremocoes += len(caminho)
for container in caminho:
if container._numero != numero:
gerente.add_container(container)
print('Turn: %s Remoções: %s' % (turn, totalremocoes))
totalgeral += totalremocoes
print(totalgeral/turn)
totalgeral = 0
for turn in range(10):
patio_carlo = Patio()
patio_carlo.add_pilha('TESTE')
gerente = GerenteRemocao(patio_carlo)
for add_cc in range(20):
ind = random.randint(0, len(lista_containers) - 1)
numero = lista_containers.pop(ind)
posicao = gerente.add_container(Container(numero))
numeros = [k for k in patio_carlo._containers.keys()]
totalremocoes = 0
caminhos = []
for remove_cc in range(20):
numeros = [k for k in patio_carlo._containers.keys()]
numero = random.choice(numeros)
caminho = gerente.monta_caminho_remocao(numero)
caminhos.append((len(caminho), numero))
for _, numero in sorted(caminhos, key=lambda x: x[0]):
caminho = gerente.remove_caminho(numero)
for container in caminho:
if container._numero != numero:
gerente.add_container(container)
totalremocoes += len(caminho)
print('Turn: %s Remoções: %s' % (turn, totalremocoes))
totalgeral += totalremocoes
print(totalgeral/turn) | true | true |
f7f4bcbaaf66c974775580d50b3f253290ddff72 | 631 | py | Python | test/programytest/clients/render/test_passthrough.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 5 | 2018-08-21T00:13:45.000Z | 2018-09-01T20:00:55.000Z | test/programytest/clients/render/test_passthrough.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 1 | 2018-09-12T18:30:17.000Z | 2018-09-12T18:30:17.000Z | test/programytest/clients/render/test_passthrough.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 5 | 2018-08-21T00:08:36.000Z | 2018-09-23T06:11:04.000Z | import unittest
import unittest.mock
from programy.clients.render.passthrough import PassThroughRenderer
class MockConsoleBotClient(object):
    # Minimal test double: records the last response handed to process_response.
    def __init__(self):
        # Last response seen; None until process_response is called.
        self._response = None
    def process_response(self, client_context, response):
        # client_context is ignored; only the response text is captured.
        self._response = response
class PassThroughRendererTests(unittest.TestCase):
    """PassThroughRenderer must forward plain text to its client unchanged."""

    def test_text_only(self):
        client = MockConsoleBotClient()
        renderer = PassThroughRenderer(client)
        self.assertIsNotNone(renderer)
        renderer.render("testuser", "Hello world")
        self.assertEqual(client._response, "Hello world")
| 24.269231 | 67 | 0.735341 | import unittest
import unittest.mock
from programy.clients.render.passthrough import PassThroughRenderer
class MockConsoleBotClient(object):
    # Minimal test double: records the last response handed to process_response.
    def __init__(self):
        # Last response seen; None until process_response is called.
        self._response = None
    def process_response(self, client_context, response):
        # client_context is ignored; only the response text is captured.
        self._response = response
class PassThroughRendererTests(unittest.TestCase):
    # Verifies the renderer hands plain text to the client verbatim.
    def test_text_only(self):
        mock_console = MockConsoleBotClient()
        renderer = PassThroughRenderer(mock_console)
        self.assertIsNotNone(renderer)
        renderer.render("testuser", "Hello world")
        # The mock captured exactly the rendered text.
        self.assertEqual(mock_console._response, "Hello world")
| true | true |
f7f4bd06621a1b3a71c71328142506f0024b8094 | 6,927 | py | Python | backend/weeklyemailapp_1/settings.py | crowdbotics-dev/weeklyemailapp-1 | fcf086394e864b8fa3606e18f6c5fe26146cc622 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/weeklyemailapp_1/settings.py | crowdbotics-dev/weeklyemailapp-1 | fcf086394e864b8fa3606e18f6c5fe26146cc622 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/weeklyemailapp_1/settings.py | crowdbotics-dev/weeklyemailapp-1 | fcf086394e864b8fa3606e18f6c5fe26146cc622 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
Django settings for weeklyemailapp_1 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'weeklyemailapp_1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'weeklyemailapp_1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
# S3-backed media storage is enabled only when all four AWS settings are set.
USE_S3 = (
    AWS_ACCESS_KEY_ID and
    AWS_SECRET_ACCESS_KEY and
    AWS_STORAGE_BUCKET_NAME and
    AWS_STORAGE_REGION
)
if USE_S3:
    AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
    AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
    AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
    AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
    AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
    DEFAULT_FILE_STORAGE = env.str(
        "DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
    )
# Local filesystem media settings (always defined, regardless of USE_S3).
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
# Without SendGrid credentials (or in DEBUG) fall back to the console backend.
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| 29.602564 | 112 | 0.730908 |
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'weeklyemailapp_1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'weeklyemailapp_1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| true | true |
f7f4bd4cdba5e9c2a6b359065b553ed5fb37e22b | 1,586 | py | Python | examples/9-add-an-attribute-to-the-string-set.py | sprucegum/lucid-dynamodb | 1e7145b14f1f462e2d8fe320b46b3820967bbcbf | [
"MIT"
] | 70 | 2021-05-30T13:34:57.000Z | 2021-06-13T21:05:06.000Z | examples/9-add-an-attribute-to-the-string-set.py | dineshsonachalam/Course_Registration_System | 1e7145b14f1f462e2d8fe320b46b3820967bbcbf | [
"MIT"
] | 21 | 2021-07-31T07:10:51.000Z | 2022-02-05T23:46:21.000Z | examples/9-add-an-attribute-to-the-string-set.py | dineshsonachalam/Course_Registration_System | 1e7145b14f1f462e2d8fe320b46b3820967bbcbf | [
"MIT"
] | 3 | 2021-07-12T10:52:36.000Z | 2021-12-06T19:51:05.000Z | from LucidDynamodb import DynamoDb
from LucidDynamodb.exceptions import (
UnexpectedError
)
import logging
logging.basicConfig(level=logging.INFO)
# Demo: add 'Free Food' to the `benefits` string set of one dev_jobs item,
# then read the item back.  Credentials come from the environment
# (presumably via boto3's default chain -- confirm against DynamoDb()).
if __name__ == "__main__":
    try:
        db = DynamoDb()
        db.update_item(
            table_name="dev_jobs",
            key={
                "company_name": "Google",
                "role_id": "111"
            },
            attributes_to_update={
                'benefits': "Free Food"
            },
            operation="ADD_ATTRIBUTE_TO_STRING_SET"
        )
        logging.info("Update is successful")
        # Read back the same item to show the updated string set.
        item = db.read_item(
            table_name="dev_jobs",
            key={
                "company_name": "Google",
                "role_id": "111"
            }
        )
        logging.info(f"Item: {item}")
    except UnexpectedError as e:
        logging.error(f"Update failed - {e}")
"""
dineshsonachalam@macbook examples % python 9-add-an-attribute-to-the-string-set.py
INFO:botocore.credentials:Found credentials in environment variables.
INFO:root:Update is successful
INFO:root:Item: {
'locations': ['Mountain View, California', 'Austin, Texas', 'Chicago, IL', 'Detroit, Michigan'],
'role_id': '111',
'overall_review': {
'compensation_and_benefits': '3.9/5',
'overall_rating': '4/5',
'yearly_bonus_percent': Decimal('12')
},
'company_name': 'Google',
'role': 'Staff Software Engineer 2',
'yearly_hike_percent': Decimal('8'),
'salary': '$1,50,531',
'benefits': {
'Travel reimbursements',
'Free Food',
'Health insurance',
'Internet, Medical, Edu reimbursements'
}
}
""" | 27.824561 | 97 | 0.598991 | from LucidDynamodb import DynamoDb
from LucidDynamodb.exceptions import (
UnexpectedError
)
import logging
logging.basicConfig(level=logging.INFO)
if __name__ == "__main__":
try:
db = DynamoDb()
db.update_item(
table_name="dev_jobs",
key={
"company_name": "Google",
"role_id": "111"
},
attributes_to_update={
'benefits': "Free Food"
},
operation="ADD_ATTRIBUTE_TO_STRING_SET"
)
logging.info("Update is successful")
item = db.read_item(
table_name="dev_jobs",
key={
"company_name": "Google",
"role_id": "111"
}
)
logging.info(f"Item: {item}")
except UnexpectedError as e:
logging.error(f"Update failed - {e}")
| true | true |
f7f4be00ce835131a743c6fdfce2d9112c021dca | 39,689 | py | Python | gaofenbisai_9436.py | aDecisionTree/HRNet_for_PolSAR_seg | 5243437ffa99ac4bce074d8f19bbdc1ec054f4b0 | [
"MIT"
] | 2 | 2021-05-18T15:27:00.000Z | 2022-02-16T01:40:02.000Z | gaofenbisai_9436.py | aDecisionTree/HRNet_for_PolSAR_seg | 5243437ffa99ac4bce074d8f19bbdc1ec054f4b0 | [
"MIT"
] | 1 | 2021-11-08T09:38:36.000Z | 2021-11-10T03:01:23.000Z | gaofenbisai_9436.py | aDecisionTree/HRNet_for_PolSAR_seg | 5243437ffa99ac4bce074d8f19bbdc1ec054f4b0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from PIL import Image,ImagePalette
import numpy as np
import yaml
from skimage import io
from torchvision import transforms
import os
import logging
# import functools
import torch
import torch.nn as nn
# import torch._utils
import torch.nn.functional as F
import torch.optim as optim
import xml.dom.minidom as xml
import glob
import threading
"""# label test
[ 0, 15, 40, 45, 190, 220]
"""
def writeDoc(filename_s, resultfile_s, path_s):
    """Write the contest-format annotation XML for one segmentation result.

    filename_s   -- name of the input image the result belongs to
    resultfile_s -- name of the produced label image
    path_s       -- destination path of the XML file (UTF-8, tab-indented)
    """
    doc = xml.Document()
    annotation = doc.createElement('annotation')
    doc.appendChild(annotation)
    # (section tag, [(child tag, text), ...]) in the exact order required by
    # the submission format: source, research, segmentation.
    sections = [
        ('source', [('filename', filename_s),
                    ('origin', 'GF2/GF3')]),
        ('research', [('version', '4.0'),
                      ('provider', '中国海洋大学'),
                      ('author', '抹茶拿铁'),
                      ('pluginname', '地物标注'),
                      ('pluginclass', '标注'),
                      ('time', '2020-07-2020-11')]),
        ('segmentation', [('resultfile', resultfile_s)]),
    ]
    for section_tag, children in sections:
        section = doc.createElement(section_tag)
        annotation.appendChild(section)
        for tag, text in children:
            child = doc.createElement(tag)
            child.appendChild(doc.createTextNode(text))
            section.appendChild(child)
    # Pretty-print with tabs/newlines and write as UTF-8 bytes; the context
    # manager closes the file.
    with open(path_s, 'wb') as fp:
        fp.write(doc.toprettyxml(indent='\t', newl='\n', encoding='utf-8'))
palette = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 0, 0, 102, 0, 0, 153, 0, 0, 204, 0, 0, 255, 0, 0, 0, 51, 0, 51, 51, 0, 102, 51, 0, 153, 51, 0, 204, 51, 0, 255, 51, 0, 0, 102, 0, 51, 102, 0, 102, 102, 0, 153, 102, 0, 204, 102, 0, 255, 102, 0, 0, 153, 0, 51, 153, 0, 102, 153, 0, 153, 153, 0, 204, 153, 0, 255, 153, 0, 0, 204, 0, 51, 204, 0, 102, 204, 0, 153, 204, 0, 204, 204, 0, 255, 204, 0, 0, 255, 0, 51, 255, 0, 102, 255, 0, 153, 255, 0, 204, 255, 0, 255, 255, 0, 0, 0, 51, 51, 0, 51, 102, 0, 51, 153, 0, 51, 204, 0, 51, 255, 0, 51, 0, 51, 51, 51, 51, 51, 102, 51, 51, 153, 51, 51, 204, 51, 51, 255, 51, 51, 0, 102, 51, 51, 102, 51, 102, 102, 51, 153, 102, 51, 204, 102, 51, 255, 102, 51, 0, 153, 51, 51, 153, 51, 102, 153, 51, 153, 153, 51, 204, 153, 51, 255, 153, 51, 0, 204, 51, 51, 204, 51, 102, 204, 51, 153, 204, 51, 204, 204, 51, 255, 204, 51, 0, 255, 51, 51, 255, 51, 102, 255, 51, 153, 255, 51, 204, 255, 51, 255, 255, 51, 0, 0, 102, 51, 0, 102, 102, 0, 102, 153, 0, 102, 204, 0, 102, 255, 0, 102, 0, 51, 102, 51, 51, 102, 102, 51, 102, 153, 51, 102, 204, 51, 102, 255, 51, 102, 0, 102, 102, 51, 102, 102, 102, 102, 102, 153, 102, 102, 204, 102, 102, 255, 102, 102, 0, 153, 102, 51, 153, 102, 102, 153, 102, 153, 153, 102, 204, 153, 102, 255, 153, 102, 0, 204, 102, 51, 204, 102, 102, 204, 102, 153, 204, 102, 204, 204, 102, 255, 204, 102, 0, 255, 102, 51, 255, 102, 102, 255, 102, 153, 255, 102, 204, 255, 102, 255, 255, 102, 0, 0, 153, 51, 0, 153, 102, 0, 153, 153, 0, 153, 204, 0, 153, 255, 0, 153, 0, 51, 153, 51, 51, 153, 102, 51, 153, 153, 51, 153, 204, 51, 153, 255, 51, 153, 0, 102, 153, 51, 102, 153, 102, 102, 153, 153, 102, 153, 204, 102, 153, 255, 102, 153, 0, 153, 153, 51, 153, 153, 102, 153, 153, 153, 153, 153, 204, 153, 153, 255, 153, 153, 0, 204, 153, 51, 204, 153, 102, 204, 153, 153, 204, 153, 204, 204, 153, 255, 204, 153, 0, 255, 153, 51, 255, 153, 102, 255, 153, 153, 255, 153, 204, 255, 153, 255, 
255, 153, 0, 0, 204, 51, 0, 204, 102, 0, 204, 153, 0, 204, 204, 0, 204, 255, 0, 204, 0, 51, 204, 51, 51, 204, 102, 51, 204, 153, 51, 204, 204, 51, 204, 255, 51, 204, 0, 102, 204, 51, 102, 204, 102, 102, 204, 153, 102, 204, 204, 102, 204, 255, 102, 204, 0, 153, 204, 51, 153, 204, 102, 153, 204, 153, 153, 204, 204, 153, 204, 255, 153, 204, 0, 204, 204, 51, 204, 204, 102, 204, 204, 153, 204, 204, 204, 204, 204, 255, 204, 204, 0, 255, 204, 51, 255, 204, 102, 255, 204, 153, 255, 204, 204, 255, 204, 255, 255, 204, 0, 0, 255, 51, 0, 255, 102, 0, 255, 153, 0, 255, 204, 0, 255, 255, 0, 255, 0, 51, 255, 51, 51, 255, 102, 51, 255, 153, 51, 255, 204, 51, 255, 255, 51, 255, 0, 102, 255, 51, 102, 255, 102, 102, 255, 153, 102, 255, 204, 102, 255, 255, 102, 255, 0, 153, 255, 51, 153, 255, 102, 153, 255, 153, 153, 255, 204, 153, 255, 255, 153, 255, 0, 204, 255, 51, 204, 255, 102, 204, 255, 153, 204, 255, 204, 204, 255, 255, 204, 255, 0, 255, 255, 51, 255, 255, 102, 255, 255, 153, 255, 255, 204, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# Grey-level pixel values used by the ground-truth label images; the position
# of a value in this list is its training class id (index 0 = value 0).
mapping = [0, 15, 40, 45, 190, 220, 225]
def labels_encode(gt):
    """Map raw label pixel values (see module-level `mapping`) to class ids.

    Pixels whose value does not appear in `mapping` stay 0, i.e. they fall
    into class 0 together with pixel value 0.
    """
    encoded = np.zeros_like(gt)
    for class_id, pixel_value in enumerate(mapping):
        encoded[gt == pixel_value] = class_id
    return encoded
def labels_decode(output):
    """Inverse of labels_encode: map class ids back to label pixel values.

    Uses len(mapping) instead of the previous hard-coded 7 so the encoder and
    decoder stay in sync if the class list ever changes.  Ids outside the
    mapping range are left as 0.
    """
    res = np.zeros_like(output)
    for i in range(len(mapping)):
        res[output == i] = mapping[i]
    return res
def labels2RGB(labels):
    """Render a class-id map as an RGB image via the global 256-entry palette."""
    indexed = Image.fromarray(labels.astype('uint8'))
    indexed.putpalette(palette)
    return indexed.convert('RGB')
"""# Dataset"""
# 1024
# [array([134.35576, 181.84496, 179.46925, 141.47711], dtype=float32)] [array([142.3712 , 167.54785, 165.98781, 139.46089], dtype=float32)]
# transform = transforms.Compose([
# transforms.Normalize(mean=[134.35576, 181.84496, 179.46925, 141.47711],std=[142.3712 , 167.54785, 165.98781, 139.46089]),
# ])
# 768
# transform = transforms.Compose([
# transforms.Normalize(mean=[132.03269, 178.74885, 176.47111, 139.48150],std=[129.54710, 154.04905, 152.75477, 128.39875]),
# ])
# 512
# Per-channel (HH, HV, VH, VV) normalisation statistics for the 512-clipped
# images (the 768/1024-clip variants are kept commented above for reference).
transform = transforms.Compose([
    transforms.Normalize(mean=[127.40368, 171.65473, 169.60202, 135.26735],std=[110.52666, 132.01543, 131.15236, 111.38657]),
])
class TestDSA(torch.utils.data.Dataset):
    """Test split A: one sample = the four polarisation channels of a scene.

    __getitem__ returns (img, index_str, mask) where img is a normalised
    float32 tensor of shape (4, H, W) in HH/HV/VH/VV order, index_str is the
    1-based sample id as a string, and mask flags pixels whose raw HH value
    is exactly 0 (presumably no-data pixels -- TODO confirm).
    """

    def __init__(self):
        # Four tiff files (HH/HV/VH/VV) per sample, hence // 4.
        self.files = glob.glob('/input_path/test_A/*.tiff')
        self.len = len(self.files) // 4

    def __getitem__(self, index):
        # Files on disk are 1-based while DataLoader indices are 0-based.
        index = index + 1
        data_dir = '/input_path/test_A/'
        channels = []
        for pol in ('HH', 'HV', 'VH', 'VV'):
            arr = io.imread(data_dir + str(index) + '_' + pol + '.tiff')
            if pol == 'HH':
                # Zero HH backscatter marks invalid pixels.
                mask = arr == 0
            channels.append(torch.from_numpy(arr.astype('float32')).unsqueeze(0))
        img = torch.cat(channels, 0)
        # Clip extreme backscatter before normalisation (matches the stats
        # used in `transform`).
        img[img > 512] = 512
        img = transform(img)
        return img, str(index), mask

    def __len__(self):
        return self.len
class TestDSB(torch.utils.data.Dataset):
    """Test split B: identical to TestDSA but reads from /input_path/test_B.

    __getitem__ returns (img, index_str, mask) where img is a normalised
    float32 tensor of shape (4, H, W) in HH/HV/VH/VV order, index_str is the
    1-based sample id as a string, and mask flags pixels whose raw HH value
    is exactly 0 (presumably no-data pixels -- TODO confirm).
    """

    def __init__(self):
        # Four tiff files (HH/HV/VH/VV) per sample, hence // 4.
        self.files = glob.glob('/input_path/test_B/*.tiff')
        self.len = len(self.files) // 4

    def __getitem__(self, index):
        # Files on disk are 1-based while DataLoader indices are 0-based.
        index = index + 1
        data_dir = '/input_path/test_B/'
        channels = []
        for pol in ('HH', 'HV', 'VH', 'VV'):
            arr = io.imread(data_dir + str(index) + '_' + pol + '.tiff')
            if pol == 'HH':
                # Zero HH backscatter marks invalid pixels.
                mask = arr == 0
            channels.append(torch.from_numpy(arr.astype('float32')).unsqueeze(0))
        img = torch.cat(channels, 0)
        # Clip extreme backscatter before normalisation (matches the stats
        # used in `transform`).
        img[img > 512] = 512
        img = transform(img)
        return img, str(index), mask

    def __len__(self):
        return self.len
# Order-preserving loaders for the two test splits (shuffle=False so each
# prediction can be written back under its sample index).
te_dsA = TestDSA()
test_loaderA = torch.utils.data.DataLoader(dataset=te_dsA, batch_size=8, shuffle=False, num_workers=2)
te_dsB = TestDSB()
test_loaderB = torch.utils.data.DataLoader(dataset=te_dsB, batch_size=8, shuffle=False, num_workers=2)
# class TestDS(torch.utils.data.Dataset):
# def __init__(self):
# # files = os.listdir('/input_path')
# # newfiles = [data for data in files if re.match('.*tiff', data)]
# # self.len = newfiles.__len__()//4
# self.files = glob.glob('/input_path/*.tiff')
# self.len = self.files.__len__()//4
# def __getitem__(self, index):
# index = index+1
# data_dir = '/input_path/'
# HH_dir = data_dir + str(index) + '_HH.tiff'
# HV_dir = data_dir + str(index) + '_HV.tiff'
# VH_dir = data_dir + str(index) + '_VH.tiff'
# VV_dir = data_dir + str(index) + '_VV.tiff'
# # gt_dir = data_dir + str(index) + '_gt.png'
# img_HH = io.imread(HH_dir)
# mask = img_HH == 0
# img_HH = torch.from_numpy(img_HH.astype('float32')).unsqueeze(0)
# img_HV = torch.from_numpy(io.imread(HV_dir).astype('float32')).unsqueeze(0)
# img_VH = torch.from_numpy(io.imread(VH_dir).astype('float32')).unsqueeze(0)
# img_VV = torch.from_numpy(io.imread(VV_dir).astype('float32')).unsqueeze(0)
# # gt = np.array(Image.open(gt_dir).convert('P'))
# # gt = labels_encode(gt)
# # gt = torch.from_numpy(gt)
# img = torch.cat((img_HH, img_HV, img_VH, img_VV), 0)
# img[img>512]=512
# img = transform(img)
# return img,str(index),mask
# def __len__(self):
# return self.len
# te_ds = TestDS()
# test_loader = torch.utils.data.DataLoader(dataset=te_ds, batch_size=4, shuffle=False, num_workers=1)
"""# read config"""
# Load the model configuration; the context manager guarantees the file
# handle is closed (previously it was opened and never closed).  `stream`
# stays bound afterwards for backward compatibility.
with open('/workspace/code/ocr_cfg.yaml', 'r') as stream:
    cfg = yaml.load(stream, Loader=yaml.FullLoader)
"""# Build model"""
BN_MOMENTUM = 0.1  # momentum for every BatchNorm2d in the backbone
ALIGN_CORNERS = True  # shared align_corners flag for bilinear interpolation
relu_inplace = True  # whether ReLUs may overwrite their input tensor
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution; padding=1 keeps the spatial size
    unchanged at stride 1."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return conv
def BNReLU(num_features, bn_type=None, **kwargs):
    """BatchNorm2d followed by ReLU.

    `bn_type` is accepted for API compatibility but ignored here; any extra
    keyword arguments are forwarded to nn.BatchNorm2d.
    """
    layers = [nn.BatchNorm2d(num_features, **kwargs), nn.ReLU()]
    return nn.Sequential(*layers)
class SpatialGather_Module(nn.Module):
    """Aggregate pixel features into per-class object representations.

    Each class's representation is the soft-weighted sum of all pixel
    features, using the (scaled, softmaxed) class probability map as weights.
    Output shape: N x C x K x 1 for N x C x H x W features and N x K x H x W
    probabilities.
    """

    def __init__(self, cls_num=0, scale=1):
        super(SpatialGather_Module, self).__init__()
        self.cls_num = cls_num
        self.scale = scale

    def forward(self, feats, probs):
        n, k = probs.size(0), probs.size(1)
        class_scores = probs.view(n, k, -1)                      # N x K x HW
        pixels = feats.view(n, feats.size(1), -1).permute(0, 2, 1)  # N x HW x C
        weights = F.softmax(self.scale * class_scores, dim=2)    # over pixels
        gathered = torch.matmul(weights, pixels)                 # N x K x C
        return gathered.permute(0, 2, 1).unsqueeze(3)            # N x C x K x 1
class _ObjectAttentionBlock(nn.Module):
    '''
    Object-context attention: refine each pixel feature with a weighted sum
    of the object-region features.
    Input:
        x     : N x C x H x W pixel features
        proxy : object-region features (N x C x K x 1)
    Parameters:
        in_channels : the dimension of the input feature map
        key_channels : the dimension after the key/query transform
        scale : choose the scale to downsample the input feature maps (save memory cost)
        bn_type : specify the bn type
    Return:
        N X C X H X W
    '''
    def __init__(self,
                 in_channels,
                 key_channels,
                 scale=1,
                 bn_type=None):
        super(_ObjectAttentionBlock, self).__init__()
        self.scale = scale
        self.in_channels = in_channels
        self.key_channels = key_channels
        self.pool = nn.MaxPool2d(kernel_size=(scale, scale))

        def proj(cin, cout):
            # one bias-free 1x1 projection followed by BN+ReLU
            return [nn.Conv2d(in_channels=cin, out_channels=cout,
                              kernel_size=1, stride=1, padding=0, bias=False),
                    BNReLU(cout, bn_type=bn_type)]

        # Query (pixels) and key (objects) use a double projection, value and
        # the output transform a single one.  The Sequential layouts match
        # the reference implementation so existing checkpoints still load.
        self.f_pixel = nn.Sequential(
            *(proj(self.in_channels, self.key_channels)
              + proj(self.key_channels, self.key_channels)))
        self.f_object = nn.Sequential(
            *(proj(self.in_channels, self.key_channels)
              + proj(self.key_channels, self.key_channels)))
        self.f_down = nn.Sequential(
            *proj(self.in_channels, self.key_channels))
        self.f_up = nn.Sequential(
            *proj(self.key_channels, self.in_channels))

    def forward(self, x, proxy):
        batch, height, width = x.size(0), x.size(2), x.size(3)
        if self.scale > 1:
            x = self.pool(x)

        query = self.f_pixel(x).view(batch, self.key_channels, -1)
        query = query.permute(0, 2, 1)                                  # N x HW x key
        key = self.f_object(proxy).view(batch, self.key_channels, -1)   # N x key x K
        value = self.f_down(proxy).view(batch, self.key_channels, -1)
        value = value.permute(0, 2, 1)                                  # N x K x key

        # scaled dot-product attention over the K object regions
        sim_map = torch.matmul(query, key)
        sim_map = (self.key_channels ** -.5) * sim_map
        sim_map = F.softmax(sim_map, dim=-1)

        context = torch.matmul(sim_map, value)                          # N x HW x key
        context = context.permute(0, 2, 1).contiguous()
        context = context.view(batch, self.key_channels, *x.size()[2:])
        context = self.f_up(context)
        if self.scale > 1:
            # restore the pre-pooling resolution
            context = F.interpolate(input=context, size=(height, width),
                                    mode='bilinear', align_corners=ALIGN_CORNERS)
        return context
class ObjectAttentionBlock2D(_ObjectAttentionBlock):
    """2-D specialisation of the object attention block (adds no extra logic)."""

    def __init__(self, in_channels, key_channels, scale=1, bn_type=None):
        super(ObjectAttentionBlock2D, self).__init__(
            in_channels,
            key_channels,
            scale,
            bn_type=bn_type,
        )
class SpatialOCR_Module(nn.Module):
    """OCR distribution step.

    Enriches every pixel feature with its attended object-region context,
    then fuses the concatenated pair back to `out_channels` with a 1x1 conv.
    """

    def __init__(self, in_channels, key_channels, out_channels, scale=1,
                 dropout=0.1, bn_type=None):
        super(SpatialOCR_Module, self).__init__()
        self.object_context_block = ObjectAttentionBlock2D(
            in_channels, key_channels, scale, bn_type)
        # Pixel features and context are concatenated channel-wise.
        fused_channels = 2 * in_channels
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(fused_channels, out_channels, kernel_size=1, padding=0,
                      bias=False),
            BNReLU(out_channels, bn_type=bn_type),
            nn.Dropout2d(dropout)
        )

    def forward(self, feats, proxy_feats):
        # Attend from pixels to the per-class proxies, then fuse.
        context = self.object_context_block(feats, proxy_feats)
        return self.conv_bn_dropout(torch.cat([context, feats], 1))
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet v1 style), no channel expansion."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=relu_inplace)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut is identity unless a projection was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return self.relu(y + shortcut)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck, expanding channels 4x."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        width = planes
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(width, width * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(width * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=relu_inplace)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut is identity unless a projection was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return self.relu(y + shortcut)
class HighResolutionModule(nn.Module):
    """One HRNet stage: `num_branches` parallel resolution streams, each a
    chain of residual blocks, followed by an all-to-all resolution fusion.

    NOTE(review): `num_inchannels` is mutated in place by _make_one_branch,
    so after __init__ it holds each branch's OUTPUT channel count.
    """
    def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
                 num_channels, fuse_method, multi_scale_output=True):
        super(HighResolutionModule, self).__init__()
        self._check_branches(
            num_branches, blocks, num_blocks, num_inchannels, num_channels)
        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method  # stored only; forward always fuses by summation
        self.num_branches = num_branches
        self.multi_scale_output = multi_scale_output
        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=relu_inplace)
    def _check_branches(self, num_branches, blocks, num_blocks,
                        num_inchannels, num_channels):
        # Fail fast if any per-branch config list disagrees with num_branches.
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
                num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
                num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
                num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)
    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        # Build one branch: num_blocks[branch_index] residual blocks at a
        # single resolution.
        downsample = None
        if stride != 1 or \
                self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            # 1x1 projection so the first block's shortcut matches its output.
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(num_channels[branch_index] * block.expansion,
                               momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(self.num_inchannels[branch_index],
                            num_channels[branch_index], stride, downsample))
        # Record the expanded width for the remaining blocks and for callers.
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index],
                                num_channels[branch_index]))
        return nn.Sequential(*layers)
    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        branches = []
        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))
        return nn.ModuleList(branches)
    def _make_fuse_layers(self):
        # fuse_layers[i][j] maps branch j's output to branch i's width:
        # j > i: 1x1 conv + BN (spatial upsampling happens in forward),
        # j == i: identity (None),
        # j < i: chain of stride-2 3x3 convs, one per resolution step.
        if self.num_branches == 1:
            return None
        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(num_inchannels[j],
                                  num_inchannels[i],
                                  1,
                                  1,
                                  0,
                                  bias=False),
                        nn.BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM)))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i-j):
                        if k == i - j - 1:
                            # Last downsampling step: switch to the target
                            # width and omit the ReLU (sum happens next).
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                nn.BatchNorm2d(num_outchannels_conv3x3,
                                               momentum=BN_MOMENTUM)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                nn.BatchNorm2d(num_outchannels_conv3x3,
                                               momentum=BN_MOMENTUM),
                                nn.ReLU(inplace=relu_inplace)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)
    def get_num_inchannels(self):
        # After __init__ these are the branch OUTPUT widths (see class note).
        return self.num_inchannels
    def forward(self, x):
        # x: list of per-branch feature maps, highest resolution first.
        if self.num_branches == 1:
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                elif j > i:
                    # Coarser branch j: channel-match, then upsample to
                    # branch i's spatial size before summing.
                    width_output = x[i].shape[-1]
                    height_output = x[i].shape[-2]
                    y = y + F.interpolate(
                        self.fuse_layers[i][j](x[j]),
                        size=[height_output, width_output],
                        mode='bilinear', align_corners=ALIGN_CORNERS)
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
# Residual-block registry: maps the BLOCK string from the config to a class.
blocks_dict = dict(BASIC=BasicBlock, BOTTLENECK=Bottleneck)
class HighResolutionNet(nn.Module):
    """HRNet backbone (4-channel input) with an OCR segmentation head.

    forward() returns [aux_logits, ocr_logits]; both have
    config['DATASET']['NUM_CLASSES'] channels at 1/4 input resolution.
    """

    def __init__(self, config, **kwargs):
        global ALIGN_CORNERS
        # BUGFIX: the original read the module-global `cfg` throughout
        # __init__ and ignored the `config` parameter.  Use the parameter;
        # existing callers (get_seg_model) pass the same dict, so behaviour
        # at current call sites is unchanged.
        extra = config['MODEL']['EXTRA']
        super(HighResolutionNet, self).__init__()
        ALIGN_CORNERS = config['MODEL']['ALIGN_CORNERS']
        # Stem: two stride-2 3x3 convs -> 64 channels at 1/4 resolution.
        self.conv1 = nn.Conv2d(4, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=relu_inplace)
        # Stage 1: single-resolution residual layer.
        self.stage1_cfg = extra['STAGE1']
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        stage1_out_channel = block.expansion * num_channels
        # Stages 2-4: multi-resolution modules, with transition layers that
        # adapt channel counts and spawn new lower-resolution branches.
        self.stage2_cfg = extra['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer(
            [stage1_out_channel], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)
        self.stage3_cfg = extra['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)
        self.stage4_cfg = extra['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=True)
        # OCR head operating on the concatenation of all four branches.
        # BUGFIX: np.int was deprecated in NumPy 1.20 and later removed;
        # the builtin int is the documented replacement.
        last_inp_channels = int(np.sum(pre_stage_channels))
        ocr_mid_channels = config['MODEL']['OCR']['MID_CHANNELS']
        ocr_key_channels = config['MODEL']['OCR']['KEY_CHANNELS']
        self.conv3x3_ocr = nn.Sequential(
            nn.Conv2d(last_inp_channels, ocr_mid_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(ocr_mid_channels),
            nn.ReLU(inplace=relu_inplace),
        )
        self.ocr_gather_head = SpatialGather_Module(config['DATASET']['NUM_CLASSES'])
        self.ocr_distri_head = SpatialOCR_Module(in_channels=ocr_mid_channels,
                                                 key_channels=ocr_key_channels,
                                                 out_channels=ocr_mid_channels,
                                                 scale=1,
                                                 dropout=0.05,
                                                 )
        self.cls_head = nn.Conv2d(
            ocr_mid_channels, config['DATASET']['NUM_CLASSES'], kernel_size=1, stride=1, padding=0, bias=True)
        self.aux_head = nn.Sequential(
            nn.Conv2d(last_inp_channels, last_inp_channels,
                      kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(last_inp_channels),
            nn.ReLU(inplace=relu_inplace),
            nn.Conv2d(last_inp_channels, config['DATASET']['NUM_CLASSES'],
                      kernel_size=1, stride=1, padding=0, bias=True)
        )

    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        """Adapters between stages: 3x3 conv where the channel count changes,
        None for identity, and stride-2 conv chains for newly added branches."""
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  3,
                                  1,
                                  1,
                                  bias=False),
                        nn.BatchNorm2d(
                            num_channels_cur_layer[i], momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=relu_inplace)))
                else:
                    transition_layers.append(None)
            else:
                # New lower-resolution branch: downsample from the coarsest
                # previous branch, one stride-2 conv per resolution step.
                conv3x3s = []
                for j in range(i+1-num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i-num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(
                            inchannels, outchannels, 3, 2, 1, bias=False),
                        nn.BatchNorm2d(outchannels, momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=relu_inplace)))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; the first gets a projection
        shortcut when stride or channel count changes."""
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, num_inchannels,
                    multi_scale_output=True):
        """Chain NUM_MODULES HighResolutionModules for one stage; returns the
        stage and its output channel counts."""
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']
        modules = []
        for i in range(num_modules):
            # multi_scale_output is only relevant for the last module.
            if not multi_scale_output and i == num_modules - 1:
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(
                HighResolutionModule(num_branches,
                                     block,
                                     num_blocks,
                                     num_inchannels,
                                     num_channels,
                                     fuse_method,
                                     reset_multi_scale_output)
            )
            num_inchannels = modules[-1].get_num_inchannels()
        return nn.Sequential(*modules), num_inchannels

    def forward(self, x):
        # Stem + stage 1.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)
        # Stage 2.
        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)
        # Stage 3 (new branches are fed from the coarsest existing one).
        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if self.transition2[i] is not None:
                if i < self.stage2_cfg['NUM_BRANCHES']:
                    x_list.append(self.transition2[i](y_list[i]))
                else:
                    x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)
        # Stage 4.
        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if self.transition3[i] is not None:
                if i < self.stage3_cfg['NUM_BRANCHES']:
                    x_list.append(self.transition3[i](y_list[i]))
                else:
                    x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        x = self.stage4(x_list)
        # Upsample every branch to the finest resolution and concatenate.
        x0_h, x0_w = x[0].size(2), x[0].size(3)
        x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
        x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
        x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
        feats = torch.cat([x[0], x1, x2, x3], 1)
        # OCR head: coarse aux prediction -> class context -> refined logits.
        out_aux_seg = []
        out_aux = self.aux_head(feats)
        feats = self.conv3x3_ocr(feats)
        context = self.ocr_gather_head(feats, out_aux)
        feats = self.ocr_distri_head(feats, context)
        out = self.cls_head(feats)
        out_aux_seg.append(out_aux)
        out_aux_seg.append(out)
        return out_aux_seg

    def init_weights(self, pretrained=''):
        """Normal-init convs and constant-init BN, then overwrite with any
        matching keys from a checkpoint file at `pretrained` (if it exists)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.001)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if os.path.isfile(pretrained):
            pretrained_dict = torch.load(pretrained)
            model_dict = self.state_dict()
            # Keep only keys that exist in this model.
            pretrained_dict = {k: v for k, v in pretrained_dict.items()
                               if k in model_dict.keys()}
            model_dict.update(pretrained_dict)
            self.load_state_dict(model_dict)
def get_seg_model(cfg, **kwargs):
    """Instantiate HighResolutionNet from *cfg* and initialise its weights."""
    net = HighResolutionNet(cfg, **kwargs)
    net.init_weights(cfg['MODEL']['PRETRAINED'])
    return net
# Build the model from the notebook-global config and move it to the GPU.
model = get_seg_model(cfg).cuda()
"""# Load model"""
# NOTE(review): checkpoint path is hard-coded to the training environment.
checkpoint = torch.load('/workspace/code/c_9436_512.pth')
# checkpoint = torch.load('/workspace/code/c_9436_512.pth',map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['state_dict'])
"""# Test model"""
# Superseded by write_resA/write_resB below; kept for reference.
# def write_res(output,mask,name):
# output[mask]=0
# save_img=labels_decode(output)
# save_img = Image.fromarray(save_img)
# save_img.putpalette(palette)
# save_img = save_img.convert('RGB')
# save_img.save('/output_path/'+name+'_gt.png')
# writeDoc(name+'_HH.tiff', name+'_gt.png', '/output_path/'+name+'.xml')
def write_resA(output, mask, name, out_dir='/output_path/test_A/'):
    """Save one test-A prediction as an indexed PNG plus its annotation XML.

    output: (H, W) uint8 class-id map (mutated in place: masked pixels are
            zeroed, as in the original code).
    mask:   boolean array marking invalid (zero-fill) pixels.
    name:   sample id used in every generated file name.
    out_dir: output directory — parameterised (default keeps the original
             behaviour) to remove the hard-coded duplication with write_resB.
    """
    output[mask] = 0
    save_img = labels_decode(output)
    save_img = Image.fromarray(save_img)
    save_img.putpalette(palette)
    save_img = save_img.convert('RGB')
    save_img.save(out_dir + name + '_gt.png')
    writeDoc(name + '_HH.tiff', name + '_gt.png', out_dir + name + '.xml')
def write_resB(output, mask, name, out_dir='/output_path/test_B/'):
    """Save one test-B prediction as an indexed PNG plus its annotation XML.

    output: (H, W) uint8 class-id map (mutated in place: masked pixels are
            zeroed, as in the original code).
    mask:   boolean array marking invalid (zero-fill) pixels.
    name:   sample id used in every generated file name.
    out_dir: output directory — parameterised (default keeps the original
             behaviour) to remove the hard-coded duplication with write_resA.
    """
    output[mask] = 0
    save_img = labels_decode(output)
    save_img = Image.fromarray(save_img)
    save_img.putpalette(palette)
    save_img = save_img.convert('RGB')
    save_img.save(out_dir + name + '_gt.png')
    writeDoc(name + '_HH.tiff', name + '_gt.png', out_dir + name + '.xml')
# Inference over both test splits; one worker thread is started per sample
# to write the PNG/XML results while the next batch runs.
with torch.no_grad():
    model.eval()
    for img ,name,mask in test_loaderA:
        img = img.cuda()
        output = model(img)
        # output[1] is the refined OCR logits map; upsample to the tile size.
        output = F.interpolate(input = output[1], size = (512, 512), mode = 'bilinear', align_corners=True)
        output = output.detach_().cpu()
        # Per-pixel argmax over classes -> uint8 label map per sample.
        output = np.asarray(np.argmax(output, axis=1), dtype=np.uint8)
        for i in range(output.shape[0]):
            # NOTE(review): threads are never joined — the script relies on
            # them completing before interpreter exit.
            threading.Thread(target = write_resA,args=(output[i],mask[i],name[i])).start()
# threading.Thread(target = write_res,args=(output[0],mask[0],name[0])).start()
# threading.Thread(target = write_res,args=(output[1],mask[1],name[1])).start()
# threading.Thread(target = write_res,args=(output[2],mask[2],name[2])).start()
# threading.Thread(target = write_res,args=(output[3],mask[3],name[3])).start()
    for img ,name,mask in test_loaderB:
        img = img.cuda()
        output = model(img)
        output = F.interpolate(input = output[1], size = (512, 512), mode = 'bilinear', align_corners=True)
        output = output.detach_().cpu()
        output = np.asarray(np.argmax(output, axis=1), dtype=np.uint8)
        for i in range(output.shape[0]):
            threading.Thread(target = write_resB,args=(output[i],mask[i],name[i])).start()
# threading.Thread(target = write_res,args=(output[0],mask[0],name[0])).start()
# threading.Thread(target = write_res,args=(output[1],mask[1],name[1])).start()
# threading.Thread(target = write_res,args=(output[2],mask[2],name[2])).start()
# threading.Thread(target = write_res,args=(output[3],mask[3],name[3])).start()
# for i in range(output.shape[0]):
# output[i][mask[i]]=0
# save_img=labels_decode(output[i])
# save_img = Image.fromarray(save_img)
# save_img.putpalette(palette)
# save_img = save_img.convert('RGB')
# save_img.save('/output_path/'+name[i]+'_gt.png')
# writeDoc(name[i]+'_HH.tiff', name[i]+'_gt.png', '/output_path/'+name[i]+'.xml')
| 42.267306 | 3,286 | 0.587568 |
from PIL import Image,ImagePalette
import numpy as np
import yaml
from skimage import io
from torchvision import transforms
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import xml.dom.minidom as xml
import glob
import threading
def writeDoc(filename_s, resultfile_s, path_s):
    """Write the competition annotation XML for one result image.

    filename_s:   source image file name (goes into <filename>).
    resultfile_s: result PNG file name (goes into <resultfile>).
    path_s:       destination path of the XML file (UTF-8, pretty-printed).

    Improvements over the original: the repetitive element construction is
    factored into a helper, and the redundant fp.close() inside the `with`
    block is removed (the context manager already closes the file).
    """
    # Fixed provenance fields, identical for every generated annotation.
    origin_s = 'GF2/GF3'
    version_s = '4.0'
    provider_s = '中国海洋大学'
    author_s = '抹茶拿铁'
    pluginname_s = '地物标注'
    pluginclass_s = '标注'
    time_s = '2020-07-2020-11'
    doc = xml.Document()

    def text_node(tag, text):
        # Helper: build <tag>text</tag>.
        node = doc.createElement(tag)
        node.appendChild(doc.createTextNode(text))
        return node

    annotation = doc.createElement('annotation')
    doc.appendChild(annotation)
    source = doc.createElement('source')
    source.appendChild(text_node('filename', filename_s))
    source.appendChild(text_node('origin', origin_s))
    annotation.appendChild(source)
    research = doc.createElement('research')
    for tag, value in (('version', version_s),
                       ('provider', provider_s),
                       ('author', author_s),
                       ('pluginname', pluginname_s),
                       ('pluginclass', pluginclass_s),
                       ('time', time_s)):
        research.appendChild(text_node(tag, value))
    annotation.appendChild(research)
    segmentation = doc.createElement('segmentation')
    segmentation.appendChild(text_node('resultfile', resultfile_s))
    annotation.appendChild(segmentation)
    with open(path_s, 'wb') as fp:
        fp.write(doc.toprettyxml(indent='\t', newl='\n', encoding='utf-8'))
palette = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 0, 0, 102, 0, 0, 153, 0, 0, 204, 0, 0, 255, 0, 0, 0, 51, 0, 51, 51, 0, 102, 51, 0, 153, 51, 0, 204, 51, 0, 255, 51, 0, 0, 102, 0, 51, 102, 0, 102, 102, 0, 153, 102, 0, 204, 102, 0, 255, 102, 0, 0, 153, 0, 51, 153, 0, 102, 153, 0, 153, 153, 0, 204, 153, 0, 255, 153, 0, 0, 204, 0, 51, 204, 0, 102, 204, 0, 153, 204, 0, 204, 204, 0, 255, 204, 0, 0, 255, 0, 51, 255, 0, 102, 255, 0, 153, 255, 0, 204, 255, 0, 255, 255, 0, 0, 0, 51, 51, 0, 51, 102, 0, 51, 153, 0, 51, 204, 0, 51, 255, 0, 51, 0, 51, 51, 51, 51, 51, 102, 51, 51, 153, 51, 51, 204, 51, 51, 255, 51, 51, 0, 102, 51, 51, 102, 51, 102, 102, 51, 153, 102, 51, 204, 102, 51, 255, 102, 51, 0, 153, 51, 51, 153, 51, 102, 153, 51, 153, 153, 51, 204, 153, 51, 255, 153, 51, 0, 204, 51, 51, 204, 51, 102, 204, 51, 153, 204, 51, 204, 204, 51, 255, 204, 51, 0, 255, 51, 51, 255, 51, 102, 255, 51, 153, 255, 51, 204, 255, 51, 255, 255, 51, 0, 0, 102, 51, 0, 102, 102, 0, 102, 153, 0, 102, 204, 0, 102, 255, 0, 102, 0, 51, 102, 51, 51, 102, 102, 51, 102, 153, 51, 102, 204, 51, 102, 255, 51, 102, 0, 102, 102, 51, 102, 102, 102, 102, 102, 153, 102, 102, 204, 102, 102, 255, 102, 102, 0, 153, 102, 51, 153, 102, 102, 153, 102, 153, 153, 102, 204, 153, 102, 255, 153, 102, 0, 204, 102, 51, 204, 102, 102, 204, 102, 153, 204, 102, 204, 204, 102, 255, 204, 102, 0, 255, 102, 51, 255, 102, 102, 255, 102, 153, 255, 102, 204, 255, 102, 255, 255, 102, 0, 0, 153, 51, 0, 153, 102, 0, 153, 153, 0, 153, 204, 0, 153, 255, 0, 153, 0, 51, 153, 51, 51, 153, 102, 51, 153, 153, 51, 153, 204, 51, 153, 255, 51, 153, 0, 102, 153, 51, 102, 153, 102, 102, 153, 153, 102, 153, 204, 102, 153, 255, 102, 153, 0, 153, 153, 51, 153, 153, 102, 153, 153, 153, 153, 153, 204, 153, 153, 255, 153, 153, 0, 204, 153, 51, 204, 153, 102, 204, 153, 153, 204, 153, 204, 204, 153, 255, 204, 153, 0, 255, 153, 51, 255, 153, 102, 255, 153, 153, 255, 153, 204, 255, 153, 255, 
255, 153, 0, 0, 204, 51, 0, 204, 102, 0, 204, 153, 0, 204, 204, 0, 204, 255, 0, 204, 0, 51, 204, 51, 51, 204, 102, 51, 204, 153, 51, 204, 204, 51, 204, 255, 51, 204, 0, 102, 204, 51, 102, 204, 102, 102, 204, 153, 102, 204, 204, 102, 204, 255, 102, 204, 0, 153, 204, 51, 153, 204, 102, 153, 204, 153, 153, 204, 204, 153, 204, 255, 153, 204, 0, 204, 204, 51, 204, 204, 102, 204, 204, 153, 204, 204, 204, 204, 204, 255, 204, 204, 0, 255, 204, 51, 255, 204, 102, 255, 204, 153, 255, 204, 204, 255, 204, 255, 255, 204, 0, 0, 255, 51, 0, 255, 102, 0, 255, 153, 0, 255, 204, 0, 255, 255, 0, 255, 0, 51, 255, 51, 51, 255, 102, 51, 255, 153, 51, 255, 204, 51, 255, 255, 51, 255, 0, 102, 255, 51, 102, 255, 102, 102, 255, 153, 102, 255, 204, 102, 255, 255, 102, 255, 0, 153, 255, 51, 153, 255, 102, 153, 255, 153, 153, 255, 204, 153, 255, 255, 153, 255, 0, 204, 255, 51, 204, 255, 102, 204, 255, 153, 204, 255, 204, 204, 255, 255, 204, 255, 0, 255, 255, 51, 255, 255, 102, 255, 255, 153, 255, 255, 204, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
mapping = [0, 15, 40, 45, 190, 220, 225]
def labels_encode(gt):
    """Map raw ground-truth gray values to contiguous class ids [0, 6].

    Values not present in `mapping` are left at 0 (background).
    """
    encoded = np.zeros_like(gt)
    for class_id, gray in enumerate(mapping):
        encoded[gt == gray] = class_id
    return encoded
def labels_decode(output):
    """Inverse of labels_encode: class ids [0, 6] back to gray values."""
    decoded = np.zeros_like(output)
    for class_id in range(7):
        decoded[output == class_id] = mapping[class_id]
    return decoded
def labels2RGB(labels):
    """Render an integer label map as an RGB image via the global palette."""
    indexed = Image.fromarray(labels.astype('uint8'))
    indexed.putpalette(palette)
    return indexed.convert('RGB')
# Per-channel normalisation for the 4 polarisations (HH, HV, VH, VV).
# NOTE(review): mean/std presumably computed over the training split after
# the clip at 512 applied in the datasets — confirm.
transform = transforms.Compose([
    transforms.Normalize(mean=[127.40368, 171.65473, 169.60202, 135.26735],std=[110.52666, 132.01543, 131.15236, 111.38657]),
])
class TestDSA(torch.utils.data.Dataset):
    """Test split A: 4-channel (HH, HV, VH, VV) SAR tiles read from disk.

    Sample index i (0-based) maps to files ``{i+1}_{POL}.tiff``.
    __getitem__ returns (normalised image tensor, sample name, invalid-pixel
    mask), where the mask marks pixels that are exactly 0 in the HH channel.
    """

    def __init__(self, data_dir='/input_path/test_A/'):
        # data_dir is parameterised (default keeps the original behaviour),
        # removing the hard-coded duplication with TestDSB.
        self.data_dir = data_dir
        self.files = glob.glob(data_dir + '*.tiff')
        # Four polarisation files per sample (len() instead of .__len__()).
        self.len = len(self.files) // 4

    def __getitem__(self, index):
        name = str(index + 1)
        channels = []
        mask = None
        for pol in ('HH', 'HV', 'VH', 'VV'):
            arr = io.imread(self.data_dir + name + '_' + pol + '.tiff')
            if pol == 'HH':
                # Zero-valued HH pixels are treated as invalid fill.
                mask = arr == 0
            channels.append(torch.from_numpy(arr.astype('float32')).unsqueeze(0))
        img = torch.cat(channels, 0)
        # Clip extreme backscatter values before normalisation.
        img[img > 512] = 512
        img = transform(img)
        return img, name, mask

    def __len__(self):
        return self.len
class TestDSB(torch.utils.data.Dataset):
    """Test split B: 4-channel (HH, HV, VH, VV) SAR tiles read from disk.

    Sample index i (0-based) maps to files ``{i+1}_{POL}.tiff``.
    __getitem__ returns (normalised image tensor, sample name, invalid-pixel
    mask), where the mask marks pixels that are exactly 0 in the HH channel.
    """

    def __init__(self, data_dir='/input_path/test_B/'):
        # data_dir is parameterised (default keeps the original behaviour),
        # removing the hard-coded duplication with TestDSA.
        self.data_dir = data_dir
        self.files = glob.glob(data_dir + '*.tiff')
        # Four polarisation files per sample (len() instead of .__len__()).
        self.len = len(self.files) // 4

    def __getitem__(self, index):
        name = str(index + 1)
        channels = []
        mask = None
        for pol in ('HH', 'HV', 'VH', 'VV'):
            arr = io.imread(self.data_dir + name + '_' + pol + '.tiff')
            if pol == 'HH':
                # Zero-valued HH pixels are treated as invalid fill.
                mask = arr == 0
            channels.append(torch.from_numpy(arr.astype('float32')).unsqueeze(0))
        img = torch.cat(channels, 0)
        # Clip extreme backscatter values before normalisation.
        img[img > 512] = 512
        img = transform(img)
        return img, name, mask

    def __len__(self):
        return self.len
# Batched, order-preserving (shuffle=False) loaders over the two test splits.
te_dsA = TestDSA()
test_loaderA = torch.utils.data.DataLoader(dataset=te_dsA, batch_size=8, shuffle=False, num_workers=2)
te_dsB = TestDSB()
test_loaderB = torch.utils.data.DataLoader(dataset=te_dsB, batch_size=8, shuffle=False, num_workers=2)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding=1 and no bias (ResNet-style).

    BUGFIX: the `def` line was truncated to 'anes, stride=1):' in this copy
    (extraction artifact); reconstructed from the fully visible body and the
    conv3x3(inplanes, planes, stride) call sites in BasicBlock.
    """
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)
def BNReLU(num_features, bn_type=None, **kwargs):
    """BatchNorm2d followed by ReLU, packaged as one Sequential.

    `bn_type` is accepted for interface compatibility but unused here;
    **kwargs are forwarded to BatchNorm2d.
    """
    layers = [nn.BatchNorm2d(num_features, **kwargs), nn.ReLU()]
    return nn.Sequential(*layers)
class SpatialGather_Module(nn.Module):
    """Aggregate pixel features into one descriptor per class.

    The class probability maps (softmaxed over the spatial dimension) act
    as soft attention weights over the pixel features.
    """

    def __init__(self, cls_num=0, scale=1):
        super(SpatialGather_Module, self).__init__()
        self.cls_num = cls_num
        self.scale = scale

    def forward(self, feats, probs):
        n, k = probs.size(0), probs.size(1)
        # Flatten spatial dims: attention (N, K, H*W), pixels (N, H*W, C).
        attn = F.softmax(self.scale * probs.view(n, k, -1), dim=2)
        pix = feats.view(n, feats.size(1), -1).permute(0, 2, 1)
        # (N, K, C) class descriptors -> (N, C, K, 1) for 1x1 convs downstream.
        return torch.matmul(attn, pix).permute(0, 2, 1).unsqueeze(3)
class _ObjectAttentionBlock(nn.Module):
    """Pixel-to-object attention (OCR): every pixel queries the per-class
    proxy features and receives a weighted sum of their values.

    NOTE(review): forward() uses the module-global ALIGN_CORNERS, which is
    assigned in HighResolutionNet.__init__ — confirm initialisation order.
    """
    def __init__(self,
                 in_channels,
                 key_channels,
                 scale=1,
                 bn_type=None):
        super(_ObjectAttentionBlock, self).__init__()
        self.scale = scale  # >1 max-pools the pixel map before attending
        self.in_channels = in_channels
        self.key_channels = key_channels
        self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
        # Query projection for pixel features: two 1x1 conv + BN-ReLU stages.
        self.f_pixel = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0, bias=False),
            BNReLU(self.key_channels, bn_type=bn_type),
            nn.Conv2d(in_channels=self.key_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0, bias=False),
            BNReLU(self.key_channels, bn_type=bn_type),
        )
        # Key projection for the object (proxy) features.
        self.f_object = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0, bias=False),
            BNReLU(self.key_channels, bn_type=bn_type),
            nn.Conv2d(in_channels=self.key_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0, bias=False),
            BNReLU(self.key_channels, bn_type=bn_type),
        )
        # Value projection for the proxy features (single stage).
        self.f_down = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0, bias=False),
            BNReLU(self.key_channels, bn_type=bn_type),
        )
        # Maps the attended context back to in_channels.
        self.f_up = nn.Sequential(
            nn.Conv2d(in_channels=self.key_channels, out_channels=self.in_channels,
                      kernel_size=1, stride=1, padding=0, bias=False),
            BNReLU(self.in_channels, bn_type=bn_type),
        )
    def forward(self, x, proxy):
        """x: (N, C, H, W) pixel features; proxy: per-class features,
        flattened to (N, key, K). Returns a context map sized like x."""
        batch_size, h, w = x.size(0), x.size(2), x.size(3)
        if self.scale > 1:
            x = self.pool(x)
        # Queries: one key_channels vector per pixel -> (N, H*W, key).
        query = self.f_pixel(x).view(batch_size, self.key_channels, -1)
        query = query.permute(0, 2, 1)
        # Keys (N, key, K) and values (N, K, key) from the proxies.
        key = self.f_object(proxy).view(batch_size, self.key_channels, -1)
        value = self.f_down(proxy).view(batch_size, self.key_channels, -1)
        value = value.permute(0, 2, 1)
        # Scaled dot-product attention over the K object regions.
        sim_map = torch.matmul(query, key)
        sim_map = (self.key_channels**-.5) * sim_map
        sim_map = F.softmax(sim_map, dim=-1)
        context = torch.matmul(sim_map, value)
        context = context.permute(0, 2, 1).contiguous()
        context = context.view(batch_size, self.key_channels, *x.size()[2:])
        context = self.f_up(context)
        if self.scale > 1:
            # Restore the pre-pooling resolution.
            context = F.interpolate(input=context, size=(h, w), mode='bilinear', align_corners=ALIGN_CORNERS)
        return context
class ObjectAttentionBlock2D(_ObjectAttentionBlock):
    """2-D variant of the generic object-attention block (adds no logic)."""

    def __init__(self, in_channels, key_channels, scale=1, bn_type=None):
        # Forward every argument unchanged to the generic base class.
        super(ObjectAttentionBlock2D, self).__init__(
            in_channels=in_channels,
            key_channels=key_channels,
            scale=scale,
            bn_type=bn_type,
        )
class SpatialOCR_Module(nn.Module):
    """OCR distribution step.

    Enriches every pixel feature with its attended object-region context,
    then fuses the concatenated pair back to `out_channels` with a 1x1 conv.
    """

    def __init__(self, in_channels, key_channels, out_channels, scale=1,
                 dropout=0.1, bn_type=None):
        super(SpatialOCR_Module, self).__init__()
        self.object_context_block = ObjectAttentionBlock2D(
            in_channels, key_channels, scale, bn_type)
        # Pixel features and context are concatenated channel-wise.
        fused_channels = 2 * in_channels
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(fused_channels, out_channels, kernel_size=1, padding=0,
                      bias=False),
            BNReLU(out_channels, bn_type=bn_type),
            nn.Dropout2d(dropout)
        )

    def forward(self, feats, proxy_feats):
        # Attend from pixels to the per-class proxies, then fuse.
        context = self.object_context_block(feats, proxy_feats)
        return self.conv_bn_dropout(torch.cat([context, feats], 1))
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet v1 style), no channel expansion."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=relu_inplace)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut is identity unless a projection was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return self.relu(y + shortcut)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck, expanding channels 4x."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        width = planes
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(width, width * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(width * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=relu_inplace)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut is identity unless a projection was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return self.relu(y + shortcut)
class HighResolutionModule(nn.Module):
    """One HRNet stage: parallel branches at different resolutions plus fusion
    layers that exchange information across resolutions after each module."""
    def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
                 num_channels, fuse_method, multi_scale_output=True):
        super(HighResolutionModule, self).__init__()
        self._check_branches(
            num_branches, blocks, num_blocks, num_inchannels, num_channels)
        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches
        self.multi_scale_output = multi_scale_output
        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=relu_inplace)
    def _check_branches(self, num_branches, blocks, num_blocks,
                        num_inchannels, num_channels):
        # Fail fast if the per-branch config lists disagree with num_branches.
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
                num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
                num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
                num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)
    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        """Build one branch: a sequence of `block`s at a fixed resolution,
        with a 1x1 conv downsample on the first block if shapes change."""
        downsample = None
        if stride != 1 or \
                self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(num_channels[branch_index] * block.expansion,
                               momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(self.num_inchannels[branch_index],
                            num_channels[branch_index], stride, downsample))
        # Record the branch's new channel count for later fuse-layer sizing.
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index],
                                num_channels[branch_index]))
        return nn.Sequential(*layers)
    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        branches = []
        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))
        return nn.ModuleList(branches)
    def _make_fuse_layers(self):
        """Build fuse_layers[i][j]: the transform taking branch j's output to
        branch i's resolution/channels (None when i == j; upsampling is done
        at runtime in forward() via F.interpolate)."""
        if self.num_branches == 1:
            return None
        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    # Lower-resolution branch -> 1x1 conv to match channels.
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(num_inchannels[j],
                                  num_inchannels[i],
                                  1,
                                  1,
                                  0,
                                  bias=False),
                        nn.BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM)))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    # Higher-resolution branch -> chain of stride-2 3x3 convs.
                    conv3x3s = []
                    for k in range(i-j):
                        if k == i - j - 1:
                            # Final conv in the chain: no ReLU, target channels.
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                nn.BatchNorm2d(num_outchannels_conv3x3,
                                               momentum=BN_MOMENTUM)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                nn.BatchNorm2d(num_outchannels_conv3x3,
                                               momentum=BN_MOMENTUM),
                                nn.ReLU(inplace=relu_inplace)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)
    def get_num_inchannels(self):
        return self.num_inchannels
    def forward(self, x):
        """x is a list of per-branch tensors; returns the fused list.
        NOTE: mutates the input list in place when num_branches > 1."""
        if self.num_branches == 1:
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                elif j > i:
                    # Upsample the coarser branch to branch i's spatial size.
                    width_output = x[i].shape[-1]
                    height_output = x[i].shape[-2]
                    y = y + F.interpolate(
                        self.fuse_layers[i][j](x[j]),
                        size=[height_output, width_output],
                        mode='bilinear', align_corners=ALIGN_CORNERS)
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
# Registry mapping config block-type names to residual block classes.
blocks_dict = dict(
    BASIC=BasicBlock,
    BOTTLENECK=Bottleneck,
)
class HighResolutionNet(nn.Module):
    """HRNet backbone with an OCR (object-contextual representations) head.

    forward() returns ``[aux_logits, main_logits]``, both at 1/4 of the
    input resolution.
    """
    def __init__(self, config, **kwargs):
        global ALIGN_CORNERS
        # NOTE(review): the `config` parameter is ignored; the module-level
        # `cfg` is read instead.  The only visible caller passes the same
        # object, so behavior is unchanged, but confirm before relying on it.
        extra = cfg['MODEL']['EXTRA']
        super(HighResolutionNet, self).__init__()
        ALIGN_CORNERS = cfg['MODEL']['ALIGN_CORNERS']
        # Stem: two stride-2 3x3 convs -> 1/4 resolution.  Input has 4 channels.
        self.conv1 = nn.Conv2d(4, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=relu_inplace)
        # Stage 1: a single-resolution residual stack.
        self.stage1_cfg = extra['STAGE1']
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        stage1_out_channel = block.expansion*num_channels
        # Stages 2-4: multi-resolution HighResolutionModules, each preceded by
        # a transition layer that adds/adjusts branches.
        self.stage2_cfg = extra['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer(
            [stage1_out_channel], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)
        self.stage3_cfg = extra['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)
        self.stage4_cfg = extra['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=True)
        # FIX: `np.int` was a deprecated alias removed in NumPy 1.24; the
        # builtin `int` is the drop-in replacement.
        last_inp_channels = int(np.sum(pre_stage_channels))
        ocr_mid_channels = cfg['MODEL']['OCR']['MID_CHANNELS']
        ocr_key_channels = cfg['MODEL']['OCR']['KEY_CHANNELS']
        # OCR head: pixel features -> object regions -> augmented features.
        self.conv3x3_ocr = nn.Sequential(
            nn.Conv2d(last_inp_channels, ocr_mid_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(ocr_mid_channels),
            nn.ReLU(inplace=relu_inplace),
        )
        self.ocr_gather_head = SpatialGather_Module(cfg['DATASET']['NUM_CLASSES'])
        self.ocr_distri_head = SpatialOCR_Module(in_channels=ocr_mid_channels,
                                                 key_channels=ocr_key_channels,
                                                 out_channels=ocr_mid_channels,
                                                 scale=1,
                                                 dropout=0.05,
                                                 )
        self.cls_head = nn.Conv2d(
            ocr_mid_channels, cfg['DATASET']['NUM_CLASSES'], kernel_size=1, stride=1, padding=0, bias=True)
        # Auxiliary head on the raw concatenated HRNet features.
        self.aux_head = nn.Sequential(
            nn.Conv2d(last_inp_channels, last_inp_channels,
                      kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(last_inp_channels),
            nn.ReLU(inplace=relu_inplace),
            nn.Conv2d(last_inp_channels, cfg['DATASET']['NUM_CLASSES'],
                      kernel_size=1, stride=1, padding=0, bias=True)
        )
    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        """Build per-branch transitions between stages: channel-adjusting 3x3
        convs for existing branches, stride-2 conv chains for new ones."""
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  3,
                                  1,
                                  1,
                                  bias=False),
                        nn.BatchNorm2d(
                            num_channels_cur_layer[i], momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=relu_inplace)))
                else:
                    # Identity transition; handled as None at runtime.
                    transition_layers.append(None)
            else:
                # New (lower-resolution) branch derived from the last pre-branch.
                conv3x3s = []
                for j in range(i+1-num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i-num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(
                            inchannels, outchannels, 3, 2, 1, bias=False),
                        nn.BatchNorm2d(outchannels, momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=relu_inplace)))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Build a plain residual stack (used for stage 1)."""
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)
    def _make_stage(self, layer_config, num_inchannels,
                    multi_scale_output=True):
        """Build a stage as a Sequential of HighResolutionModules; returns the
        stage and the per-branch output channel counts."""
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']
        modules = []
        for i in range(num_modules):
            # Only the last module of a stage may drop multi-scale output.
            if not multi_scale_output and i == num_modules - 1:
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(
                HighResolutionModule(num_branches,
                                     block,
                                     num_blocks,
                                     num_inchannels,
                                     num_channels,
                                     fuse_method,
                                     reset_multi_scale_output)
            )
            num_inchannels = modules[-1].get_num_inchannels()
        return nn.Sequential(*modules), num_inchannels
    def forward(self, x):
        # Stem + stage 1.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)
        # Stages 2-4 with branch transitions.
        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)
        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if self.transition2[i] is not None:
                if i < self.stage2_cfg['NUM_BRANCHES']:
                    x_list.append(self.transition2[i](y_list[i]))
                else:
                    # New branch is built from the coarsest previous output.
                    x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)
        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if self.transition3[i] is not None:
                if i < self.stage3_cfg['NUM_BRANCHES']:
                    x_list.append(self.transition3[i](y_list[i]))
                else:
                    x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        x = self.stage4(x_list)
        # Upsample all branches to the finest resolution and concatenate.
        x0_h, x0_w = x[0].size(2), x[0].size(3)
        x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
        x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
        x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
        feats = torch.cat([x[0], x1, x2, x3], 1)
        out_aux_seg = []
        # Auxiliary logits guide the OCR region gathering.
        out_aux = self.aux_head(feats)
        feats = self.conv3x3_ocr(feats)
        context = self.ocr_gather_head(feats, out_aux)
        feats = self.ocr_distri_head(feats, context)
        out = self.cls_head(feats)
        out_aux_seg.append(out_aux)
        out_aux_seg.append(out)
        return out_aux_seg
    def init_weights(self, pretrained='',):
        """Randomly initialize conv/BN weights, then overwrite any parameters
        whose names match keys in the optional pretrained checkpoint."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.001)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if os.path.isfile(pretrained):
            pretrained_dict = torch.load(pretrained)
            model_dict = self.state_dict()
            # Keep only checkpoint entries that exist in this model.
            pretrained_dict = {k: v for k, v in pretrained_dict.items()
                               if k in model_dict.keys()}
            model_dict.update(pretrained_dict)
            self.load_state_dict(model_dict)
def get_seg_model(cfg, **kwargs):
    """Build the HRNet-OCR segmentation model and load its pretrained
    backbone weights (path taken from the config)."""
    net = HighResolutionNet(cfg, **kwargs)
    net.init_weights(cfg['MODEL']['PRETRAINED'])
    return net
# Instantiate the model on the GPU and restore the trained weights
# from a fixed checkpoint path.
model = get_seg_model(cfg).cuda()
checkpoint = torch.load('/workspace/code/c_9436_512.pth')
model.load_state_dict(checkpoint['state_dict'])
def write_resA(output, mask, name):
    """Zero out masked pixels, colorize the predicted label map, and save the
    PNG plus its XML annotation for test set A."""
    output[mask] = 0
    decoded = labels_decode(output)
    image = Image.fromarray(decoded)
    image.putpalette(palette)
    image = image.convert('RGB')
    image.save('/output_path/test_A/' + name + '_gt.png')
    writeDoc(name + '_HH.tiff', name + '_gt.png', '/output_path/test_A/' + name + '.xml')
def write_resB(output, mask, name):
    """Zero out masked pixels, colorize the predicted label map, and save the
    PNG plus its XML annotation for test set B."""
    output[mask] = 0
    decoded = labels_decode(output)
    image = Image.fromarray(decoded)
    image.putpalette(palette)
    image = image.convert('RGB')
    image.save('/output_path/test_B/' + name + '_gt.png')
    writeDoc(name + '_HH.tiff', name + '_gt.png', '/output_path/test_B/' + name + '.xml')
# Inference over both test sets; results are written by worker threads.
with torch.no_grad():
    model.eval()
    for img ,name,mask in test_loaderA:
        img = img.cuda()
        output = model(img)
        # output is [aux_logits, main_logits]; use the main OCR head and
        # upsample to the full 512x512 image size.
        output = F.interpolate(input = output[1], size = (512, 512), mode = 'bilinear', align_corners=True)
        output = output.detach_().cpu()
        # Per-pixel argmax over the class dimension -> uint8 label map.
        output = np.asarray(np.argmax(output, axis=1), dtype=np.uint8)
        for i in range(output.shape[0]):
            # One non-daemon writer thread per image; the interpreter waits
            # for them at exit, but they are never explicitly joined.
            threading.Thread(target = write_resA,args=(output[i],mask[i],name[i])).start()
    for img ,name,mask in test_loaderB:
        img = img.cuda()
        output = model(img)
        output = F.interpolate(input = output[1], size = (512, 512), mode = 'bilinear', align_corners=True)
        output = output.detach_().cpu()
        output = np.asarray(np.argmax(output, axis=1), dtype=np.uint8)
        for i in range(output.shape[0]):
            threading.Thread(target = write_resB,args=(output[i],mask[i],name[i])).start()
| true | true |
f7f4bf0eb39d44e3864f7b087c652ccf7ed75d87 | 589 | py | Python | stepik/stepik 1_6_7.py | vittorio5/python_training | 8efba515d3e8da343dd038acee176f5ee021b230 | [
"Apache-2.0"
] | null | null | null | stepik/stepik 1_6_7.py | vittorio5/python_training | 8efba515d3e8da343dd038acee176f5ee021b230 | [
"Apache-2.0"
] | null | null | null | stepik/stepik 1_6_7.py | vittorio5/python_training | 8efba515d3e8da343dd038acee176f5ee021b230 | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
import time
from selenium.webdriver.common.by import By
# Fill every text input on the demo form with the same answer and submit.
try:
    browser = webdriver.Chrome()
    browser.get("http://suninjuly.github.io/huge_form.html")
    elements = browser.find_elements(By.TAG_NAME, "input")
    for element in elements:
        element.send_keys("Мой ответ")
    button = browser.find_element(By.CSS_SELECTOR, "button.btn")
    button.click()
finally:
    # leave 30 seconds to copy the success code from the page
    time.sleep(30)
    # close the browser after all interactions
    browser.quit()
# не забываем оставить пустую строку в конце файла | 28.047619 | 64 | 0.724958 | from selenium import webdriver
import time
from selenium.webdriver.common.by import By
try:
browser = webdriver.Chrome()
browser.get("http://suninjuly.github.io/huge_form.html")
elements = browser.find_elements(By.TAG_NAME, "input")
for element in elements:
element.send_keys("Мой ответ")
button = browser.find_element(By.CSS_SELECTOR, "button.btn")
button.click()
finally:
time.sleep(30)
browser.quit()
| true | true |
f7f4c13112143253c489354d4e72eec33b7521a1 | 29 | py | Python | ciw/trackers/__init__.py | KAI10/Ciw | be035267e197ac75f8da5f0d966ef02dffb3692f | [
"MIT"
] | 107 | 2016-11-18T22:44:58.000Z | 2022-03-29T01:38:12.000Z | ciw/trackers/__init__.py | KAI10/Ciw | be035267e197ac75f8da5f0d966ef02dffb3692f | [
"MIT"
] | 117 | 2016-09-25T19:12:39.000Z | 2022-03-31T14:01:47.000Z | ciw/trackers/__init__.py | KAI10/Ciw | be035267e197ac75f8da5f0d966ef02dffb3692f | [
"MIT"
] | 34 | 2016-12-21T12:04:29.000Z | 2022-03-29T10:46:29.000Z | from .state_tracker import *
| 14.5 | 28 | 0.793103 | from .state_tracker import *
| true | true |
f7f4c1fcb6d95e6e2ba354b9da6b781d0b1ed22f | 26,234 | py | Python | old_projects/triangle_of_power/triangle.py | ianyhx/manim | 9df81ef2526025a699053e409e9543a345e670ba | [
"MIT"
] | null | null | null | old_projects/triangle_of_power/triangle.py | ianyhx/manim | 9df81ef2526025a699053e409e9543a345e670ba | [
"MIT"
] | null | null | null | old_projects/triangle_of_power/triangle.py | ianyhx/manim | 9df81ef2526025a699053e409e9543a345e670ba | [
"MIT"
] | null | null | null | import numbers
from big_ol_pile_of_manim_imports import *
from functools import reduce
OPERATION_COLORS = [YELLOW, GREEN, BLUE_B]
def get_equation(index, x = 2, y = 3, z = 8, expression_only = False):
    """Return a colored TexMobject for one of the three related operations:
    index 0 -> root, 1 -> logarithm, 2 -> exponent.  When expression_only is
    True the '= result' part is omitted.

    FIX: the original had trailing commas after the tex1 assignments, making
    tex1 a 1-tuple, so `tex1 + tex2` raised TypeError (tuple + str).
    """
    assert(index in [0, 1, 2])
    if index == 0:
        tex1 = "\\sqrt[%d]{%d}"%(y, z)
        tex2 = " = %d"%x
    elif index == 1:
        tex1 = "\\log_%d(%d)"%(x, z)
        tex2 = " = %d"%y
    elif index == 2:
        tex1 = "%d^%d"%(x, y)
        tex2 = " = %d"%z
    if expression_only:
        tex = tex1
    else:
        tex = tex1+tex2
    return TexMobject(tex).set_color(OPERATION_COLORS[index])
def get_inverse_rules():
    """Return the six inverse-rule equations as TexMobjects.

    FIX: returns a list instead of `map(...)`.  Under Python 3 `map` yields a
    one-shot, non-subscriptable iterator, but callers slice the result
    (`rules[:4]`, `rules[::2]`) and iterate it more than once.
    """
    return [TexMobject(tex) for tex in [
        "x^{\\log_x(z)} = z",
        "\\log_x\\left(x^y \\right) = y",
        "\\sqrt[y]{x^y} = x",
        "\\left(\\sqrt[y]{z}\\right)^y = z",
        "\\sqrt[\\log_x(z)]{z} = x",
        "\\log_{\\sqrt[y]{z}}(z) = y",
    ]]
def get_top_inverse_rules():
    """Build the six nested-triangle inverse diagrams, each grouped with an
    '= <symbol>' label vertically centered on the outer triangle."""
    pairs = [  # Careful of order here!
        (0, 2),
        (0, 1),
        (1, 0),
        (1, 2),
        (2, 0),
        (2, 1),
    ]
    rules = []
    for i, j in pairs:
        top = get_top_inverse(i, j)
        symbol = ["x", "y", "z"][j]
        label = TexMobject("= %s" % symbol)
        label.scale(2)
        label.next_to(top, RIGHT)
        # Align the label vertically with the triangle's center.
        diff = label.get_center() - top.triangle.get_center()
        label.shift(diff[1] * UP)
        rules.append(VMobject(top, label))
    return rules
def get_top_inverse(i, j):
    """Build an outer TOP with symbol i filled and a smaller TOP (carrying
    symbols i and j) nested at the remaining vertex k."""
    labels = ["x", "y", "z"]
    k = ({0, 1, 2} - {i, j}).pop()
    args = [None, None, None]
    args[i] = labels[i]
    big_top = TOP(*args)
    args[j] = labels[j]
    lil_top = TOP(*args, triangle_height_to_number_height = 1.5)
    big_top.set_value(k, lil_top)
    return big_top
class TOP(VMobject):
    """The 'triangle of power' mobject: a triangle with optional values x, y,
    z placed at its lower-left, top, and lower-right vertices respectively."""
    CONFIG = {
        "triangle_height_to_number_height" : 3,
        "offset_multiple" : 1.5,
        "radius" : 1.5,
        "propagate_style_to_family" : False,
    }
    def __init__(self, x = None, y = None, z = None, **kwargs):
        digest_config(self, kwargs, locals())
        VMobject.__init__(self, **kwargs)
    def generate_points(self):
        # Three vertices of an equilateral triangle, starting lower-left.
        vertices = [
            self.radius*rotate_vector(RIGHT, 7*np.pi/6 - i*2*np.pi/3)
            for i in range(3)
        ]
        self.triangle = Polygon(
            *vertices,
            color = WHITE,
            stroke_width = 5
        )
        # Placeholder values; replaced one by one via set_value().
        self.values = [VMobject()]*3
        self.set_values(self.x, self.y, self.z)
    def set_values(self, x, y, z):
        for i, mob in enumerate([x, y, z]):
            self.set_value(i, mob)
    def set_value(self, index, value):
        """Place `value` at vertex `index` and refresh the submobject list."""
        self.values[index] = self.put_on_vertex(index, value)
        self.reset_submobjects()
    def put_on_vertex(self, index, value):
        """Normalize `value` (None/number/str/TOP/mobject) and position it
        just outside the triangle at vertex `index`."""
        assert(index in [0, 1, 2])
        if value is None:
            value = VectorizedPoint()
        if isinstance(value, numbers.Number):
            value = str(value)
        if isinstance(value, str):
            value = TexMobject(value)
        if isinstance(value, TOP):
            # Nested triangles get special centering treatment.
            return self.put_top_on_vertix(index, value)
        self.rescale_corner_mobject(value)
        value.center()
        # Offset direction depends on which corner the value sits at.
        if index == 0:
            offset = -value.get_corner(UP+RIGHT)
        elif index == 1:
            offset = -value.get_bottom()
        elif index == 2:
            offset = -value.get_corner(UP+LEFT)
        value.shift(self.offset_multiple*offset)
        anchors = self.triangle.get_anchors_and_handles()[0]
        value.shift(anchors[index])
        return value
    def put_top_on_vertix(self, index, top):
        # (Method name typo 'vertix' is preserved: it is part of the API.)
        top.scale_to_fit_height(2*self.get_value_height())
        vertices = np.array(top.get_vertices())
        vertices[index] = 0
        # Midpoint of the two non-`index` vertices of the small triangle.
        start = reduce(op.add, vertices)/2
        end = self.triangle.get_anchors_and_handles()[0][index]
        top.shift(end-start)
        return top
    def put_in_vertex(self, index, mobject):
        """Place `mobject` inside the triangle, 70% of the way from the
        center toward vertex `index`."""
        self.rescale_corner_mobject(mobject)
        mobject.center()
        mobject.shift(interpolate(
            self.get_center(),
            self.get_vertices()[index],
            0.7
        ))
        return mobject
    def get_surrounding_circle(self, color = YELLOW):
        return Circle(
            radius = 1.7*self.radius,
            color = color
        ).shift(
            self.triangle.get_center(),
            (self.triangle.get_height()/6)*DOWN
        )
    def rescale_corner_mobject(self, mobject):
        mobject.scale_to_fit_height(self.get_value_height())
        return self
    def get_value_height(self):
        return self.triangle.get_height()/self.triangle_height_to_number_height
    def get_center(self):
        return center_of_mass(self.get_vertices())
    def get_vertices(self):
        return self.triangle.get_anchors_and_handles()[0][:3]
    def reset_submobjects(self):
        self.submobjects = [self.triangle] + self.values
        return self
class IntroduceNotation(Scene):
    """Show '2^3 = 8', then draw the triangle and place 2, 3, 8 on its
    three vertices one by one."""
    def construct(self):
        top = TOP()
        equation = TexMobject("2^3 = 8")
        equation.to_corner(UP+LEFT)
        corner_numbers = [
            top.put_on_vertex(i, value)
            for i, value in enumerate([2, 3, 8])
        ]
        self.play(FadeIn(equation))
        self.wait()
        self.play(ShowCreation(top))
        for number in corner_numbers:
            self.play(ShowCreation(number), run_time=2)
        self.wait()
class ShowRule(Scene):
    """For a given operation index (0=root, 1=log, 2=power), show its classic
    equation next to the triangle, then circle the vertex it solves for."""
    # Allows this scene to be rendered once per operation index.
    args_list = [(0,), (1,), (2,)]
    @staticmethod
    def args_to_string(index):
        return str(index)
    @staticmethod
    def string_to_args(index_string):
        result = int(index_string)
        assert(result in [0, 1, 2])
        return result
    def construct(self, index):
        equation = get_equation(index)
        equation.to_corner(UP+LEFT)
        top = TOP(2, 3, 8)
        new_top = top.copy()
        equals = TexMobject("=").scale(1.5)
        new_top.next_to(equals, LEFT, buff = 1)
        # Pull the solved-for value out to the right of the equals sign.
        new_top.values[index].next_to(equals, RIGHT, buff = 1)
        circle = Circle(
            radius = 1.7*top.radius,
            color = OPERATION_COLORS[index]
        )
        self.add(equation, top)
        self.wait()
        self.play(
            Transform(top, new_top),
            ShowCreation(equals)
        )
        circle.shift(new_top.triangle.get_center_of_mass())
        new_circle = circle.copy()
        new_top.put_on_vertex(index, new_circle)
        self.wait()
        self.play(ShowCreation(circle))
        self.wait()
        # Shrink the circle onto the relevant vertex and tint its value.
        self.play(
            Transform(circle, new_circle),
            ApplyMethod(new_top.values[index].set_color, circle.color)
        )
        self.wait()
class AllThree(Scene):
    """Show all three triangles side by side, each with one vertex blank and
    the corresponding classic expression written beneath it."""
    def construct(self):
        tops = []
        equations = []
        args = (2, 3, 8)
        for i in 2, 1, 0:
            # Blank out one vertex per triangle.
            new_args = list(args)
            new_args[i] = None
            top = TOP(*new_args, triangle_height_to_number_height = 2)
            # top.set_color(OPERATION_COLORS[i])
            top.shift(i*4.5*LEFT)
            equation = get_equation(i, expression_only = True)
            equation.scale(3)
            equation.next_to(top, DOWN, buff = 0.7)
            tops.append(top)
            equations.append(equation)
        VMobject(*tops+equations).center()
        # name = TextMobject("Triangle of Power")
        # name.to_edge(UP)
        for top, eq in zip(tops, equations):
            self.play(FadeIn(top), FadeIn(eq))
            self.wait(3)
        # self.play(Write(name))
        self.wait()
class SixDifferentInverses(Scene):
    """Present the six classic inverse identities, then replace each with its
    triangle-of-power form and animate the cancellation.

    NOTE(review): this scene slices and re-iterates the result of
    get_inverse_rules(), so that helper must return a real list (a Python 3
    `map` iterator would fail here).
    """
    def construct(self):
        rules = get_inverse_rules()
        # Six positions: {top, middle, bottom} x {left, right}.
        vects = it.starmap(op.add, it.product(
            [3*UP, 0.5*UP, 2*DOWN], [2*LEFT, 2*RIGHT]
        ))
        for rule, vect in zip(rules, vects):
            rule.shift(vect)
        general_idea = TexMobject("f(f^{-1}(a)) = a")
        self.play(Write(VMobject(*rules)))
        self.wait()
        # Flash the first four (green) and last two (red) groups.
        for s, color in (rules[:4], GREEN), (rules[4:], RED):
            mob = VMobject(*s)
            self.play(ApplyMethod(mob.set_color, color))
            self.wait()
            self.play(ApplyMethod(mob.set_color, WHITE))
        self.play(
            ApplyMethod(VMobject(*rules[::2]).to_edge, LEFT),
            ApplyMethod(VMobject(*rules[1::2]).to_edge, RIGHT),
            GrowFromCenter(general_idea)
        )
        self.wait()
        top_rules = get_top_inverse_rules()
        for rule, top_rule in zip(rules, top_rules):
            top_rule.scale_to_fit_height(1.5)
            top_rule.center()
            top_rule.shift(rule.get_center())
        self.play(*map(FadeOut, rules))
        self.remove(*rules)
        self.play(*map(GrowFromCenter, top_rules))
        self.wait()
        self.remove(general_idea)
        rules = get_inverse_rules()
        original = None
        for i, (top_rule, rule) in enumerate(zip(top_rules, rules)):
            rule.center().to_edge(UP)
            rule.set_color(GREEN if i < 4 else RED)
            self.add(rule)
            new_top_rule = top_rule.copy().center().scale(1.5)
            anims = [Transform(top_rule, new_top_rule)]
            if original is not None:
                # Restore the previous rule to its grid position.
                anims.append(FadeIn(original))
            original = top_rule.copy()
            self.play(*anims)
            self.wait()
            self.animate_top_rule(top_rule)
            self.remove(rule)
    def animate_top_rule(self, top_rule):
        """Animate the cancellation in one nested-triangle rule: collapse the
        matching triangle pair and slide the surviving symbol to the right."""
        lil_top, lil_symbol, symbol_index = None, None, None
        big_top = top_rule.submobjects[0]
        equals, right_symbol = top_rule.submobjects[1].split()
        # Classify the big triangle's vertices: the nested TOP, the shared
        # symbol, and the empty slot whose small-triangle twin survives.
        for i, value in enumerate(big_top.values):
            if isinstance(value, TOP):
                lil_top = value
            elif isinstance(value, TexMobject):
                symbol_index = i
            else:
                lil_symbol_index = i
        lil_symbol = lil_top.values[lil_symbol_index]
        assert(lil_top is not None and lil_symbol is not None)
        cancel_parts = [
            VMobject(top.triangle, top.values[symbol_index])
            for top in (lil_top, big_top)
        ]
        new_symbol = lil_symbol.copy()
        new_symbol.replace(right_symbol)
        # Mirror the symbol across the equals sign horizontally.
        vect = equals.get_center() - right_symbol.get_center()
        new_symbol.shift(2*vect[0]*RIGHT)
        self.play(
            Transform(*cancel_parts, rate_func = rush_into)
        )
        self.play(
            FadeOut(VMobject(*cancel_parts)),
            Transform(lil_symbol, new_symbol, rate_func = rush_from)
        )
        self.wait()
        self.remove(lil_symbol, top_rule, VMobject(*cancel_parts))
class SixSixSix(Scene):
    """Randolph ponders the six inverse rules (counting them '6, 6, 6'),
    then gives up and decides to study art instead."""
    def construct(self):
        randy = Randolph(mode = "pondering").to_corner()
        bubble = ThoughtBubble().pin_to(randy)
        rules = get_inverse_rules()
        sixes = TexMobject(["6", "6", "6"], next_to_buff = 1)
        sixes.to_corner(UP+RIGHT)
        sixes = sixes.split()
        speech_bubble = SpeechBubble()
        speech_bubble.pin_to(randy)
        speech_bubble.write("I'll just study art!")
        self.add(randy)
        self.play(ShowCreation(bubble))
        bubble.add_content(VectorizedPoint())
        for i, rule in enumerate(rules):
            if i%2 == 0:
                # FIX: use floor division — `i/2` is a float under Python 3
                # and float indices raise TypeError.
                anim = ShowCreation(sixes[i//2])
            else:
                anim = Blink(randy)
            self.play(
                ApplyMethod(bubble.add_content, rule),
                anim
            )
            self.wait()
        self.wait()
        # Swap the thought bubble for an exasperated speech bubble.
        words = speech_bubble.content
        equation = bubble.content
        speech_bubble.clear()
        bubble.clear()
        self.play(
            ApplyMethod(randy.change_mode, "angry"),
            Transform(bubble, speech_bubble),
            Transform(equation, words),
            FadeOut(VMobject(*sixes))
        )
        self.wait()
class AdditiveProperty(Scene):
    """Contrast the classic exponent/log addition rules with their triangle
    forms, then highlight the corresponding pieces of each."""
    def construct(self):
        exp_rule, log_rule = self.write_old_style_rules()
        t_exp_rule, t_log_rule = self.get_new_style_rules()
        self.play(
            ApplyMethod(exp_rule.to_edge, UP),
            ApplyMethod(log_rule.to_edge, DOWN, 1.5)
        )
        t_exp_rule.next_to(exp_rule, DOWN)
        t_exp_rule.set_color(GREEN)
        t_log_rule.next_to(log_rule, UP)
        t_log_rule.set_color(RED)
        self.play(
            FadeIn(t_exp_rule),
            FadeIn(t_log_rule),
            ApplyMethod(exp_rule.set_color, GREEN),
            ApplyMethod(log_rule.set_color, RED),
        )
        self.wait()
        # All TOP mobjects in both triangle equations (iterated once below).
        all_tops = filter(
            lambda m : isinstance(m, TOP),
            t_exp_rule.split()+t_log_rule.split()
        )
        self.put_in_circles(all_tops)
        self.set_color_appropriate_parts(t_exp_rule, t_log_rule)
    def write_old_style_rules(self):
        """Write 'a^x a^y = a^{x+y}', morph its pieces into the log form,
        and return both finished equations."""
        start = TexMobject("a^x a^y = a^{x+y}")
        end = TexMobject("\\log_a(xy) = \\log_a(x) + \\log_a(y)")
        start.shift(UP)
        end.shift(DOWN)
        # Index the individual symbols of each equation by hand.
        a1, x1, a2, y1, eq1, a3, p1, x2, y2 = start.split()
        a4, x3, y3, eq2, a5, x4, p2, a6, y4 = np.array(end.split())[
            [3, 5, 6, 8, 12, 14, 16, 20, 22]
        ]
        start_copy = start.copy()
        self.play(Write(start_copy))
        self.wait()
        self.play(Transform(
            VMobject(a1, x1, a2, y1, eq1, a3, p1, x2, a3.copy(), y2),
            VMobject(a4, x3, a4.copy(), y3, eq2, a5, p2, x4, a6, y4)
        ))
        self.play(Write(end))
        self.clear()
        self.add(start_copy, end)
        self.wait()
        return start_copy, end
    def get_new_style_rules(self):
        """Build the triangle versions of both rules as two rows of
        alternating TOPs and operator symbols."""
        upper_mobs = [
            TOP("a", "x", "R"), Dot(),
            TOP("a", "y", "R"), TexMobject("="),
            TOP("a", "x+y")
        ]
        lower_mobs = [
            TOP("a", None, "xy"), TexMobject("="),
            TOP("a", None, "x"), TexMobject("+"),
            TOP("a", None, "y"),
        ]
        for mob in upper_mobs + lower_mobs:
            if isinstance(mob, TOP):
                mob.scale(0.5)
        for group in upper_mobs, lower_mobs:
            for m1, m2 in zip(group, group[1:]):
                m2.next_to(m1)
        # The 'R' placeholders were only for spacing; blank them out.
        for top in upper_mobs[0], upper_mobs[2]:
            top.set_value(2, None)
        upper_mobs = VMobject(*upper_mobs).center().shift(2*UP)
        lower_mobs = VMobject(*lower_mobs).center().shift(2*DOWN)
        return upper_mobs, lower_mobs
    def put_in_circles(self, tops):
        """Morph a yellow copy of each TOP into a circle on its blank vertex."""
        anims = []
        for top in tops:
            for i, value in enumerate(top.values):
                if isinstance(value, VectorizedPoint):
                    index = i
            circle = top.put_on_vertex(index, Circle(color = WHITE))
            anims.append(
                Transform(top.copy().set_color(YELLOW), circle)
            )
        self.add(*[anim.mobject for anim in anims])
        self.wait()
        self.play(*anims)
        self.wait()
    def set_color_appropriate_parts(self, t_exp_rule, t_log_rule):
        #Horribly hacky
        # Highlight matching circle/operator groups one at a time by flashing
        # a yellow, briefly enlarged copy of each.
        circle1 = t_exp_rule.split()[0].put_on_vertex(
            2, Circle()
        )
        top_dot = t_exp_rule.split()[1]
        circle2 = t_exp_rule.split()[2].put_on_vertex(
            2, Circle()
        )
        top_plus = t_exp_rule.split()[4].values[1]
        bottom_times = t_log_rule.split()[0].values[2]
        circle3 = t_log_rule.split()[2].put_on_vertex(
            1, Circle()
        )
        bottom_plus = t_log_rule.split()[3]
        circle4 = t_log_rule.split()[4].put_on_vertex(
            1, Circle()
        )
        mob_lists = [
            [circle1, top_dot, circle2],
            [top_plus],
            [bottom_times],
            [circle3, bottom_plus, circle4]
        ]
        for mobs in mob_lists:
            copies = VMobject(*mobs).copy()
            self.play(ApplyMethod(
                copies.set_color, YELLOW,
                run_time = 0.5
            ))
            self.play(ApplyMethod(
                copies.scale_in_place, 1.2,
                rate_func = there_and_back
            ))
            self.wait()
            self.remove(copies)
class DrawInsideTriangle(Scene):
    """Write the three interior operation symbols (dot, plus, times) inside
    a scaled-up triangle, one at a time."""
    def construct(self):
        top = TOP()
        top.scale(2)
        symbols = [
            top.put_in_vertex(index, mob)
            for index, mob in enumerate(
                [Dot(), TexMobject("+"), TexMobject("\\times")]
            )
        ]
        symbols[1].set_color(GREEN)
        symbols[2].set_color(YELLOW)
        self.add(top)
        self.wait()
        for symbol in symbols:
            self.play(Write(symbol, run_time = 1))
        self.wait()
class ConstantOnTop(Scene):
    """With a constant 3 on the top vertex, show how x slides between the
    lower corners as 'x^3' becomes 'cube root of x'."""
    def construct(self):
        top = TOP()
        dot = top.put_in_vertex(1, Dot())
        times1 = top.put_in_vertex(0, TexMobject("\\times"))
        times2 = top.put_in_vertex(2, TexMobject("\\times"))
        times1.set_color(YELLOW)
        times2.set_color(YELLOW)
        three = top.put_on_vertex(1, "3")
        lower_left_x = top.put_on_vertex(0, "x")
        lower_right_x = top.put_on_vertex(2, "x")
        x_cubed = TexMobject("x^3").to_edge(UP)
        x_cubed.submobjects.reverse() #To align better
        cube_root_x = TexMobject("\\sqrt[3]{x}").to_edge(UP)
        self.add(top)
        self.play(ShowCreation(three))
        self.play(
            FadeIn(lower_left_x),
            Write(x_cubed),
            run_time = 1
        )
        self.wait()
        # Arc x to the other corner while the expression becomes a root.
        self.play(*[
            Transform(*pair, path_arc = np.pi)
            for pair in [
                (lower_left_x, lower_right_x),
                (x_cubed, cube_root_x),
            ]
        ])
        self.wait(2)
        for mob in dot, times1, times2:
            self.play(ShowCreation(mob))
        self.wait()
def get_const_top_TOP(*args):
    """Build a TOP with a dot inside the top vertex and yellow multiplication
    signs inside both lower vertices (the 'constant on top' decoration)."""
    top = TOP(*args)
    interior = [
        top.put_in_vertex(1, Dot()),
        top.put_in_vertex(0, TexMobject("\\times")),
        top.put_in_vertex(2, TexMobject("\\times")),
    ]
    interior[1].set_color(YELLOW)
    interior[2].set_color(YELLOW)
    top.add(*interior)
    return top
class MultiplyWithConstantTop(Scene):
    """Show (x^3)(y^3) = (xy)^3 with triangles, then swap each x/y/xy to the
    other lower vertex as the classic form becomes the cube-root identity."""
    def construct(self):
        top1 = get_const_top_TOP("x", "3")
        top2 = get_const_top_TOP("y", "3")
        top3 = get_const_top_TOP("xy", "3")
        times = TexMobject("\\times")
        equals = TexMobject("=")
        top_exp_equation = VMobject(
            top1, times, top2, equals, top3
        )
        top_exp_equation.arrange_submobjects()
        old_style_exp = TexMobject("(x^3)(y^3) = (xy)^3")
        old_style_exp.to_edge(UP)
        old_style_exp.set_color(GREEN)
        old_style_rad = TexMobject("\\sqrt[3]{x} \\sqrt[3]{y} = \\sqrt[3]{xy}")
        old_style_rad.to_edge(UP)
        old_style_rad.set_color(RED)
        self.add(top_exp_equation, old_style_exp)
        self.wait(3)
        old_tops = [top1, top2, top3]
        new_tops = []
        for top in old_tops:
            # Move each variable from the lower-left to the lower-right vertex.
            new_top = top.copy()
            new_top.put_on_vertex(2, new_top.values[0])
            new_top.shift(0.5*LEFT)
            new_tops.append(new_top)
        self.play(
            Transform(old_style_exp, old_style_rad),
            Transform(
                VMobject(*old_tops),
                VMobject(*new_tops),
                path_arc = np.pi/2
            )
        )
        self.wait(3)
class RightStaysConstantQ(Scene):
    """Pose the question: with 8 fixed at the lower-right, what operation '?'
    combines the top entries x and y?  Shown first for roots, then for logs."""
    def construct(self):
        top1, top2, top3 = old_tops = [
            TOP(None, s, "8")
            for s in ("x", "y", TexMobject("x?y"))
        ]
        q_mark = TexMobject("?").scale(2)
        equation = VMobject(
            top1, q_mark, top2, TexMobject("="), top3
        )
        equation.arrange_submobjects(buff = 0.7)
        symbols_at_top = VMobject(*[
            top.values[1]
            for top in (top1, top2, top3)
        ])
        # Copies of the same symbols placed on the lower-left vertices,
        # used for the root->log transformation at the end.
        symbols_at_lower_right = VMobject(*[
            top.put_on_vertex(0, top.values[1].copy())
            for top in (top1, top2, top3)
        ])
        old_style_eq1 = TexMobject("\\sqrt[x]{8} ? \\sqrt[y]{8} = \\sqrt[x?y]{8}")
        old_style_eq1.set_color(BLUE)
        old_style_eq2 = TexMobject("\\log_x(8) ? \\log_y(8) = \\log_{x?y}(8)")
        old_style_eq2.set_color(YELLOW)
        for eq in old_style_eq1, old_style_eq2:
            eq.to_edge(UP)
        randy = Randolph()
        randy.to_corner()
        bubble = ThoughtBubble().pin_to(randy)
        bubble.add_content(TOP(None, None, "8"))
        self.add(randy, bubble)
        self.play(ApplyMethod(randy.change_mode, "pondering"))
        self.wait(3)
        triangle = bubble.content.triangle
        eight = bubble.content.values[2]
        bubble.clear()
        self.play(
            Transform(triangle, equation),
            FadeOut(eight),
            # Blow the bubble outline off-screen radially.
            ApplyPointwiseFunction(
                lambda p : (p+2*DOWN)*15/np.linalg.norm(p+2*DOWN),
                bubble
            ),
            FadeIn(old_style_eq1),
            ApplyMethod(randy.shift, 3*DOWN + 3*LEFT),
            run_time = 2
        )
        self.remove(triangle)
        self.add(equation)
        self.wait(4)
        self.play(
            Transform(
                symbols_at_top, symbols_at_lower_right,
                path_arc = np.pi/2
            ),
            Transform(old_style_eq1, old_style_eq2)
        )
        self.wait(2)
class AOplusB(Scene):
    """Display the harmonic-sum-style definition of the a-oplus-b operation."""
    def construct(self):
        formula = TexMobject(
            "a \\oplus b = \\dfrac{1}{\\frac{1}{a} + \\frac{1}{b}}"
        )
        formula.scale(2)
        self.add(formula)
        self.wait()
class ConstantLowerRight(Scene):
    """With 8 fixed at the lower-right, show the oplus rule for logs and then
    transform it into the product rule for roots."""
    def construct(self):
        top = TOP()
        times = top.put_in_vertex(0, TexMobject("\\times"))
        times.set_color(YELLOW)
        oplus = top.put_in_vertex(1, TexMobject("\\oplus"))
        oplus.set_color(BLUE)
        dot = top.put_in_vertex(2, Dot())
        eight = top.put_on_vertex(2, TexMobject("8"))
        self.add(top)
        self.play(ShowCreation(eight))
        for mob in dot, oplus, times:
            self.play(ShowCreation(mob))
        self.wait()
        # Bake the decorations into the TOP so copies carry them too.
        top.add(eight)
        top.add(times, oplus, dot)
        top1, top2, top3 = tops = [
            top.copy() for i in range(3)
        ]
        big_oplus = TexMobject("\\oplus").scale(2).set_color(BLUE)
        equals = TexMobject("=")
        equation = VMobject(
            top1, big_oplus, top2, equals, top3
        )
        equation.arrange_submobjects()
        top3.shift(0.5*RIGHT)
        x, y, xy = [
            t.put_on_vertex(0, s)
            for t, s in zip(tops, ["x", "y", "xy"])
        ]
        old_style_eq = TexMobject(
            "\\dfrac{1}{\\frac{1}{\\log_x(8)} + \\frac{1}{\\log_y(8)}} = \\log_{xy}(8)"
        )
        old_style_eq.to_edge(UP).set_color(RED)
        triple_top_copy = VMobject(*[
            top.copy() for i in range(3)
        ])
        self.clear()
        self.play(
            Transform(triple_top_copy, VMobject(*tops)),
            FadeIn(VMobject(x, y, xy, big_oplus, equals))
        )
        self.remove(triple_top_copy)
        self.add(*tops)
        self.play(Write(old_style_eq))
        self.wait(3)
        syms = VMobject(x, y, xy)
        new_syms = VMobject(*[
            t.put_on_vertex(1, s)
            for t, s in zip(tops, ["x", "y", "x \\oplus y"])
        ])
        new_old_style_eq = TexMobject(
            "\\sqrt[x]{8} \\sqrt[y]{8} = \\sqrt[X]{8}"
        )
        # Replace the placeholder 'X' with the built-up fraction, keeping its
        # lower-right corner fixed while scaling it up.
        X = new_old_style_eq.split()[-4]
        frac = TexMobject("\\frac{1}{\\frac{1}{x} + \\frac{1}{y}}")
        frac.replace(X)
        frac_lower_right = frac.get_corner(DOWN+RIGHT)
        frac.scale(2)
        frac.shift(frac_lower_right - frac.get_corner(DOWN+RIGHT))
        new_old_style_eq.submobjects[-4] = frac
        new_old_style_eq.to_edge(UP)
        new_old_style_eq.set_color(RED)
        big_times = TexMobject("\\times").set_color(YELLOW)
        big_times.shift(big_oplus.get_center())
        self.play(
            Transform(old_style_eq, new_old_style_eq),
            Transform(syms, new_syms, path_arc = np.pi/2),
            Transform(big_oplus, big_times)
        )
        self.wait(4)
class TowerExponentFrame(Scene):
    """
    Still frame contrasting the two readings of the tower exponent
    $3^{3^3}$: the two nesting orders are shown as two nested
    triangles-of-power, side by side under explanatory text.
    """
    def construct(self):
        words = TextMobject("""
            Consider an expression like $3^{3^3}$. It's
            ambiguous whether this means $27^3$ or $3^{27}$,
            which is the difference between $19{,}683$ and
            $7{,}625{,}597{,}484{,}987$. But with the triangle
            of power, the difference is crystal clear:
        """)
        words.scale_to_fit_width(FRAME_WIDTH-1)
        words.to_edge(UP)
        # (3^3)^3 on the left, 3^(3^3) on the right.
        top1 = TOP(TOP(3, 3), 3)
        top2 = TOP(3, (TOP(3, 3)))
        for top in top1, top2:
            top.next_to(words, DOWN)
        top1.shift(3*LEFT)
        top2.shift(3*RIGHT)
        self.add(words, top1, top2)
        self.wait()
class ExponentialGrowth(Scene):
    """
    Still frame: annotating a triangle of power with a dot (constant)
    and a tilde (variable) to disambiguate whether $T^a$ means
    exponential or polynomial growth.
    """
    def construct(self):
        words = TextMobject("""
            Let's say you are studying a certain growth rate,
            and you come across an expression like $T^a$. It
            matters a lot whether you consider $T$ or $a$
            to be the variable, since exponential growth and
            polynomial growth have very different flavors. The
            nice thing about having a triangle that you can write
            inside is that you can clarify this kind of ambiguity
            by writing a little dot next to the constant and
            a ``$\\sim$'' next to the variable.
        """)
        words.scale(0.75)
        words.to_edge(UP)
        top = TOP("T", "a")
        top.next_to(words, DOWN)
        # Dot marks the constant base, tilde marks the variable exponent.
        dot = top.put_in_vertex(0, TexMobject("\\cdot"))
        sim = top.put_in_vertex(1, TexMobject("\\sim"))
        self.add(words, top, dot, sim)
        # NOTE(review): show_frame() appears to render/dump the current
        # frame for a still image — confirm against the Scene API in use.
        self.show_frame()
        self.wait()
class GoExplore(Scene):
    """Closing scene: "Go explore!" slides off-screen to reveal a teaser."""
    def construct(self):
        farewell = TextMobject("Go explore!")
        teaser = TextMobject("by the way \\dots")
        # Park the teaser far off to the right so the shared shift
        # brings it on screen as the farewell leaves.
        teaser.shift(20*RIGHT)
        self.play(Write(farewell))
        self.wait(4)
        both = VMobject(farewell, teaser)
        self.play(ApplyMethod(both.shift, 20*LEFT))
        self.wait(3)
| 31.683575 | 87 | 0.542693 | import numbers
from big_ol_pile_of_manim_imports import *
from functools import reduce
OPERATION_COLORS = [YELLOW, GREEN, BLUE_B]
def get_equation(index, x = 2, y = 3, z = 8, expression_only = False):
    """
    Build a TexMobject for one of the three ways of writing x^y = z.

    index selects the operation: 0 -> radical (y-th root of z),
    1 -> logarithm (log base x of z), 2 -> exponential (x^y).
    If expression_only is True, only the left-hand expression is
    rendered, without the " = result" part.  The mobject is colored
    with the matching entry of OPERATION_COLORS.
    """
    assert(index in [0, 1, 2])
    # FIX: the expressions previously carried stray trailing commas,
    # which made tex1 a 1-tuple; "tex1 + tex2" then raised
    # TypeError (can't concatenate tuple and str).
    if index == 0:
        tex1 = "\\sqrt[%d]{%d}"%(y, z)
        tex2 = " = %d"%x
    elif index == 1:
        tex1 = "\\log_%d(%d)"%(x, z)
        tex2 = " = %d"%y
    else:  # index == 2
        tex1 = "%d^%d"%(x, y)
        tex2 = " = %d"%z
    tex = tex1 if expression_only else tex1 + tex2
    return TexMobject(tex).set_color(OPERATION_COLORS[index])
def get_inverse_rules():
    """
    Return the six f(f^{-1}(a)) = a identities relating exponentials,
    logarithms and radicals, each rendered as a TexMobject.

    FIX: this used to return the raw ``map`` object, which in Python 3
    is a one-shot iterator that cannot be sliced or indexed; callers
    such as SixDifferentInverses do ``rules[:4]`` and iterate the
    result several times, so a list is returned instead.
    """
    return [
        TexMobject(tex)
        for tex in [
            "x^{\\log_x(z)} = z",
            "\\log_x\\left(x^y \\right) = y",
            "\\sqrt[y]{x^y} = x",
            "\\left(\\sqrt[y]{z}\\right)^y = z",
            "\\sqrt[\\log_x(z)]{z} = x",
            "\\log_{\\sqrt[y]{z}}(z) = y",
        ]
    ]
def get_top_inverse_rules():
    """
    Build the six nested-triangle identities matching get_inverse_rules.

    Each (i, j) pair nests a small triangle carrying vertex j inside a
    big triangle missing vertex i (see get_top_inverse); the result is
    paired with an "= x/y/z" TexMobject, shifted so it lines up
    vertically with the triangle's center.
    """
    result = []
    pairs = [
        (0, 2),
        (0, 1),
        (1, 0),
        (1, 2),
        (2, 0),
        (2, 1),
    ]
    for i, j in pairs:
        top = get_top_inverse(i, j)
        char = ["x", "y", "z"][j]
        eq = TexMobject("= %s"%char)
        eq.scale(2)
        eq.next_to(top, RIGHT)
        # Align the equals sign with the triangle's vertical center.
        diff = eq.get_center() - top.triangle.get_center()
        eq.shift(diff[1]*UP)
        result.append(VMobject(top, eq))
    return result
def get_top_inverse(i, j):
    """
    Build a big triangle labeled only at vertex i, whose remaining
    vertex k (the index that is neither i nor j) holds a smaller
    triangle labeled at vertices i and j — i.e. the "apply then invert"
    picture for the (i, j) operation pair.
    """
    args = [None]*3
    k = set([0, 1, 2]).difference([i, j]).pop()
    args[i] = ["x", "y", "z"][i]
    big_top = TOP(*args)
    args[j] = ["x", "y", "z"][j]
    lil_top = TOP(*args, triangle_height_to_number_height = 1.5)
    big_top.set_value(k, lil_top)
    return big_top
class TOP(VMobject):
    """
    "Triangle Of Power": a triangle whose three vertices hold the base,
    exponent and result of x^y = z.

    Vertex indexing used throughout: 0 = lower left (base x),
    1 = top (exponent y), 2 = lower right (result z).  Values may be
    numbers, strings, TexMobjects, or nested TOP instances.
    """
    CONFIG = {
        "triangle_height_to_number_height" : 3,
        "offset_multiple" : 1.5,
        "radius" : 1.5,
        "propagate_style_to_family" : False,
    }
    def __init__(self, x = None, y = None, z = None, **kwargs):
        # digest_config stashes x, y, z (and CONFIG) onto self before
        # VMobject.__init__ triggers generate_points.
        digest_config(self, kwargs, locals())
        VMobject.__init__(self, **kwargs)
    def generate_points(self):
        """Create the triangle and place the three corner values."""
        # Vertices at angles 7pi/6, pi/2, -pi/6 (lower left, top,
        # lower right), radius self.radius around the origin.
        vertices = [
            self.radius*rotate_vector(RIGHT, 7*np.pi/6 - i*2*np.pi/3)
            for i in range(3)
        ]
        self.triangle = Polygon(
            *vertices,
            color = WHITE,
            stroke_width = 5
        )
        self.values = [VMobject()]*3
        self.set_values(self.x, self.y, self.z)
    def set_values(self, x, y, z):
        """Assign all three corner values at once (None leaves a point)."""
        for i, mob in enumerate([x, y, z]):
            self.set_value(i, mob)
    def set_value(self, index, value):
        """Assign one corner value and refresh the submobject list."""
        self.values[index] = self.put_on_vertex(index, value)
        self.reset_submobjects()
    def put_on_vertex(self, index, value):
        """
        Normalize value (number/str -> TexMobject, None -> point) and
        position it just outside vertex ``index``; nested TOPs are
        delegated to put_top_on_vertix.  Returns the positioned mobject.
        """
        assert(index in [0, 1, 2])
        if value is None:
            value = VectorizedPoint()
        if isinstance(value, numbers.Number):
            value = str(value)
        if isinstance(value, str):
            value = TexMobject(value)
        if isinstance(value, TOP):
            return self.put_top_on_vertix(index, value)
        self.rescale_corner_mobject(value)
        value.center()
        # Offset away from the triangle, direction depending on corner.
        if index == 0:
            offset = -value.get_corner(UP+RIGHT)
        elif index == 1:
            offset = -value.get_bottom()
        elif index == 2:
            offset = -value.get_corner(UP+LEFT)
        value.shift(self.offset_multiple*offset)
        anchors = self.triangle.get_anchors_and_handles()[0]
        value.shift(anchors[index])
        return value
    def put_top_on_vertix(self, index, top):
        # (sic: "vertix" — name kept as-is since it is called externally.)
        # Scale the nested triangle and shift it so the midpoint of its
        # two non-``index`` vertices lands on this triangle's vertex.
        top.scale_to_fit_height(2*self.get_value_height())
        vertices = np.array(top.get_vertices())
        vertices[index] = 0
        start = reduce(op.add, vertices)/2
        end = self.triangle.get_anchors_and_handles()[0][index]
        top.shift(end-start)
        return top
    def put_in_vertex(self, index, mobject):
        """Place mobject inside the triangle, 70% toward vertex ``index``."""
        self.rescale_corner_mobject(mobject)
        mobject.center()
        mobject.shift(interpolate(
            self.get_center(),
            self.get_vertices()[index],
            0.7
        ))
        return mobject
    def get_surrounding_circle(self, color = YELLOW):
        """Return a circle enclosing the triangle, nudged slightly down."""
        return Circle(
            radius = 1.7*self.radius,
            color = color
        ).shift(
            self.triangle.get_center(),
            (self.triangle.get_height()/6)*DOWN
        )
    def rescale_corner_mobject(self, mobject):
        """Scale mobject (in place) to the standard corner-value height."""
        mobject.scale_to_fit_height(self.get_value_height())
        return self
    def get_value_height(self):
        # Corner values are a fixed fraction of the triangle's height.
        return self.triangle.get_height()/self.triangle_height_to_number_height
    def get_center(self):
        return center_of_mass(self.get_vertices())
    def get_vertices(self):
        return self.triangle.get_anchors_and_handles()[0][:3]
    def reset_submobjects(self):
        """Rebuild the submobject list as triangle + three corner values."""
        self.submobjects = [self.triangle] + self.values
        return self
class IntroduceNotation(Scene):
    """Introduce the triangle-of-power notation alongside 2^3 = 8."""
    def construct(self):
        triangle = TOP()
        old_notation = TexMobject("2^3 = 8")
        old_notation.to_corner(UP+LEFT)
        # Place 2, 3, 8 at the lower-left, top and lower-right vertices.
        corner_numbers = [
            triangle.put_on_vertex(index, value)
            for index, value in enumerate([2, 3, 8])
        ]
        self.play(FadeIn(old_notation))
        self.wait()
        self.play(ShowCreation(triangle))
        for number in corner_numbers:
            self.play(ShowCreation(number), run_time=2)
        self.wait()
class ShowRule(Scene):
    """
    Parameterized scene: for the operation selected by ``index``
    (0 = root, 1 = log, 2 = exponent), pull the corresponding corner
    value out of the 2-3-8 triangle and circle the triangle to show
    which vertex is being solved for.
    """
    # CLI argument plumbing: each run takes one of three indices.
    args_list = [(0,), (1,), (2,)]
    @staticmethod
    def args_to_string(index):
        return str(index)
    @staticmethod
    def string_to_args(index_string):
        result = int(index_string)
        assert(result in [0, 1, 2])
        return result
    def construct(self, index):
        equation = get_equation(index)
        equation.to_corner(UP+LEFT)
        top = TOP(2, 3, 8)
        # Target layout: triangle = extracted-value, left/right of "=".
        new_top = top.copy()
        equals = TexMobject("=").scale(1.5)
        new_top.next_to(equals, LEFT, buff = 1)
        new_top.values[index].next_to(equals, RIGHT, buff = 1)
        circle = Circle(
            radius = 1.7*top.radius,
            color = OPERATION_COLORS[index]
        )
        self.add(equation, top)
        self.wait()
        self.play(
            Transform(top, new_top),
            ShowCreation(equals)
        )
        # Circle the triangle, then shrink the circle onto the vertex
        # being solved for, tinting the extracted value to match.
        circle.shift(new_top.triangle.get_center_of_mass())
        new_circle = circle.copy()
        new_top.put_on_vertex(index, new_circle)
        self.wait()
        self.play(ShowCreation(circle))
        self.wait()
        self.play(
            Transform(circle, new_circle),
            ApplyMethod(new_top.values[index].set_color, circle.color)
        )
        self.wait()
class AllThree(Scene):
    """
    Show all three incomplete 2-3-8 triangles side by side, each with
    the old-style expression (exponent, log, root) written beneath it.
    """
    def construct(self):
        tops = []
        equations = []
        args = (2, 3, 8)
        # Iterate 2, 1, 0 so the exponential version ends up leftmost.
        for i in 2, 1, 0:
            new_args = list(args)
            new_args[i] = None  # blank out the vertex being solved for
            top = TOP(*new_args, triangle_height_to_number_height = 2)
            top.shift(i*4.5*LEFT)
            equation = get_equation(i, expression_only = True)
            equation.scale(3)
            equation.next_to(top, DOWN, buff = 0.7)
            tops.append(top)
            equations.append(equation)
        VMobject(*tops+equations).center()
        for top, eq in zip(tops, equations):
            self.play(FadeIn(top), FadeIn(eq))
            self.wait(3)
        self.wait()
class SixDifferentInverses(Scene):
    """
    Present the six old-style inverse identities, color the "clean"
    four green and the awkward two red, then replace each with its
    nested-triangle counterpart and animate the cancellation.
    """
    def construct(self):
        # NOTE(review): this relies on get_inverse_rules() returning a
        # sliceable, re-iterable sequence (a list, not a lazy iterator).
        rules = get_inverse_rules()
        # Lay the six rules out on a 3x2 grid.
        vects = it.starmap(op.add, it.product(
            [3*UP, 0.5*UP, 2*DOWN], [2*LEFT, 2*RIGHT]
        ))
        for rule, vect in zip(rules, vects):
            rule.shift(vect)
        general_idea = TexMobject("f(f^{-1}(a)) = a")
        self.play(Write(VMobject(*rules)))
        self.wait()
        for s, color in (rules[:4], GREEN), (rules[4:], RED):
            mob = VMobject(*s)
            self.play(ApplyMethod(mob.set_color, color))
            self.wait()
            self.play(ApplyMethod(mob.set_color, WHITE))
        self.play(
            ApplyMethod(VMobject(*rules[::2]).to_edge, LEFT),
            ApplyMethod(VMobject(*rules[1::2]).to_edge, RIGHT),
            GrowFromCenter(general_idea)
        )
        self.wait()
        # Swap each TeX rule for its nested-triangle equivalent.
        top_rules = get_top_inverse_rules()
        for rule, top_rule in zip(rules, top_rules):
            top_rule.scale_to_fit_height(1.5)
            top_rule.center()
            top_rule.shift(rule.get_center())
        self.play(*map(FadeOut, rules))
        self.remove(*rules)
        self.play(*map(GrowFromCenter, top_rules))
        self.wait()
        self.remove(general_idea)
        # Walk through each triangle rule one at a time, enlarging it,
        # showing the matching TeX rule up top, and cancelling.
        rules = get_inverse_rules()
        original = None
        for i, (top_rule, rule) in enumerate(zip(top_rules, rules)):
            rule.center().to_edge(UP)
            rule.set_color(GREEN if i < 4 else RED)
            self.add(rule)
            new_top_rule = top_rule.copy().center().scale(1.5)
            anims = [Transform(top_rule, new_top_rule)]
            if original is not None:
                anims.append(FadeIn(original))
            original = top_rule.copy()
            self.play(*anims)
            self.wait()
            self.animate_top_rule(top_rule)
            self.remove(rule)
    def animate_top_rule(self, top_rule):
        """
        Animate the inverse-cancellation inside one nested-triangle rule:
        the small and big triangles (with their shared symbol) collapse
        onto each other, leaving the remaining symbol moving across to
        the right of the equals sign.
        """
        lil_top, lil_symbol, symbol_index = None, None, None
        big_top = top_rule.submobjects[0]
        equals, right_symbol = top_rule.submobjects[1].split()
        # Locate the nested triangle, the outer symbol's index, and the
        # symbol that survives the cancellation.
        for i, value in enumerate(big_top.values):
            if isinstance(value, TOP):
                lil_top = value
            elif isinstance(value, TexMobject):
                symbol_index = i
            else:
                lil_symbol_index = i
        lil_symbol = lil_top.values[lil_symbol_index]
        assert(lil_top is not None and lil_symbol is not None)
        cancel_parts = [
            VMobject(top.triangle, top.values[symbol_index])
            for top in (lil_top, big_top)
        ]
        new_symbol = lil_symbol.copy()
        new_symbol.replace(right_symbol)
        # Mirror horizontally about the equals sign.
        vect = equals.get_center() - right_symbol.get_center()
        new_symbol.shift(2*vect[0]*RIGHT)
        self.play(
            Transform(*cancel_parts, rate_func = rush_into)
        )
        self.play(
            FadeOut(VMobject(*cancel_parts)),
            Transform(lil_symbol, new_symbol, rate_func = rush_from)
        )
        self.wait()
        self.remove(lil_symbol, top_rule, VMobject(*cancel_parts))
class SixSixSix(Scene):
    """
    Randolph ponders the six inverse identities while three sixes
    accumulate in the corner (six-six-six), then gives up and declares
    "I'll just study art!".
    """
    def construct(self):
        randy = Randolph(mode = "pondering").to_corner()
        bubble = ThoughtBubble().pin_to(randy)
        rules = get_inverse_rules()
        sixes = TexMobject(["6", "6", "6"], next_to_buff = 1)
        sixes.to_corner(UP+RIGHT)
        sixes = sixes.split()
        speech_bubble = SpeechBubble()
        speech_bubble.pin_to(randy)
        speech_bubble.write("I'll just study art!")
        self.add(randy)
        self.play(ShowCreation(bubble))
        bubble.add_content(VectorizedPoint())
        # Cycle the six rules through the thought bubble; every other
        # rule reveals one more "6" in the corner.
        for i, rule in enumerate(rules):
            if i%2 == 0:
                # FIX: use floor division — i/2 is a float in Python 3
                # and raises TypeError when used as a list index.
                anim = ShowCreation(sixes[i//2])
            else:
                anim = Blink(randy)
            self.play(
                ApplyMethod(bubble.add_content, rule),
                anim
            )
            self.wait()
        self.wait()
        # Swap the thought bubble for a speech bubble with the punchline.
        words = speech_bubble.content
        equation = bubble.content
        speech_bubble.clear()
        bubble.clear()
        self.play(
            ApplyMethod(randy.change_mode, "angry"),
            Transform(bubble, speech_bubble),
            Transform(equation, words),
            FadeOut(VMobject(*sixes))
        )
        self.wait()
class AdditiveProperty(Scene):
    """
    Show that a^x a^y = a^{x+y} and log_a(xy) = log_a(x) + log_a(y) are
    the same fact in triangle notation, then highlight the matching
    operation symbols (dot/times vs. plus) in the two triangle rules.
    """
    def construct(self):
        exp_rule, log_rule = self.write_old_style_rules()
        t_exp_rule, t_log_rule = self.get_new_style_rules()
        self.play(
            ApplyMethod(exp_rule.to_edge, UP),
            ApplyMethod(log_rule.to_edge, DOWN, 1.5)
        )
        t_exp_rule.next_to(exp_rule, DOWN)
        t_exp_rule.set_color(GREEN)
        t_log_rule.next_to(log_rule, UP)
        t_log_rule.set_color(RED)
        self.play(
            FadeIn(t_exp_rule),
            FadeIn(t_log_rule),
            ApplyMethod(exp_rule.set_color, GREEN),
            ApplyMethod(log_rule.set_color, RED),
        )
        self.wait()
        all_tops = filter(
            lambda m : isinstance(m, TOP),
            t_exp_rule.split()+t_log_rule.split()
        )
        self.put_in_circles(all_tops)
        self.set_color_appropriate_parts(t_exp_rule, t_log_rule)
    def write_old_style_rules(self):
        """
        Write the exponent rule, morph matched characters into the log
        rule, and return (exponent rule, log rule) mobjects.
        """
        start = TexMobject("a^x a^y = a^{x+y}")
        end = TexMobject("\\log_a(xy) = \\log_a(x) + \\log_a(y)")
        start.shift(UP)
        end.shift(DOWN)
        # Pick out the individual characters of each equation so the
        # corresponding a's, x's, y's, etc. can morph into each other.
        a1, x1, a2, y1, eq1, a3, p1, x2, y2 = start.split()
        a4, x3, y3, eq2, a5, x4, p2, a6, y4 = np.array(end.split())[
            [3, 5, 6, 8, 12, 14, 16, 20, 22]
        ]
        start_copy = start.copy()
        self.play(Write(start_copy))
        self.wait()
        self.play(Transform(
            VMobject(a1, x1, a2, y1, eq1, a3, p1, x2, a3.copy(), y2),
            VMobject(a4, x3, a4.copy(), y3, eq2, a5, p2, x4, a6, y4)
        ))
        self.play(Write(end))
        self.clear()
        self.add(start_copy, end)
        self.wait()
        return start_copy, end
    def get_new_style_rules(self):
        """Build the two triangle-notation versions of the rules."""
        upper_mobs = [
            TOP("a", "x", "R"), Dot(),
            TOP("a", "y", "R"), TexMobject("="),
            TOP("a", "x+y")
        ]
        lower_mobs = [
            TOP("a", None, "xy"), TexMobject("="),
            TOP("a", None, "x"), TexMobject("+"),
            TOP("a", None, "y"),
        ]
        for mob in upper_mobs + lower_mobs:
            if isinstance(mob, TOP):
                mob.scale(0.5)
        for group in upper_mobs, lower_mobs:
            for m1, m2 in zip(group, group[1:]):
                m2.next_to(m1)
        # "R" was only a placeholder to get spacing right; blank it out.
        for top in upper_mobs[0], upper_mobs[2]:
            top.set_value(2, None)
        upper_mobs = VMobject(*upper_mobs).center().shift(2*UP)
        lower_mobs = VMobject(*lower_mobs).center().shift(2*DOWN)
        return upper_mobs, lower_mobs
    def put_in_circles(self, tops):
        """Circle the empty vertex of each triangle in ``tops``."""
        anims = []
        for top in tops:
            # The vertex left as a VectorizedPoint is the blank one.
            for i, value in enumerate(top.values):
                if isinstance(value, VectorizedPoint):
                    index = i
            circle = top.put_on_vertex(index, Circle(color = WHITE))
            anims.append(
                Transform(top.copy().set_color(YELLOW), circle)
            )
        self.add(*[anim.mobject for anim in anims])
        self.wait()
        self.play(*anims)
        self.wait()
    def set_color_appropriate_parts(self, t_exp_rule, t_log_rule):
        #Horribly hacky: fish the operation symbols and blank-vertex
        #circles out of the two rules by positional index, then pulse
        #each group yellow in turn.
        circle1 = t_exp_rule.split()[0].put_on_vertex(
            2, Circle()
        )
        top_dot = t_exp_rule.split()[1]
        circle2 = t_exp_rule.split()[2].put_on_vertex(
            2, Circle()
        )
        top_plus = t_exp_rule.split()[4].values[1]
        bottom_times = t_log_rule.split()[0].values[2]
        circle3 = t_log_rule.split()[2].put_on_vertex(
            1, Circle()
        )
        bottom_plus = t_log_rule.split()[3]
        circle4 = t_log_rule.split()[4].put_on_vertex(
            1, Circle()
        )
        mob_lists = [
            [circle1, top_dot, circle2],
            [top_plus],
            [bottom_times],
            [circle3, bottom_plus, circle4]
        ]
        for mobs in mob_lists:
            copies = VMobject(*mobs).copy()
            self.play(ApplyMethod(
                copies.set_color, YELLOW,
                run_time = 0.5
            ))
            self.play(ApplyMethod(
                copies.scale_in_place, 1.2,
                rate_func = there_and_back
            ))
            self.wait()
            self.remove(copies)
class DrawInsideTriangle(Scene):
    """Write the operation symbols (dot, plus, times) inside the triangle."""
    def construct(self):
        triangle = TOP()
        triangle.scale(2)
        corner_dot = triangle.put_in_vertex(0, Dot())
        corner_plus = triangle.put_in_vertex(1, TexMobject("+"))
        corner_times = triangle.put_in_vertex(2, TexMobject("\\times"))
        corner_plus.set_color(GREEN)
        corner_times.set_color(YELLOW)
        self.add(triangle)
        self.wait()
        for symbol in (corner_dot, corner_plus, corner_times):
            self.play(Write(symbol, run_time = 1))
            self.wait()
class ConstantOnTop(Scene):
    """
    With 3 held constant on the top vertex, show that moving x between
    the lower corners swaps x^3 and the cube root of x, then mark the
    vertices with their operation symbols.
    """
    def construct(self):
        top = TOP()
        dot = top.put_in_vertex(1, Dot())
        times1 = top.put_in_vertex(0, TexMobject("\\times"))
        times2 = top.put_in_vertex(2, TexMobject("\\times"))
        times1.set_color(YELLOW)
        times2.set_color(YELLOW)
        three = top.put_on_vertex(1, "3")
        lower_left_x = top.put_on_vertex(0, "x")
        lower_right_x = top.put_on_vertex(2, "x")
        x_cubed = TexMobject("x^3").to_edge(UP)
        x_cubed.submobjects.reverse() #To align better
        cube_root_x = TexMobject("\\sqrt[3]{x}").to_edge(UP)
        self.add(top)
        self.play(ShowCreation(three))
        self.play(
            FadeIn(lower_left_x),
            Write(x_cubed),
            run_time = 1
        )
        self.wait()
        # Sweep x to the other corner while x^3 morphs into cbrt(x).
        self.play(*[
            Transform(*pair, path_arc = np.pi)
            for pair in [
                (lower_left_x, lower_right_x),
                (x_cubed, cube_root_x),
            ]
        ])
        self.wait(2)
        for mob in dot, times1, times2:
            self.play(ShowCreation(mob))
        self.wait()
def get_const_top_TOP(*args):
    """
    Build a TOP whose top vertex is marked constant with a dot and
    whose two lower corners carry yellow multiplication signs.
    """
    decorated = TOP(*args)
    marks = [
        decorated.put_in_vertex(1, Dot()),
        decorated.put_in_vertex(0, TexMobject("\\times")),
        decorated.put_in_vertex(2, TexMobject("\\times")),
    ]
    for times_sign in marks[1:]:
        times_sign.set_color(YELLOW)
    decorated.add(*marks)
    return decorated
class MultiplyWithConstantTop(Scene):
    """
    With a constant 3 on top, show the triangle equation
    TOP(x) x TOP(y) = TOP(xy), first read as (x^3)(y^3) = (xy)^3,
    then — after sliding the symbols to the other corner — as
    cbrt(x) cbrt(y) = cbrt(xy).
    """
    def construct(self):
        top1 = get_const_top_TOP("x", "3")
        top2 = get_const_top_TOP("y", "3")
        top3 = get_const_top_TOP("xy", "3")
        times = TexMobject("\\times")
        equals = TexMobject("=")
        top_exp_equation = VMobject(
            top1, times, top2, equals, top3
        )
        top_exp_equation.arrange_submobjects()
        old_style_exp = TexMobject("(x^3)(y^3) = (xy)^3")
        old_style_exp.to_edge(UP)
        old_style_exp.set_color(GREEN)
        old_style_rad = TexMobject("\\sqrt[3]{x} \\sqrt[3]{y} = \\sqrt[3]{xy}")
        old_style_rad.to_edge(UP)
        old_style_rad.set_color(RED)
        self.add(top_exp_equation, old_style_exp)
        self.wait(3)
        # Slide each corner symbol from lower-left to lower-right while
        # the old-style equation switches from exponents to radicals.
        old_tops = [top1, top2, top3]
        new_tops = []
        for top in old_tops:
            new_top = top.copy()
            new_top.put_on_vertex(2, new_top.values[0])
            new_top.shift(0.5*LEFT)
            new_tops.append(new_top)
        self.play(
            Transform(old_style_exp, old_style_rad),
            Transform(
                VMobject(*old_tops),
                VMobject(*new_tops),
                path_arc = np.pi/2
            )
        )
        self.wait(3)
class RightStaysConstantQ(Scene):
    """
    Pose the question: with 8 fixed in the lower right, what operation
    "?" combines the triangles?  Written old-style, that is asking what
    relates root_x(8) ? root_y(8) = root_{x?y}(8), and then (after
    moving the symbols) log_x(8) ? log_y(8) = log_{x?y}(8).
    """
    def construct(self):
        top1, top2, top3 = old_tops = [
            TOP(None, s, "8")
            for s in ("x", "y", TexMobject("x?y"))
        ]
        q_mark = TexMobject("?").scale(2)
        equation = VMobject(
            top1, q_mark, top2, TexMobject("="), top3
        )
        equation.arrange_submobjects(buff = 0.7)
        symbols_at_top = VMobject(*[
            top.values[1]
            for top in (top1, top2, top3)
        ])
        symbols_at_lower_right = VMobject(*[
            top.put_on_vertex(0, top.values[1].copy())
            for top in (top1, top2, top3)
        ])
        old_style_eq1 = TexMobject("\\sqrt[x]{8} ? \\sqrt[y]{8} = \\sqrt[x?y]{8}")
        old_style_eq1.set_color(BLUE)
        old_style_eq2 = TexMobject("\\log_x(8) ? \\log_y(8) = \\log_{x?y}(8)")
        old_style_eq2.set_color(YELLOW)
        for eq in old_style_eq1, old_style_eq2:
            eq.to_edge(UP)
        randy = Randolph()
        randy.to_corner()
        bubble = ThoughtBubble().pin_to(randy)
        bubble.add_content(TOP(None, None, "8"))
        self.add(randy, bubble)
        self.play(ApplyMethod(randy.change_mode, "pondering"))
        self.wait(3)
        # Burst the thought bubble outward (points pushed radially to
        # distance 15 from a point 2 units below center) while its
        # triangle grows into the full equation.
        triangle = bubble.content.triangle
        eight = bubble.content.values[2]
        bubble.clear()
        self.play(
            Transform(triangle, equation),
            FadeOut(eight),
            ApplyPointwiseFunction(
                lambda p : (p+2*DOWN)*15/np.linalg.norm(p+2*DOWN),
                bubble
            ),
            FadeIn(old_style_eq1),
            ApplyMethod(randy.shift, 3*DOWN + 3*LEFT),
            run_time = 2
        )
        self.remove(triangle)
        self.add(equation)
        self.wait(4)
        self.play(
            Transform(
                symbols_at_top, symbols_at_lower_right,
                path_arc = np.pi/2
            ),
            Transform(old_style_eq1, old_style_eq2)
        )
        self.wait(2)
class AOplusB(Scene):
    """Static display of the definition a oplus b = 1/(1/a + 1/b)."""
    def construct(self):
        definition = TexMobject(
            "a \\oplus b = \\dfrac{1}{\\frac{1}{a} + \\frac{1}{b}}"
        )
        self.add(definition.scale(2))
        self.wait()
class ConstantLowerRight(Scene):
    """
    Animate the "constant lower-right" reading of the triangle of power.

    With 8 held fixed in the lower-right vertex, multiplying the values
    at the top corresponds to combining the lower-left values with the
    harmonic operation "oplus" (a oplus b = 1/(1/a + 1/b)): the old-style
    identity 1/(1/log_x(8) + 1/log_y(8)) = log_{xy}(8) morphs into
    x-th-root(8) * y-th-root(8) = (x oplus y)-th-root(8).
    """
    def construct(self):
        top = TOP()
        # Operation symbols inside the vertices: times at lower left,
        # oplus at the top, a dot marking the held-constant lower right.
        times = top.put_in_vertex(0, TexMobject("\\times"))
        times.set_color(YELLOW)
        oplus = top.put_in_vertex(1, TexMobject("\\oplus"))
        oplus.set_color(BLUE)
        dot = top.put_in_vertex(2, Dot())
        eight = top.put_on_vertex(2, TexMobject("8"))
        self.add(top)
        self.play(ShowCreation(eight))
        for mob in dot, oplus, times:
            self.play(ShowCreation(mob))
        self.wait()
        top.add(eight)
        top.add(times, oplus, dot)
        # Three copies of the decorated triangle form the equation
        # TOP oplus TOP = TOP.
        top1, top2, top3 = tops = [
            top.copy() for i in range(3)
        ]
        big_oplus = TexMobject("\\oplus").scale(2).set_color(BLUE)
        equals = TexMobject("=")
        equation = VMobject(
            top1, big_oplus, top2, equals, top3
        )
        equation.arrange_submobjects()
        top3.shift(0.5*RIGHT)
        x, y, xy = [
            t.put_on_vertex(0, s)
            for t, s in zip(tops, ["x", "y", "xy"])
        ]
        old_style_eq = TexMobject(
            "\\dfrac{1}{\\frac{1}{\\log_x(8)} + \\frac{1}{\\log_y(8)}} = \\log_{xy}(8)"
        )
        old_style_eq.to_edge(UP).set_color(RED)
        triple_top_copy = VMobject(*[
            top.copy() for i in range(3)
        ])
        self.clear()
        self.play(
            Transform(triple_top_copy, VMobject(*tops)),
            FadeIn(VMobject(x, y, xy, big_oplus, equals))
        )
        self.remove(triple_top_copy)
        self.add(*tops)
        self.play(Write(old_style_eq))
        self.wait(3)
        # Move the symbols to the top vertices and swap the old-style
        # logarithm identity for the radical version.
        syms = VMobject(x, y, xy)
        new_syms = VMobject(*[
            t.put_on_vertex(1, s)
            for t, s in zip(tops, ["x", "y", "x \\oplus y"])
        ])
        new_old_style_eq = TexMobject(
            "\\sqrt[x]{8} \\sqrt[y]{8} = \\sqrt[X]{8}"
        )
        # The placeholder "X" in the radical is replaced by the harmonic
        # fraction, scaled up while keeping its lower-right corner fixed.
        X = new_old_style_eq.split()[-4]
        frac = TexMobject("\\frac{1}{\\frac{1}{x} + \\frac{1}{y}}")
        frac.replace(X)
        frac_lower_right = frac.get_corner(DOWN+RIGHT)
        frac.scale(2)
        frac.shift(frac_lower_right - frac.get_corner(DOWN+RIGHT))
        new_old_style_eq.submobjects[-4] = frac
        new_old_style_eq.to_edge(UP)
        new_old_style_eq.set_color(RED)
        big_times = TexMobject("\\times").set_color(YELLOW)
        big_times.shift(big_oplus.get_center())
        self.play(
            Transform(old_style_eq, new_old_style_eq),
            Transform(syms, new_syms, path_arc = np.pi/2),
            Transform(big_oplus, big_times)
        )
        self.wait(4)
class TowerExponentFrame(Scene):
    """
    Still frame contrasting the two readings of the tower exponent
    $3^{3^3}$: the two nesting orders are shown as two nested
    triangles-of-power, side by side under explanatory text.
    """
    def construct(self):
        words = TextMobject("""
            Consider an expression like $3^{3^3}$. It's
            ambiguous whether this means $27^3$ or $3^{27}$,
            which is the difference between $19{,}683$ and
            $7{,}625{,}597{,}484{,}987$. But with the triangle
            of power, the difference is crystal clear:
        """)
        words.scale_to_fit_width(FRAME_WIDTH-1)
        words.to_edge(UP)
        # (3^3)^3 on the left, 3^(3^3) on the right.
        top1 = TOP(TOP(3, 3), 3)
        top2 = TOP(3, (TOP(3, 3)))
        for top in top1, top2:
            top.next_to(words, DOWN)
        top1.shift(3*LEFT)
        top2.shift(3*RIGHT)
        self.add(words, top1, top2)
        self.wait()
class ExponentialGrowth(Scene):
    """
    Still frame: annotating a triangle of power with a dot (constant)
    and a tilde (variable) to disambiguate whether $T^a$ means
    exponential or polynomial growth.
    """
    def construct(self):
        words = TextMobject("""
            Let's say you are studying a certain growth rate,
            and you come across an expression like $T^a$. It
            matters a lot whether you consider $T$ or $a$
            to be the variable, since exponential growth and
            polynomial growth have very different flavors. The
            nice thing about having a triangle that you can write
            inside is that you can clarify this kind of ambiguity
            by writing a little dot next to the constant and
            a ``$\\sim$'' next to the variable.
        """)
        words.scale(0.75)
        words.to_edge(UP)
        top = TOP("T", "a")
        top.next_to(words, DOWN)
        # Dot marks the constant base, tilde marks the variable exponent.
        dot = top.put_in_vertex(0, TexMobject("\\cdot"))
        sim = top.put_in_vertex(1, TexMobject("\\sim"))
        self.add(words, top, dot, sim)
        # NOTE(review): show_frame() appears to render/dump the current
        # frame for a still image — confirm against the Scene API in use.
        self.show_frame()
        self.wait()
class GoExplore(Scene):
    """Closing scene: "Go explore!" slides off-screen to reveal a teaser."""
    def construct(self):
        farewell = TextMobject("Go explore!")
        teaser = TextMobject("by the way \\dots")
        # Park the teaser far off to the right so the shared shift
        # brings it on screen as the farewell leaves.
        teaser.shift(20*RIGHT)
        self.play(Write(farewell))
        self.wait(4)
        both = VMobject(farewell, teaser)
        self.play(ApplyMethod(both.shift, 20*LEFT))
        self.wait(3)
| true | true |
f7f4c3f03fd1e93fa2f2f599b170636bcf450aec | 463 | py | Python | DynamicTesting/buildImage.py | AbhiTaker/Container-Testing-Platform | 90d597a533a29a7984f9c7dc8ce2b59c71bd85ec | [
"MIT"
] | 1 | 2019-09-18T13:52:09.000Z | 2019-09-18T13:52:09.000Z | DynamicTesting/buildImage.py | AbhiTaker/Container-Testing-Platform | 90d597a533a29a7984f9c7dc8ce2b59c71bd85ec | [
"MIT"
] | null | null | null | DynamicTesting/buildImage.py | AbhiTaker/Container-Testing-Platform | 90d597a533a29a7984f9c7dc8ce2b59c71bd85ec | [
"MIT"
] | null | null | null | import compilers
import docker
def basicImage():
client = docker.from_env()
for key in compilers.compilers:
print(key)
tagName = key
dockerFileText = compilers.compilers[key]
dockerFile = open('dfile/Dockerfile', 'w', encoding = 'utf-8') # Getting the content of Required Dockerfile
dockerFile.write(dockerFileText)
dockerFile.close()
client.images.build(path="dfile", tag = tagName)
| 28.9375 | 119 | 0.643629 | import compilers
import docker
def basicImage():
client = docker.from_env()
for key in compilers.compilers:
print(key)
tagName = key
dockerFileText = compilers.compilers[key]
dockerFile = open('dfile/Dockerfile', 'w', encoding = 'utf-8')
dockerFile.write(dockerFileText)
dockerFile.close()
client.images.build(path="dfile", tag = tagName)
| true | true |
f7f4c44405a26e02bf7f109f2d2e9af3566bfdd8 | 11,046 | py | Python | sdk/python/pulumi_azure_native/datadog/v20200201preview/_inputs.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_native/datadog/v20200201preview/_inputs.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_native/datadog/v20200201preview/_inputs.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'DatadogOrganizationPropertiesArgs',
'IdentityPropertiesArgs',
'MonitorPropertiesArgs',
'ResourceSkuArgs',
'UserInfoArgs',
]
@pulumi.input_type
class DatadogOrganizationPropertiesArgs:
    def __init__(__self__, *,
                 api_key: Optional[pulumi.Input[str]] = None,
                 application_key: Optional[pulumi.Input[str]] = None,
                 enterprise_app_id: Optional[pulumi.Input[str]] = None,
                 linking_auth_code: Optional[pulumi.Input[str]] = None,
                 linking_client_id: Optional[pulumi.Input[str]] = None,
                 redirect_uri: Optional[pulumi.Input[str]] = None):
        """
        Datadog organization properties.
        :param pulumi.Input[str] api_key: Api key associated to the Datadog organization.
        :param pulumi.Input[str] application_key: Application key associated to the Datadog organization.
        :param pulumi.Input[str] enterprise_app_id: The Id of the Enterprise App used for Single sign on.
        :param pulumi.Input[str] linking_auth_code: The auth code used for linking to an existing Datadog organization.
        :param pulumi.Input[str] linking_client_id: The client id of an existing app, exchanged for an auth token when linking the organization.
        :param pulumi.Input[str] redirect_uri: The redirect uri for linking.
        """
        if api_key is not None:
            pulumi.set(__self__, "api_key", api_key)
        if application_key is not None:
            pulumi.set(__self__, "application_key", application_key)
        if enterprise_app_id is not None:
            pulumi.set(__self__, "enterprise_app_id", enterprise_app_id)
        if linking_auth_code is not None:
            pulumi.set(__self__, "linking_auth_code", linking_auth_code)
        if linking_client_id is not None:
            pulumi.set(__self__, "linking_client_id", linking_client_id)
        if redirect_uri is not None:
            pulumi.set(__self__, "redirect_uri", redirect_uri)

    @property
    @pulumi.getter(name="apiKey")
    def api_key(self) -> Optional[pulumi.Input[str]]:
        """
        Api key associated to the Datadog organization.
        """
        return pulumi.get(self, "api_key")

    @api_key.setter
    def api_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_key", value)

    @property
    @pulumi.getter(name="applicationKey")
    def application_key(self) -> Optional[pulumi.Input[str]]:
        """
        Application key associated to the Datadog organization.
        """
        return pulumi.get(self, "application_key")

    @application_key.setter
    def application_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "application_key", value)

    @property
    @pulumi.getter(name="enterpriseAppId")
    def enterprise_app_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Id of the Enterprise App used for Single sign on.
        """
        return pulumi.get(self, "enterprise_app_id")

    @enterprise_app_id.setter
    def enterprise_app_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "enterprise_app_id", value)

    @property
    @pulumi.getter(name="linkingAuthCode")
    def linking_auth_code(self) -> Optional[pulumi.Input[str]]:
        """
        The auth code used for linking to an existing Datadog organization.
        """
        return pulumi.get(self, "linking_auth_code")

    @linking_auth_code.setter
    def linking_auth_code(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "linking_auth_code", value)

    @property
    @pulumi.getter(name="linkingClientId")
    def linking_client_id(self) -> Optional[pulumi.Input[str]]:
        """
        The client id of an existing app, exchanged for an auth token when linking the organization.
        """
        return pulumi.get(self, "linking_client_id")

    @linking_client_id.setter
    def linking_client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "linking_client_id", value)

    @property
    @pulumi.getter(name="redirectUri")
    def redirect_uri(self) -> Optional[pulumi.Input[str]]:
        """
        The redirect uri for linking.
        """
        return pulumi.get(self, "redirect_uri")

    @redirect_uri.setter
    def redirect_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "redirect_uri", value)
@pulumi.input_type
class IdentityPropertiesArgs:
    def __init__(__self__, *,
                 type: Optional[pulumi.Input[Union[str, 'ManagedIdentityTypes']]] = None):
        """
        Managed identity properties of the resource.
        :param pulumi.Input[Union[str, 'ManagedIdentityTypes']] type: Identity type
        """
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[Union[str, 'ManagedIdentityTypes']]]:
        """
        Identity type
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[Union[str, 'ManagedIdentityTypes']]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class MonitorPropertiesArgs:
    def __init__(__self__, *,
                 datadog_organization_properties: Optional[pulumi.Input['DatadogOrganizationPropertiesArgs']] = None,
                 monitoring_status: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]] = None,
                 provisioning_state: Optional[pulumi.Input[Union[str, 'ProvisioningState']]] = None,
                 user_info: Optional[pulumi.Input['UserInfoArgs']] = None):
        """
        Properties specific to the monitor resource.
        :param pulumi.Input['DatadogOrganizationPropertiesArgs'] datadog_organization_properties: Datadog organization properties
        :param pulumi.Input[Union[str, 'MonitoringStatus']] monitoring_status: Flag specifying if the resource monitoring is enabled or disabled.
        :param pulumi.Input[Union[str, 'ProvisioningState']] provisioning_state: Provisioning state of the monitor resource.
        :param pulumi.Input['UserInfoArgs'] user_info: User info
        """
        if datadog_organization_properties is not None:
            pulumi.set(__self__, "datadog_organization_properties", datadog_organization_properties)
        if monitoring_status is not None:
            pulumi.set(__self__, "monitoring_status", monitoring_status)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if user_info is not None:
            pulumi.set(__self__, "user_info", user_info)

    @property
    @pulumi.getter(name="datadogOrganizationProperties")
    def datadog_organization_properties(self) -> Optional[pulumi.Input['DatadogOrganizationPropertiesArgs']]:
        """
        Datadog organization properties
        """
        return pulumi.get(self, "datadog_organization_properties")

    @datadog_organization_properties.setter
    def datadog_organization_properties(self, value: Optional[pulumi.Input['DatadogOrganizationPropertiesArgs']]):
        pulumi.set(self, "datadog_organization_properties", value)

    @property
    @pulumi.getter(name="monitoringStatus")
    def monitoring_status(self) -> Optional[pulumi.Input[Union[str, 'MonitoringStatus']]]:
        """
        Flag specifying if the resource monitoring is enabled or disabled.
        """
        return pulumi.get(self, "monitoring_status")

    @monitoring_status.setter
    def monitoring_status(self, value: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]]):
        pulumi.set(self, "monitoring_status", value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[Union[str, 'ProvisioningState']]]:
        """
        Provisioning state of the monitor resource.
        """
        return pulumi.get(self, "provisioning_state")

    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[Union[str, 'ProvisioningState']]]):
        pulumi.set(self, "provisioning_state", value)

    @property
    @pulumi.getter(name="userInfo")
    def user_info(self) -> Optional[pulumi.Input['UserInfoArgs']]:
        """
        User info
        """
        return pulumi.get(self, "user_info")

    @user_info.setter
    def user_info(self, value: Optional[pulumi.Input['UserInfoArgs']]):
        pulumi.set(self, "user_info", value)
@pulumi.input_type
class ResourceSkuArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str]):
        """
        SKU of the resource, identified by name.
        :param pulumi.Input[str] name: Name of the SKU.
        """
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Name of the SKU.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class UserInfoArgs:
    def __init__(__self__, *,
                 email_address: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 phone_number: Optional[pulumi.Input[str]] = None):
        """
        User info
        :param pulumi.Input[str] email_address: Email of the user used by Datadog for contacting them if needed
        :param pulumi.Input[str] name: Name of the user
        :param pulumi.Input[str] phone_number: Phone number of the user used by Datadog for contacting them if needed
        """
        if email_address is not None:
            pulumi.set(__self__, "email_address", email_address)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if phone_number is not None:
            pulumi.set(__self__, "phone_number", phone_number)

    @property
    @pulumi.getter(name="emailAddress")
    def email_address(self) -> Optional[pulumi.Input[str]]:
        """
        Email of the user used by Datadog for contacting them if needed
        """
        return pulumi.get(self, "email_address")

    @email_address.setter
    def email_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "email_address", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the user
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="phoneNumber")
    def phone_number(self) -> Optional[pulumi.Input[str]]:
        """
        Phone number of the user used by Datadog for contacting them if needed
        """
        return pulumi.get(self, "phone_number")

    @phone_number.setter
    def phone_number(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "phone_number", value)
| 37.699659 | 145 | 0.658157 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'DatadogOrganizationPropertiesArgs',
'IdentityPropertiesArgs',
'MonitorPropertiesArgs',
'ResourceSkuArgs',
'UserInfoArgs',
]
@pulumi.input_type
class DatadogOrganizationPropertiesArgs:
def __init__(__self__, *,
api_key: Optional[pulumi.Input[str]] = None,
application_key: Optional[pulumi.Input[str]] = None,
enterprise_app_id: Optional[pulumi.Input[str]] = None,
linking_auth_code: Optional[pulumi.Input[str]] = None,
linking_client_id: Optional[pulumi.Input[str]] = None,
redirect_uri: Optional[pulumi.Input[str]] = None):
if api_key is not None:
pulumi.set(__self__, "api_key", api_key)
if application_key is not None:
pulumi.set(__self__, "application_key", application_key)
if enterprise_app_id is not None:
pulumi.set(__self__, "enterprise_app_id", enterprise_app_id)
if linking_auth_code is not None:
pulumi.set(__self__, "linking_auth_code", linking_auth_code)
if linking_client_id is not None:
pulumi.set(__self__, "linking_client_id", linking_client_id)
if redirect_uri is not None:
pulumi.set(__self__, "redirect_uri", redirect_uri)
@property
@pulumi.getter(name="apiKey")
def api_key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "api_key")
@api_key.setter
def api_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_key", value)
@property
@pulumi.getter(name="applicationKey")
def application_key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "application_key")
@application_key.setter
def application_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "application_key", value)
@property
@pulumi.getter(name="enterpriseAppId")
def enterprise_app_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "enterprise_app_id")
@enterprise_app_id.setter
def enterprise_app_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "enterprise_app_id", value)
@property
@pulumi.getter(name="linkingAuthCode")
def linking_auth_code(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "linking_auth_code")
@linking_auth_code.setter
def linking_auth_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "linking_auth_code", value)
@property
@pulumi.getter(name="linkingClientId")
def linking_client_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "linking_client_id")
@linking_client_id.setter
def linking_client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "linking_client_id", value)
@property
@pulumi.getter(name="redirectUri")
def redirect_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "redirect_uri")
@redirect_uri.setter
def redirect_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "redirect_uri", value)
@pulumi.input_type
class IdentityPropertiesArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input[Union[str, 'ManagedIdentityTypes']]] = None):
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'ManagedIdentityTypes']]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'ManagedIdentityTypes']]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class MonitorPropertiesArgs:
def __init__(__self__, *,
datadog_organization_properties: Optional[pulumi.Input['DatadogOrganizationPropertiesArgs']] = None,
monitoring_status: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]] = None,
provisioning_state: Optional[pulumi.Input[Union[str, 'ProvisioningState']]] = None,
user_info: Optional[pulumi.Input['UserInfoArgs']] = None):
if datadog_organization_properties is not None:
pulumi.set(__self__, "datadog_organization_properties", datadog_organization_properties)
if monitoring_status is not None:
pulumi.set(__self__, "monitoring_status", monitoring_status)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if user_info is not None:
pulumi.set(__self__, "user_info", user_info)
@property
@pulumi.getter(name="datadogOrganizationProperties")
def datadog_organization_properties(self) -> Optional[pulumi.Input['DatadogOrganizationPropertiesArgs']]:
return pulumi.get(self, "datadog_organization_properties")
@datadog_organization_properties.setter
def datadog_organization_properties(self, value: Optional[pulumi.Input['DatadogOrganizationPropertiesArgs']]):
pulumi.set(self, "datadog_organization_properties", value)
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> Optional[pulumi.Input[Union[str, 'MonitoringStatus']]]:
return pulumi.get(self, "monitoring_status")
@monitoring_status.setter
def monitoring_status(self, value: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]]):
pulumi.set(self, "monitoring_status", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[Union[str, 'ProvisioningState']]]:
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[Union[str, 'ProvisioningState']]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter(name="userInfo")
def user_info(self) -> Optional[pulumi.Input['UserInfoArgs']]:
return pulumi.get(self, "user_info")
@user_info.setter
def user_info(self, value: Optional[pulumi.Input['UserInfoArgs']]):
pulumi.set(self, "user_info", value)
@pulumi.input_type
class ResourceSkuArgs:
def __init__(__self__, *,
name: pulumi.Input[str]):
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class UserInfoArgs:
def __init__(__self__, *,
email_address: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
phone_number: Optional[pulumi.Input[str]] = None):
if email_address is not None:
pulumi.set(__self__, "email_address", email_address)
if name is not None:
pulumi.set(__self__, "name", name)
if phone_number is not None:
pulumi.set(__self__, "phone_number", phone_number)
@property
@pulumi.getter(name="emailAddress")
def email_address(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "email_address")
@email_address.setter
def email_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "email_address", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="phoneNumber")
def phone_number(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "phone_number")
@phone_number.setter
def phone_number(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phone_number", value)
| true | true |
f7f4c44e03f96aae5aed1f58f7eca21bcb5bdba6 | 1,699 | py | Python | backtesting/backtester/BackTest/backtest.py | SankaW/teamfx | 88d4a6295b4c5e050fbb96c23d632097956e5cf8 | [
"MIT"
] | null | null | null | backtesting/backtester/BackTest/backtest.py | SankaW/teamfx | 88d4a6295b4c5e050fbb96c23d632097956e5cf8 | [
"MIT"
] | null | null | null | backtesting/backtester/BackTest/backtest.py | SankaW/teamfx | 88d4a6295b4c5e050fbb96c23d632097956e5cf8 | [
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod
class Strategy(metaclass=ABCMeta):
    """Strategy is an abstract base class providing an interface for
    all subsequent (inherited) trading strategies.

    The goal of a (derived) Strategy object is to output a list of signals,
    which has the form of a time series indexed pandas DataFrame.

    In this instance only a single symbol/instrument is supported.

    Fix: the original assigned ``__metaclass__ = ABCMeta``, which is the
    Python 2 spelling and is silently ignored on Python 3, so
    ``@abstractmethod`` was never enforced and the "abstract" class could be
    instantiated.  Declaring the metaclass in the class signature restores
    the intended contract.
    """

    @abstractmethod
    def generate_signals(self):
        """An implementation is required to return the DataFrame of symbols
        containing the signals to go long, short or hold (1, -1 or 0)."""
        raise NotImplementedError("Should implement generate_signals()!")
class Portfolio(metaclass=ABCMeta):
    """An abstract base class representing a portfolio of
    positions (including both instruments and cash), determined
    on the basis of a set of signals provided by a Strategy.

    Fix: the original assigned ``__metaclass__ = ABCMeta``, the Python 2
    spelling which Python 3 ignores, so the abstract methods were never
    enforced.  The metaclass is now declared in the class signature.
    """

    @abstractmethod
    def generate_positions(self):
        """Provides the logic to determine how the portfolio
        positions are allocated on the basis of forecasting
        signals and available cash."""
        raise NotImplementedError("Should implement generate_positions()!")

    @abstractmethod
    def backtest_portfolio(self):
        """Provides the logic to generate the trading orders
        and subsequent equity curve (i.e. growth of total equity),
        as a sum of holdings and cash, and the bar-period returns
        associated with this curve based on the 'positions' DataFrame.

        Produces a portfolio object that can be examined by
        other classes/functions."""
        raise NotImplementedError("Should implement backtest_portfolio()!")
from abc import ABCMeta, abstractmethod
class Strategy(object):
__metaclass__ = ABCMeta
@abstractmethod
def generate_signals(self):
raise NotImplementedError("Should implement generate_signals()!")
class Portfolio(object):
__metaclass__ = ABCMeta
@abstractmethod
def generate_positions(self):
raise NotImplementedError("Should implement generate_positions()!")
@abstractmethod
def backtest_portfolio(self):
raise NotImplementedError("Should implement backtest_portfolio()!") | true | true |
f7f4c46ffbc95efb9a8733d177f945f9b84df6af | 4,064 | py | Python | flax/types/spend_bundle.py | grayfallstown/flax-blockchain | 58351afec41f78e031507d98b000db2087c2c13f | [
"Apache-2.0"
] | null | null | null | flax/types/spend_bundle.py | grayfallstown/flax-blockchain | 58351afec41f78e031507d98b000db2087c2c13f | [
"Apache-2.0"
] | null | null | null | flax/types/spend_bundle.py | grayfallstown/flax-blockchain | 58351afec41f78e031507d98b000db2087c2c13f | [
"Apache-2.0"
] | null | null | null | import dataclasses
import warnings
from dataclasses import dataclass
from typing import List
from blspy import AugSchemeMPL, G2Element
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.streamable import Streamable, dataclass_from_dict, recurse_jsonify, streamable
from flax.wallet.util.debug_spend_bundle import debug_spend_bundle
from .coin_spend import CoinSpend
@dataclass(frozen=True)
@streamable
class SpendBundle(Streamable):
    """
    This is a list of coins being spent along with their solution programs, and a single
    aggregated signature. This is the object that most closely corresponds to a bitcoin
    transaction (although because of non-interactive signature aggregation, the boundaries
    between transactions are more flexible than in bitcoin).
    """

    coin_spends: List[CoinSpend]
    aggregated_signature: G2Element

    @property
    def coin_solutions(self):
        # Backwards-compatible alias: `coin_spends` used to be called
        # `coin_solutions` (see the deprecation notes further down).
        return self.coin_spends

    @classmethod
    def aggregate(cls, spend_bundles) -> "SpendBundle":
        # Merge several bundles: concatenate their spends and combine their
        # signatures into one G2Element via AugSchemeMPL.aggregate.
        coin_spends: List[CoinSpend] = []
        sigs: List[G2Element] = []
        for bundle in spend_bundles:
            coin_spends += bundle.coin_spends
            sigs.append(bundle.aggregated_signature)
        aggregated_signature = AugSchemeMPL.aggregate(sigs)
        return cls(coin_spends, aggregated_signature)

    def additions(self) -> List[Coin]:
        # All coins created by the spends in this bundle.
        items: List[Coin] = []
        for coin_spend in self.coin_spends:
            items.extend(coin_spend.additions())
        return items

    def removals(self) -> List[Coin]:
        """This should be used only by wallet"""
        return [_.coin for _ in self.coin_spends]

    def fees(self) -> int:
        """Unsafe to use for fees validation!!!"""
        amount_in = sum(_.amount for _ in self.removals())
        amount_out = sum(_.amount for _ in self.additions())
        return amount_in - amount_out

    def name(self) -> bytes32:
        return self.get_hash()

    def debug(self, agg_sig_additional_data=bytes([3] * 32)):
        debug_spend_bundle(self, agg_sig_additional_data)

    def not_ephemeral_additions(self):
        # Additions that are not also removed within this same bundle.
        # NOTE(review): `add in all_removals` is a linear scan over a list,
        # so this loop is O(n^2) in the number of coins.
        all_removals = self.removals()
        all_additions = self.additions()
        result: List[Coin] = []

        for add in all_additions:
            if add in all_removals:
                continue
            result.append(add)

        return result

    # Note that `coin_spends` used to have the bad name `coin_solutions`.
    # Some API still expects this name. For now, we accept both names.
    #
    # TODO: continue this deprecation. Eventually, all code below here should be removed.
    # 1. set `exclude_modern_keys` to `False` (and manually set to `True` where necessary)
    # 2. set `include_legacy_keys` to `False` (and manually set to `False` where necessary)
    # 3. remove all references to `include_legacy_keys=True`
    # 4. remove all code below this point

    @classmethod
    def from_json_dict(cls, json_dict):
        # Accept the legacy `coin_solutions` key, but reject ambiguous input
        # that carries both spellings.
        if "coin_solutions" in json_dict:
            if "coin_spends" not in json_dict:
                json_dict = dict(
                    aggregated_signature=json_dict["aggregated_signature"], coin_spends=json_dict["coin_solutions"]
                )
                warnings.warn("`coin_solutions` is now `coin_spends` in `SpendBundle.from_json_dict`")
            else:
                raise ValueError("JSON contains both `coin_solutions` and `coin_spends`, just use `coin_spends`")
        return dataclass_from_dict(cls, json_dict)

    def to_json_dict(self, include_legacy_keys: bool = True, exclude_modern_keys: bool = True):
        # At least one of the two key spellings must survive in the output.
        if include_legacy_keys is False and exclude_modern_keys is True:
            raise ValueError("`coin_spends` not included in legacy or modern outputs")
        d = dataclasses.asdict(self)
        if include_legacy_keys:
            d["coin_solutions"] = d["coin_spends"]
        if exclude_modern_keys:
            del d["coin_spends"]
        return recurse_jsonify(d)
| 37.284404 | 115 | 0.679626 | import dataclasses
import warnings
from dataclasses import dataclass
from typing import List
from blspy import AugSchemeMPL, G2Element
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.streamable import Streamable, dataclass_from_dict, recurse_jsonify, streamable
from flax.wallet.util.debug_spend_bundle import debug_spend_bundle
from .coin_spend import CoinSpend
@dataclass(frozen=True)
@streamable
class SpendBundle(Streamable):
coin_spends: List[CoinSpend]
aggregated_signature: G2Element
@property
def coin_solutions(self):
return self.coin_spends
@classmethod
def aggregate(cls, spend_bundles) -> "SpendBundle":
coin_spends: List[CoinSpend] = []
sigs: List[G2Element] = []
for bundle in spend_bundles:
coin_spends += bundle.coin_spends
sigs.append(bundle.aggregated_signature)
aggregated_signature = AugSchemeMPL.aggregate(sigs)
return cls(coin_spends, aggregated_signature)
def additions(self) -> List[Coin]:
items: List[Coin] = []
for coin_spend in self.coin_spends:
items.extend(coin_spend.additions())
return items
def removals(self) -> List[Coin]:
return [_.coin for _ in self.coin_spends]
def fees(self) -> int:
amount_in = sum(_.amount for _ in self.removals())
amount_out = sum(_.amount for _ in self.additions())
return amount_in - amount_out
def name(self) -> bytes32:
return self.get_hash()
def debug(self, agg_sig_additional_data=bytes([3] * 32)):
debug_spend_bundle(self, agg_sig_additional_data)
def not_ephemeral_additions(self):
all_removals = self.removals()
all_additions = self.additions()
result: List[Coin] = []
for add in all_additions:
if add in all_removals:
continue
result.append(add)
return result
@classmethod
def from_json_dict(cls, json_dict):
if "coin_solutions" in json_dict:
if "coin_spends" not in json_dict:
json_dict = dict(
aggregated_signature=json_dict["aggregated_signature"], coin_spends=json_dict["coin_solutions"]
)
warnings.warn("`coin_solutions` is now `coin_spends` in `SpendBundle.from_json_dict`")
else:
raise ValueError("JSON contains both `coin_solutions` and `coin_spends`, just use `coin_spends`")
return dataclass_from_dict(cls, json_dict)
def to_json_dict(self, include_legacy_keys: bool = True, exclude_modern_keys: bool = True):
if include_legacy_keys is False and exclude_modern_keys is True:
raise ValueError("`coin_spends` not included in legacy or modern outputs")
d = dataclasses.asdict(self)
if include_legacy_keys:
d["coin_solutions"] = d["coin_spends"]
if exclude_modern_keys:
del d["coin_spends"]
return recurse_jsonify(d)
| true | true |
f7f4c4efa690166b3df688287cfb0d313ebf4bfb | 267 | py | Python | software/python/potentiostat/examples/get_device_id.py | GVRX/potentiostat | 1bb44639180ad6d81697631d4d5f699e6fb4eef1 | [
"MIT"
] | null | null | null | software/python/potentiostat/examples/get_device_id.py | GVRX/potentiostat | 1bb44639180ad6d81697631d4d5f699e6fb4eef1 | [
"MIT"
] | null | null | null | software/python/potentiostat/examples/get_device_id.py | GVRX/potentiostat | 1bb44639180ad6d81697631d4d5f699e6fb4eef1 | [
"MIT"
] | null | null | null | from __future__ import print_function
from potentiostat import Potentiostat
import sys
if len(sys.argv) > 1:
port = sys.argv[1]
else:
port = '/dev/tty.usbmodem65156601'
dev = Potentiostat(port)
rsp = dev.get_device_id()
print('device id: {0}'.format(rsp))
| 19.071429 | 38 | 0.722846 | from __future__ import print_function
from potentiostat import Potentiostat
import sys
if len(sys.argv) > 1:
port = sys.argv[1]
else:
port = '/dev/tty.usbmodem65156601'
dev = Potentiostat(port)
rsp = dev.get_device_id()
print('device id: {0}'.format(rsp))
| true | true |
f7f4c50af7d31fda0da774e1571e4acc94db1c9e | 1,477 | py | Python | multi_thread.py | maneeshdisodia/pythonic_examples | f722bfbe253bbcead111ba082550bdfd1c6046d3 | [
"MIT"
] | null | null | null | multi_thread.py | maneeshdisodia/pythonic_examples | f722bfbe253bbcead111ba082550bdfd1c6046d3 | [
"MIT"
] | null | null | null | multi_thread.py | maneeshdisodia/pythonic_examples | f722bfbe253bbcead111ba082550bdfd1c6046d3 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from threading import Thread
from multiprocessing import Queue
df = pd.DataFrame(data=np.random.rand(100).reshape(10, 10))
print(df.head())
rows = df.index
column = df.columns
que = Queue()
# long run
def long_run(row, col, pv):
    """Set every (row, col) cell of *pv* to 1, then publish *pv* on ``que``."""
    for row_label in row:
        for col_label in col:
            pv.at[row_label, col_label] = 1
    que.put(pv)
threads = []
def thread_run(n, df):
    """Fill *df* with 1s using *n* worker threads, one per row chunk.

    Each worker receives a chunk of row labels, the full column index, and the
    matching row-slice of *df*; results are published on the module-level
    ``que`` by ``long_run``.

    Fixes two defects in the original:
    * ``rows[chunk.min():chunk.max()]`` silently dropped the last row of every
      chunk (slice end is exclusive) — the split index arrays are now used
      directly, and rows are selected by label via ``df.loc``.
    * workers are now joined before returning, so callers draining ``que``
      afterwards cannot race with still-running producers.
    """
    row_chunks = np.array_split(rows, n)
    for i, chunk in enumerate(row_chunks):
        print(i)
        print('min =' + str(chunk.min()) + ' max = ' + str(chunk.max()))
        print()
        worker = Thread(target=long_run,
                        args=(chunk, column, df.loc[chunk]))
        threads.append(worker)
        worker.start()
    for worker in threads:
        worker.join()
if __name__ == '__main__':
    thread_run(4, df)
    # Fix: wait for every worker before draining the queue.  `que.empty()`
    # is racy while producer threads are still running, which could silently
    # drop results (joining an already-finished thread is a no-op).
    for t in threads:
        t.join()
    lst = []
    mydf = pd.DataFrame()
    while not que.empty():
        result = que.get()
        print('thread 1:::::>>>>>>>>')
        print(result)
        lst.append(result)
    print(lst)
# for i in lst:
# mydf = pd.concat([mydf,i], axis=1)
# mydf.head()
# from multiprocessing.pool import ThreadPool
#
#
# def foo(bar, baz):
# print('hello {0}'.format(bar))
# return 'foo' + baz
#
#
# pool = ThreadPool(processes=5)
#
# async_result = pool.apply_async(foo, ('world', 'foo')) # tuple of args for foo
#
# # do some other stuff in the main process
#
# return_val = async_result.get() # get the return value from your function.
| 21.405797 | 104 | 0.570752 | import pandas as pd
import numpy as np
from threading import Thread
from multiprocessing import Queue
df = pd.DataFrame(data=np.random.rand(100).reshape(10, 10))
print(df.head())
rows = df.index
column = df.columns
que = Queue()
def long_run(row, col, pv):
for r in row:
for c in col:
pv.at[r, c] = 1
que.put(pv)
return
threads = []
def thread_run(n, df):
np_r = np.array_split(rows, n)
for i in range(n):
print(i)
print('min =' + str(np_r[i].min()) + ' max = ' + str(np_r[i].max()))
print()
t = Thread(target=long_run,
args=(rows[np_r[i].min():np_r[i].max()], column[:], df[np_r[i].min():np_r[i].max()]))
threads.append(t)
t.start()
if __name__ == '__main__':
thread_run(4, df)
lst = []
mydf = pd.DataFrame()
while not que.empty():
result = que.get()
print('thread 1:::::>>>>>>>>')
print(result)
lst.append(result)
print(lst)
| true | true |
f7f4c5e305e171a8a91aa2eb3a10aa23a29972f3 | 4,905 | py | Python | drosoph_vae/settings/skeleton.py | samuelsmal/drosophVAE | 4b1887e55a5eed1d26c07b6c43de59ffab5fc7c7 | [
"MIT"
] | null | null | null | drosoph_vae/settings/skeleton.py | samuelsmal/drosophVAE | 4b1887e55a5eed1d26c07b6c43de59ffab5fc7c7 | [
"MIT"
] | null | null | null | drosoph_vae/settings/skeleton.py | samuelsmal/drosophVAE | 4b1887e55a5eed1d26c07b6c43de59ffab5fc7c7 | [
"MIT"
] | null | null | null | from enum import Enum
import numpy as np
num_cameras = 7
# Landmark categories produced by the tracker, built with the functional
# Enum API; values are fixed and start at 0.
Tracked = Enum(
    'Tracked',
    [
        ('BODY_COXA', 0),
        ('COXA_FEMUR', 1),
        ('FEMUR_TIBIA', 2),
        ('TIBIA_TARSUS', 3),
        ('TARSUS_TIP', 4),
        ('ANTENNA', 5),
        ('STRIPE', 6),
    ],
)
# Landmark category for each of the 38 tracked joints.  The first 19 entries
# belong to limbs visible from the left-side cameras (3 legs x 5 points, one
# antenna, 3 stripe points); the last 19 mirror them for the right side —
# see __limb_visible_left/right below.
tracked_points = [Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,
                  Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,
                  Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,
                  Tracked.ANTENNA,
                  Tracked.STRIPE, Tracked.STRIPE, Tracked.STRIPE,
                  Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,
                  Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,
                  Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,
                  Tracked.ANTENNA,
                  Tracked.STRIPE, Tracked.STRIPE, Tracked.STRIPE]

# Limb index (0-9) owning each joint id; limbs 3 and 8 are the antennas
# (single point), limbs 4 and 9 the three-point stripes.
limb_id = [0, 0, 0, 0, 0,
           1, 1, 1, 1, 1,
           2, 2, 2, 2, 2,
           3,
           4, 4, 4,
           5, 5, 5, 5, 5,
           6, 6, 6, 6, 6,
           7, 7, 7, 7, 7,
           8,
           9, 9, 9]

# Per-limb visibility masks (indexed by limb id 0-9) for the left, right and
# middle camera views.
__limb_visible_left = [True, True, True, True, True,
                       False, False, False, False, False]

__limb_visible_right = [False, False, False, False, False,
                        True, True, True, True, True]

__limb_visible_mid = [True, True, False, True, False,
                      True, True, False, True, False]

# 2D skeleton edges as pairs of joint ids (antenna joints 15/34 have no 2D bone).
bones = [[0, 1], [1, 2], [2, 3], [3, 4],
         [5, 6], [6, 7], [7, 8], [8, 9],
         [10, 11], [11, 12], [12, 13], [13, 14],
         [16, 17], [17, 18],
         [19, 20], [20, 21], [21, 22], [22, 23],
         [24, 25], [25, 26], [26, 27], [27, 28],
         [29, 30], [30, 31], [31, 32], [32, 33],
         [35, 36], [36, 37]]

# 3D-only edge linking the two antenna joints.
# bones3d = [[15, 34], [15, 16], [34, 16]]
bones3d = [[15, 34]]

# One RGB color per limb (parallel to limb ids 0-9).
colors = [(255, 0, 0),
          (0, 0, 255),
          (0, 255, 0),
          (150, 200, 200),
          (255, 165, 0),
          (255, 255, 0),
          (255, 0, 255),
          (0, 255, 255),
          (150, 200, 200),
          (255, 165, 0)]

num_joints = len(tracked_points)  # 38
num_limbs = len(set(limb_id))     # 10
def is_body_coxa(joint_id):
    # True when the joint is a body-coxa landmark.
    return tracked_points[joint_id] == Tracked.BODY_COXA

def is_coxa_femur(joint_id):
    # True when the joint is a coxa-femur landmark.
    return tracked_points[joint_id] == Tracked.COXA_FEMUR

def is_femur_tibia(joint_id):
    # True when the joint is a femur-tibia landmark.
    return tracked_points[joint_id] == Tracked.FEMUR_TIBIA

def is_tibia_tarsus(joint_id):
    # True when the joint is a tibia-tarsus landmark.
    return tracked_points[joint_id] == Tracked.TIBIA_TARSUS

def is_antenna(joint_id):
    # True when the joint is an antenna landmark.
    return tracked_points[joint_id] == Tracked.ANTENNA

def is_stripe(joint_id):
    # True when the joint is an abdominal-stripe landmark.
    return tracked_points[joint_id] == Tracked.STRIPE

def is_tarsus_tip(joint_id):
    # True when the joint is a tarsus-tip landmark.
    return tracked_points[joint_id] == Tracked.TARSUS_TIP

def get_limb_id(joint_id):
    # Limb index (0-9) owning the given joint.
    return limb_id[joint_id]

def is_joint_visible_left(joint_id):
    # Joint visibility from the left-side cameras, via its limb's mask.
    return __limb_visible_left[get_limb_id(joint_id)]

def is_joint_visible_right(joint_id):
    # Joint visibility from the right-side cameras, via its limb's mask.
    return __limb_visible_right[get_limb_id(joint_id)]

def is_limb_visible_left(limb_id):
    # Limb visibility from the left-side cameras (parameter shadows the module
    # list of the same name; the mask list is still reachable here).
    return __limb_visible_left[limb_id]

def is_limb_visible_right(limb_id):
    # Limb visibility from the right-side cameras.
    return __limb_visible_right[limb_id]

def is_limb_visible_mid(limb_id):
    # Limb visibility from the middle camera.
    return __limb_visible_mid[limb_id]
def camera_see_limb(camera_id, limb_id):
    """Return True when the camera *camera_id* can see limb *limb_id*.

    Cameras 0-2 view the left side, camera 3 the middle, cameras 4-6 the
    right side.
    """
    if camera_id < 3:
        return is_limb_visible_left(limb_id)
    if camera_id == 3:
        return is_limb_visible_mid(limb_id)
    if camera_id > 3:
        return is_limb_visible_right(limb_id)
    raise NotImplementedError
def camera_see_joint(camera_id, joint_id):
    # True when camera *camera_id* can observe joint *joint_id*.
    if camera_id in [2, 4]:  # they cannot see the stripes
        # NOTE(review): the stripe clause below is a no-op — stripe joints sit
        # on limbs 4 and 9 (see limb_id), so `limb_id[joint_id] not in [2, 6]`
        # is always True for stripes and the double negation keeps every
        # stripe joint visible, contradicting the comment above.  Confirm the
        # intended geometry before changing it.
        return camera_see_limb(camera_id, limb_id[joint_id]) and not (tracked_points[joint_id]==Tracked.STRIPE and not (limb_id[joint_id] not in [2, 6]))
    elif camera_id == 3:
        # The middle camera never sees body-coxa joints.
        return camera_see_limb(camera_id, limb_id[joint_id]) and tracked_points[joint_id] != Tracked.BODY_COXA
    else:
        return camera_see_limb(camera_id, limb_id[joint_id])
# Per-joint bone parameters: column 0 appears to be a length/confidence factor
# (0.85), column 1 a tolerance (0.2) — TODO confirm semantics with the solver
# that consumes this array.
bone_param = np.ones((num_joints, 2), dtype=float)
bone_param[:, 0] = 0.85
bone_param[:, 1] = 0.2
for joint_id in range(num_joints):
    if is_body_coxa(joint_id) or is_stripe(joint_id) or is_antenna(joint_id):
        bone_param[joint_id, 1] = 10000  # no bone

# NOTE(review): both ignore lists below are built from identical
# comprehensions; given its name, `ignore_joint_id_wo_stripe` was presumably
# meant to differ (e.g. also exclude stripe joints) — confirm intent.
ignore_joint_id = [joint_id for joint_id in
                   range(num_joints) if
                   is_body_coxa(joint_id) or is_coxa_femur(joint_id) or is_antenna(joint_id)]

ignore_joint_id_wo_stripe = [joint_id for joint_id in
                             range(num_joints) if
                             is_body_coxa(joint_id) or is_coxa_femur(joint_id) or is_antenna(joint_id)]
| 30.65625 | 154 | 0.628746 | from enum import Enum
import numpy as np
num_cameras = 7
class Tracked(Enum):
BODY_COXA = 0
COXA_FEMUR = 1
FEMUR_TIBIA = 2
TIBIA_TARSUS = 3
TARSUS_TIP = 4
ANTENNA = 5
STRIPE = 6
tracked_points = [Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,
Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,
Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,
Tracked.ANTENNA,
Tracked.STRIPE, Tracked.STRIPE, Tracked.STRIPE,
Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,
Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,
Tracked.BODY_COXA, Tracked.COXA_FEMUR, Tracked.FEMUR_TIBIA, Tracked.TIBIA_TARSUS, Tracked.TARSUS_TIP,
Tracked.ANTENNA,
Tracked.STRIPE, Tracked.STRIPE, Tracked.STRIPE]
limb_id = [0, 0, 0, 0, 0,
1, 1, 1, 1, 1,
2, 2, 2, 2, 2,
3,
4, 4, 4,
5, 5, 5, 5, 5,
6, 6, 6, 6, 6,
7, 7, 7, 7, 7,
8,
9, 9, 9]
__limb_visible_left = [True, True, True, True, True,
False, False, False, False, False]
__limb_visible_right = [False, False, False, False, False,
True, True, True, True, True]
__limb_visible_mid = [True, True, False, True, False,
True, True, False, True, False]
bones = [[0, 1], [1, 2], [2, 3], [3, 4],
[5, 6], [6, 7], [7, 8], [8, 9],
[10, 11], [11, 12], [12, 13], [13, 14],
[16, 17], [17, 18],
[19, 20], [20, 21], [21, 22], [22, 23],
[24, 25], [25, 26], [26, 27], [27, 28],
[29, 30], [30, 31], [31, 32], [32, 33],
[35, 36], [36, 37]]
bones3d = [[15, 34]]
colors = [(255, 0, 0),
(0, 0, 255),
(0, 255, 0),
(150, 200, 200),
(255, 165, 0),
(255, 255, 0),
(255, 0, 255),
(0, 255, 255),
(150, 200, 200),
(255, 165, 0)]
num_joints = len(tracked_points)
num_limbs = len(set(limb_id))
def is_body_coxa(joint_id):
return tracked_points[joint_id] == Tracked.BODY_COXA
def is_coxa_femur(joint_id):
return tracked_points[joint_id] == Tracked.COXA_FEMUR
def is_femur_tibia(joint_id):
return tracked_points[joint_id] == Tracked.FEMUR_TIBIA
def is_tibia_tarsus(joint_id):
return tracked_points[joint_id] == Tracked.TIBIA_TARSUS
def is_antenna(joint_id):
return tracked_points[joint_id] == Tracked.ANTENNA
def is_stripe(joint_id):
return tracked_points[joint_id] == Tracked.STRIPE
def is_tarsus_tip(joint_id):
return tracked_points[joint_id] == Tracked.TARSUS_TIP
def get_limb_id(joint_id):
return limb_id[joint_id]
def is_joint_visible_left(joint_id):
return __limb_visible_left[get_limb_id(joint_id)]
def is_joint_visible_right(joint_id):
return __limb_visible_right[get_limb_id(joint_id)]
def is_limb_visible_left(limb_id):
return __limb_visible_left[limb_id]
def is_limb_visible_right(limb_id):
return __limb_visible_right[limb_id]
def is_limb_visible_mid(limb_id):
return __limb_visible_mid[limb_id]
def camera_see_limb(camera_id, limb_id):
if camera_id < 3:
return is_limb_visible_left(limb_id)
elif camera_id==3:
return is_limb_visible_mid(limb_id)
elif camera_id > 3:
return is_limb_visible_right(limb_id)
else:
raise NotImplementedError
def camera_see_joint(camera_id, joint_id):
if camera_id in [2, 4]:
return camera_see_limb(camera_id, limb_id[joint_id]) and not (tracked_points[joint_id]==Tracked.STRIPE and not (limb_id[joint_id] not in [2, 6]))
elif camera_id == 3:
return camera_see_limb(camera_id, limb_id[joint_id]) and tracked_points[joint_id] != Tracked.BODY_COXA
else:
return camera_see_limb(camera_id, limb_id[joint_id])
bone_param = np.ones((num_joints, 2), dtype=float)
bone_param[:, 0] = 0.85
bone_param[:, 1] = 0.2
for joint_id in range(num_joints):
if is_body_coxa(joint_id) or is_stripe(joint_id) or is_antenna(joint_id):
bone_param[joint_id, 1] = 10000
ignore_joint_id = [joint_id for joint_id in
range(num_joints) if
is_body_coxa(joint_id) or is_coxa_femur(joint_id) or is_antenna(joint_id)]
ignore_joint_id_wo_stripe = [joint_id for joint_id in
range(num_joints) if
is_body_coxa(joint_id) or is_coxa_femur(joint_id) or is_antenna(joint_id)]
| true | true |
f7f4c66e3e6c6e0b09291fc26896e0f0da035e95 | 4,577 | py | Python | aiokubernetes/models/v1beta2_stateful_set_update_strategy.py | tantioch/aiokubernetes | 2f332498598ece14d22f8e59ecb02665db6db68d | [
"Apache-2.0"
] | 24 | 2018-07-07T15:12:19.000Z | 2021-09-01T07:33:11.000Z | aiokubernetes/models/v1beta2_stateful_set_update_strategy.py | revoteon/aiokubernetes | 730eae03e4779563740f07ad3ecef180b511ac18 | [
"Apache-2.0"
] | 5 | 2018-07-11T00:09:17.000Z | 2018-10-22T16:41:54.000Z | aiokubernetes/models/v1beta2_stateful_set_update_strategy.py | revoteon/aiokubernetes | 730eae03e4779563740f07ad3ecef180b511ac18 | [
"Apache-2.0"
] | 3 | 2018-07-10T10:16:57.000Z | 2018-10-20T19:32:05.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
from aiokubernetes.models.v1beta2_rolling_update_stateful_set_strategy import V1beta2RollingUpdateStatefulSetStrategy # noqa: F401,E501
class V1beta2StatefulSetUpdateStrategy(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'rolling_update': 'V1beta2RollingUpdateStatefulSetStrategy',
        'type': 'str'
    }

    attribute_map = {
        'rolling_update': 'rollingUpdate',
        'type': 'type'
    }

    # Generated parameter name `type` shadows the builtin; kept for API
    # compatibility with the swagger codegen output.
    def __init__(self, rolling_update=None, type=None):  # noqa: E501
        """V1beta2StatefulSetUpdateStrategy - a model defined in Swagger"""  # noqa: E501

        self._rolling_update = None
        self._type = None
        self.discriminator = None

        if rolling_update is not None:
            self.rolling_update = rolling_update
        if type is not None:
            self.type = type

    @property
    def rolling_update(self):
        """Gets the rolling_update of this V1beta2StatefulSetUpdateStrategy.  # noqa: E501

        RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.  # noqa: E501

        :return: The rolling_update of this V1beta2StatefulSetUpdateStrategy.  # noqa: E501
        :rtype: V1beta2RollingUpdateStatefulSetStrategy
        """
        return self._rolling_update

    @rolling_update.setter
    def rolling_update(self, rolling_update):
        """Sets the rolling_update of this V1beta2StatefulSetUpdateStrategy.

        RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.  # noqa: E501

        :param rolling_update: The rolling_update of this V1beta2StatefulSetUpdateStrategy.  # noqa: E501
        :type: V1beta2RollingUpdateStatefulSetStrategy
        """

        self._rolling_update = rolling_update

    @property
    def type(self):
        """Gets the type of this V1beta2StatefulSetUpdateStrategy.  # noqa: E501

        Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.  # noqa: E501

        :return: The type of this V1beta2StatefulSetUpdateStrategy.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this V1beta2StatefulSetUpdateStrategy.

        Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.  # noqa: E501

        :param type: The type of this V1beta2StatefulSetUpdateStrategy.  # noqa: E501
        :type: str
        """

        self._type = type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recurse into nested models: any value (or list/dict element) that
        # exposes to_dict() is serialized through it.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Compares full instance state, not just the declared swagger fields.
        if not isinstance(other, V1beta2StatefulSetUpdateStrategy):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 31.784722 | 136 | 0.626611 |
import pprint
import re
from aiokubernetes.models.v1beta2_rolling_update_stateful_set_strategy import V1beta2RollingUpdateStatefulSetStrategy
class V1beta2StatefulSetUpdateStrategy(object):
swagger_types = {
'rolling_update': 'V1beta2RollingUpdateStatefulSetStrategy',
'type': 'str'
}
attribute_map = {
'rolling_update': 'rollingUpdate',
'type': 'type'
}
def __init__(self, rolling_update=None, type=None):
self._rolling_update = None
self._type = None
self.discriminator = None
if rolling_update is not None:
self.rolling_update = rolling_update
if type is not None:
self.type = type
@property
def rolling_update(self):
return self._rolling_update
@rolling_update.setter
def rolling_update(self, rolling_update):
self._rolling_update = rolling_update
@property
def type(self):
return self._type
@type.setter
def type(self, type):
self._type = type
def to_dict(self):
result = {}
for attr, _ in self.swagger_types.items():
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1beta2StatefulSetUpdateStrategy):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f7f4c6bf1cb1128800470db66ee985063aad5fd6 | 18,265 | py | Python | bareasgi_graphql_next/controller.py | rob-blackbourn/bareasgi-graphql | 84e46f4e082630973a275b44811ef1136bbde418 | [
"Apache-2.0"
] | 2 | 2019-04-30T11:25:21.000Z | 2019-05-13T19:38:19.000Z | bareasgi_graphql_next/controller.py | rob-blackbourn/bareasgi-graphql | 84e46f4e082630973a275b44811ef1136bbde418 | [
"Apache-2.0"
] | null | null | null | bareasgi_graphql_next/controller.py | rob-blackbourn/bareasgi-graphql | 84e46f4e082630973a275b44811ef1136bbde418 | [
"Apache-2.0"
] | null | null | null | """GraphQL base controller"""
from abc import ABCMeta, abstractmethod
import asyncio
from cgi import parse_multipart
from datetime import datetime
from functools import partial
import io
import logging
from typing import (
Any,
AsyncIterable,
Callable,
Dict,
List,
Mapping,
Optional,
Tuple,
Union,
cast
)
from urllib.parse import parse_qs, urlencode
from bareasgi import (
Application,
HttpRequest,
HttpResponse,
WebSocketRequest,
HttpMiddlewareCallback
)
from bareutils import text_reader, text_writer, response_code, header
import graphql
from graphql import (
ExecutionResult,
GraphQLError,
MapAsyncIterator,
MiddlewareManager
)
from .template import make_template
from .utils import (
cancellable_aiter,
get_host,
get_scheme,
has_subscription,
wrap_middleware,
ZeroEvent
)
LOGGER = logging.getLogger(__name__)
def _encode_sse(
        dumps: Callable[[Any], str],
        execution_result: Optional[ExecutionResult]
) -> bytes:
    """Render one server-sent-event frame for *execution_result*.

    ``None`` produces a keep-alive ``ping`` event carrying the current UTC
    time; anything else produces a ``message`` event whose data is the
    serialized GraphQL response (``data`` plus formatted ``errors``).
    """
    if execution_result is None:
        return f'event: ping\ndata: {datetime.utcnow()}\n\n'.encode('utf-8')

    errors = execution_result.errors
    response = {
        'data': execution_result.data,
        'errors': [error.formatted for error in errors] if errors else None
    }
    return f'event: message\ndata: {dumps(response)}\n\n'.encode('utf-8')
def _encode_json(
        dumps: Callable[[Any], str],
        execution_result: Optional[ExecutionResult]
) -> bytes:
    """Render one newline-delimited JSON frame for *execution_result*.

    ``None`` (a keep-alive ping) becomes a bare newline; anything else
    becomes the serialized GraphQL response (``data`` plus formatted
    ``errors``) followed by a newline.
    """
    if execution_result is None:
        return b'\n'

    errors = execution_result.errors
    document = {
        'data': execution_result.data,
        'errors': [error.formatted for error in errors] if errors else None
    }
    return (dumps(document) + '\n').encode('utf-8')
class GraphQLControllerBase(metaclass=ABCMeta):
    """GraphQL Controller Base"""

    def __init__(
            self,
            path_prefix: str,
            middleware: Optional[Union[Tuple, List, MiddlewareManager]],
            ping_interval: float,
            loads: Callable[[str], Any],
            dumps: Callable[[Any], str]
    ) -> None:
        """Initialize the controller.

        Args:
            path_prefix (str): The path prefix under which routes are mounted.
            middleware (Optional[Union[Tuple, List, MiddlewareManager]]): The
                GraphQL middleware.
            ping_interval (float): The time in seconds between keep-alive
                pings on streaming subscriptions.
            loads (Callable[[str], Any]): The JSON deserializer.
            dumps (Callable[[Any], str]): The JSON serializer.
        """
        self.path_prefix = path_prefix
        self.middleware = middleware
        self.ping_interval = ping_interval
        self.loads = loads
        self.dumps = dumps
        # Set at shutdown to cancel all in-flight streaming subscriptions.
        self.cancellation_event = asyncio.Event()
        # Tracks live subscriptions so shutdown can wait for them to drain.
        self.subscription_count = ZeroEvent()

    def add_routes(
            self,
            app: Application,
            path_prefix: str = '',
            rest_middleware: Optional[HttpMiddlewareCallback] = None,
            view_middleware: Optional[HttpMiddlewareCallback] = None
    ) -> Application:
        """Add the routes

        Args:
            app (Application): The ASGI application.
            path_prefix (str, optional): The path prefix. Defaults to ''.
            rest_middleware (Optional[HttpMiddlewareCallback], optional): The
                rest middleware. Defaults to None.
            view_middleware (Optional[HttpMiddlewareCallback], optional): The
                view middleware. Defaults to None.

        Returns:
            Application: The application.
        """
        # Add the REST routes.
        app.http_router.add(
            {'GET'},
            path_prefix + '/graphql',
            wrap_middleware(rest_middleware, self.handle_graphql)
        )
        app.http_router.add(
            {'POST', 'OPTIONS'},
            path_prefix + '/graphql',
            wrap_middleware(rest_middleware, self.handle_graphql)
        )
        app.http_router.add(
            {'GET'},
            path_prefix + '/subscriptions',
            wrap_middleware(rest_middleware, self.handle_subscription_get)
        )
        app.http_router.add(
            {'POST', 'OPTIONS'},
            path_prefix + '/subscriptions',
            wrap_middleware(rest_middleware, self.handle_subscription_post)
        )

        # Add the subscription route
        app.ws_router.add(
            path_prefix + '/subscriptions',
            self.handle_websocket_subscription
        )

        # Add Graphiql
        app.http_router.add(
            {'GET'},
            path_prefix + '/graphiql',
            wrap_middleware(view_middleware, self.view_graphiql)
        )

        return app

    async def shutdown(self) -> None:
        """Shutdown the service"""
        self.cancellation_event.set()
        await self.subscription_count.wait()

    @staticmethod
    def _internal_server_error() -> HttpResponse:
        """Build the plain-text 500 (Internal Server Error) response shared
        by all request handlers."""
        text = 'Internal server error'
        headers = [
            (b'content-type', b'text/plain'),
            (b'content-length', str(len(text)).encode())
        ]
        return HttpResponse(
            response_code.INTERNAL_SERVER_ERROR,
            headers,
            text_writer(text)
        )

    async def view_graphiql(self, request: HttpRequest) -> HttpResponse:
        """Render the Graphiql view

        Args:
            request (HttpRequest): The request.

        Returns:
            HttpResponse: The response.
        """
        try:
            host = get_host(request)
            scheme = get_scheme(request)
            query_path = f'{scheme}://{host}{self.path_prefix}/graphql'
            ws_scheme = 'ws' if scheme == 'http' else 'wss'
            subscription_path = f'{ws_scheme}://{host}{self.path_prefix}/subscriptions'

            body = make_template(
                host,
                query_path,
                subscription_path
            )
            headers = [
                (b'content-type', b'text/html'),
                (b'content-length', str(len(body)).encode())
            ]
            return HttpResponse(response_code.OK, headers, text_writer(body))
        except Exception:
            # Catch Exception, not BaseException: a bare except here would
            # also swallow asyncio.CancelledError / KeyboardInterrupt /
            # SystemExit and turn cancellation into a 500 response.
            LOGGER.exception("Failed to handle graphiql request")
            return self._internal_server_error()

    @abstractmethod
    async def handle_websocket_subscription(self, request: WebSocketRequest) -> None:
        """Handle a websocket subscription

        Args:
            request (WebSocketRequest): The request
        """

    async def handle_graphql(self, request: HttpRequest) -> HttpResponse:
        """A request handler for graphql queries

        Args:
            request (HttpRequest): The request.

        Returns:
            HttpResponse: The HTTP response to the query request
        """
        try:
            body = await self._get_query_document(request)

            query: str = body['query']
            variables: Optional[Dict[str, Any]] = body.get('variables')
            operation_name: Optional[str] = body.get('operationName')

            query_document = graphql.parse(query)

            if not has_subscription(query_document):
                return await self._handle_query_or_mutation(
                    request,
                    query,
                    variables,
                    operation_name
                )

            # The subscription method is determined by the `allow` header.
            allow = header.find(b'allow', request.scope['headers'], b'GET')

            if allow == b'GET':
                return self._handle_subscription_redirect(request, body)

            return await self._handle_streaming_subscription(
                request,
                query,
                variables,
                operation_name
            )

        except Exception:
            # Exception (not BaseException) so cancellation propagates.
            LOGGER.exception("Failed to handle graphql query request")
            return self._internal_server_error()

    async def handle_subscription_get(self, request: HttpRequest) -> HttpResponse:
        """Handle a streaming subscription

        Args:
            request (HttpRequest): The request

        Returns:
            HttpResponse: The streaming response
        """
        try:
            LOGGER.debug(
                "Received GET streaming subscription request: http_version='%s'.",
                request.scope['http_version']
            )

            # The query document arrives JSON-encoded in the query string.
            body = {
                name.decode('utf-8'): self.loads(value[0].decode('utf-8'))
                for name, value in cast(
                    Dict[bytes, List[bytes]],
                    parse_qs(request.scope['query_string'])
                ).items()
            }

            query: str = body['query']
            variables: Optional[Dict[str, Any]] = body.get('variables')
            operation_name: Optional[str] = body.get('operationName')

            return await self._handle_streaming_subscription(
                request,
                query,
                variables,
                operation_name
            )
        except Exception:
            # Exception (not BaseException) so cancellation propagates.
            LOGGER.exception("Failed to handle graphql GET subscription")
            return self._internal_server_error()

    async def handle_subscription_post(self, request: HttpRequest) -> HttpResponse:
        """Handle a streaming subscription

        Args:
            request (HttpRequest): The request

        Returns:
            HttpResponse: A stream response
        """
        try:
            LOGGER.debug(
                "Received POST streaming subscription request: http_version='%s'.",
                request.scope['http_version']
            )

            text = await text_reader(request.body)
            body = self.loads(text)

            query: str = body['query']
            variables: Optional[Dict[str, Any]] = body.get('variables')
            operation_name: Optional[str] = body.get('operationName')

            return await self._handle_streaming_subscription(
                request,
                query,
                variables,
                operation_name
            )
        except Exception:
            # Exception (not BaseException) so cancellation propagates.
            LOGGER.exception("Failed to handle graphql POST subscription")
            return self._internal_server_error()

    async def _get_query_document(self, request: HttpRequest) -> Mapping[str, Any]:
        """Extract the GraphQL query document from the request body.

        Supports ``application/graphql``, JSON, ``text/plain``, url-encoded
        forms and multipart forms.

        Args:
            request (HttpRequest): The request.

        Raises:
            ValueError: If the content type is missing or malformed.
            RuntimeError: If the content type is unsupported.

        Returns:
            Mapping[str, Any]: The query document.
        """
        content_type = header.content_type(request.scope['headers'])
        if content_type is None:
            raise ValueError('Content type not specified')
        media_type, parameters = content_type

        if media_type == b'application/graphql':
            # The whole body is the query text.
            return {'query': await text_reader(request.body)}
        elif media_type in (b'application/json', b'text/plain'):
            return self.loads(await text_reader(request.body))
        elif media_type == b'application/x-www-form-urlencoded':
            body = parse_qs(await text_reader(request.body))
            return {name: value[0] for name, value in body.items()}
        elif media_type == b'multipart/form-data':
            if parameters is None:
                raise ValueError(
                    'Missing content type parameters for multipart/form-data'
                )
            param_dict = {
                key.decode('utf-8'): val
                for key, val in parameters.items()
            }
            multipart_dict = parse_multipart(
                io.StringIO(await text_reader(request.body)),
                param_dict
            )
            return {
                name: value[0]
                for name, value in multipart_dict.items()
            }
        else:
            raise RuntimeError(
                f"Unsupported content type: {media_type.decode('ascii')}"
            )

    async def _handle_query_or_mutation(
            self,
            request: HttpRequest,
            query: str,
            variables: Optional[Dict[str, Any]],
            operation_name: Optional[str]
    ) -> HttpResponse:
        """Execute a query or mutation and return the JSON response.

        Args:
            request (HttpRequest): The request.
            query (str): The query document.
            variables (Optional[Dict[str, Any]]): Optional variables.
            operation_name (Optional[str]): An optional operation name.

        Returns:
            HttpResponse: The JSON-encoded execution result.
        """
        LOGGER.debug("Processing a query or mutation.")

        result = await self.query(request, query, variables, operation_name)

        response: Dict[str, Any] = {'data': result.data}
        if result.errors:
            response['errors'] = [
                error.formatted for error in result.errors]

        text = self.dumps(response)
        headers = [
            (b'content-type', b'application/json'),
            (b'content-length', str(len(text)).encode())
        ]

        return HttpResponse(response_code.OK, headers, text_writer(text))

    def _handle_subscription_redirect(
            self,
            request: HttpRequest,
            body: Mapping[str, Any]
    ) -> HttpResponse:
        """Redirect a subscription query to the subscriptions endpoint.

        Args:
            request (HttpRequest): The request.
            body (Mapping[str, Any]): The query document.

        Returns:
            HttpResponse: A 201 (Created) response whose ``location`` header
                holds the url of the subscription.
        """
        # Handle a subscription by returning 201 (Created) with
        # the url location of the subscription.
        LOGGER.debug("Redirecting subscription request.")

        scheme = request.scope['scheme']
        host = cast(
            bytes,
            header.find(  # type: ignore
                b'host',
                request.scope['headers'],
                b'localhost'
            )
        ).decode()
        path = self.path_prefix + '/subscriptions'
        query_string = urlencode(
            {
                name.encode('utf-8'): self.dumps(value).encode('utf-8')
                for name, value in body.items()
            }
        )
        location = f'{scheme}://{host}{path}?{query_string}'.encode('ascii')
        headers = [
            (b'access-control-expose-headers', b'location'),
            (b'location', location)
        ]
        return HttpResponse(response_code.CREATED, headers)

    async def _handle_streaming_subscription(
            self,
            request: HttpRequest,
            query: str,
            variables: Optional[Dict[str, Any]],
            operation_name: Optional[str]
    ) -> HttpResponse:
        """Stream subscription results as server-sent events or JSON lines.

        Args:
            request (HttpRequest): The request.
            query (str): The subscription query.
            variables (Optional[Dict[str, Any]]): Optional variables.
            operation_name (Optional[str]): An optional operation name.

        Returns:
            HttpResponse: The streaming response.
        """
        # If unspecified default to server sent events as they have better support.
        accept = cast(
            bytes,
            header.find(
                b'accept', request.scope['headers'], b'text/event-stream')
        )
        content_type = (
            b'application/stream+json'
            if accept == b'application/json'
            else accept
        )

        result = await self.subscribe(request, query, variables, operation_name)

        is_sse = content_type == b'text/event-stream'
        encode = partial(_encode_sse if is_sse else _encode_json, self.dumps)
        nudge = b':\n\n' if is_sse else b'\n'

        # Make an async iterator for the subscription results.
        async def send_events(zero_event: ZeroEvent) -> AsyncIterable[bytes]:
            LOGGER.debug('Streaming subscription started.')

            try:
                zero_event.increment()

                async for val in cancellable_aiter(
                        result,
                        self.cancellation_event,
                        timeout=self.ping_interval
                ):
                    yield encode(val)
                    yield nudge  # Give the ASGI server a nudge.

            except asyncio.CancelledError:
                LOGGER.debug("Streaming subscription cancelled.")
            except Exception as error:  # pylint: disable=broad-except
                LOGGER.exception("Streaming subscription failed.")
                # If the error is not caught the client fetch will fail, however
                # the status code and headers have already been sent. So rather
                # than let the fetch fail we send a GraphQL response with no
                # data and the error and close gracefully.
                if not isinstance(error, GraphQLError):
                    error = GraphQLError(
                        'Execution error',
                        original_error=error
                    )
                val = ExecutionResult(None, [error])
                yield encode(val)
                yield nudge  # Give the ASGI server a nudge.
            finally:
                zero_event.decrement()
                LOGGER.debug("Streaming subscription stopped.")

        headers = [
            (b'cache-control', b'no-cache'),
            (b'content-type', content_type),
            (b'connection', b'keep-alive')
        ]

        return HttpResponse(
            response_code.OK,
            headers,
            send_events(self.subscription_count)
        )

    @abstractmethod
    async def subscribe(
            self,
            request: HttpRequest,
            query: str,
            variables: Optional[Dict[str, Any]],
            operation_name: Optional[str],
    ) -> MapAsyncIterator:
        """Execute a subscription.

        Args:
            request (HttpRequest): The http request.
            query (str): The subscription query.
            variables (Optional[Dict[str, Any]]): Optional variables.
            operation_name (Optional[str]): An optional operation name.

        Returns:
            MapAsyncIterator: An asynchronous iterator of the results.
        """

    @abstractmethod
    async def query(
            self,
            request: HttpRequest,
            query: str,
            variables: Optional[Dict[str, Any]],
            operation_name: Optional[str],
    ) -> ExecutionResult:
        """Execute a query

        Args:
            request (HttpRequest): The http request.
            query (str): The subscription query.
            variables (Optional[Dict[str, Any]]): Optional variables.
            operation_name (Optional[str]): An optional operation name.

        Returns:
            ExecutionResult: The query results.
        """
| 31.876091 | 87 | 0.552642 |
from abc import ABCMeta, abstractmethod
import asyncio
from cgi import parse_multipart
from datetime import datetime
from functools import partial
import io
import logging
from typing import (
Any,
AsyncIterable,
Callable,
Dict,
List,
Mapping,
Optional,
Tuple,
Union,
cast
)
from urllib.parse import parse_qs, urlencode
from bareasgi import (
Application,
HttpRequest,
HttpResponse,
WebSocketRequest,
HttpMiddlewareCallback
)
from bareutils import text_reader, text_writer, response_code, header
import graphql
from graphql import (
ExecutionResult,
GraphQLError,
MapAsyncIterator,
MiddlewareManager
)
from .template import make_template
from .utils import (
cancellable_aiter,
get_host,
get_scheme,
has_subscription,
wrap_middleware,
ZeroEvent
)
LOGGER = logging.getLogger(__name__)
def _encode_sse(
dumps: Callable[[Any], str],
execution_result: Optional[ExecutionResult]
) -> bytes:
if execution_result is None:
payload = f'event: ping\ndata: {datetime.utcnow()}\n\n'
else:
response = {
'data': execution_result.data,
'errors': [
error.formatted
for error in execution_result.errors
] if execution_result.errors else None
}
payload = f'event: message\ndata: {dumps(response)}\n\n'
return payload.encode('utf-8')
def _encode_json(
dumps: Callable[[Any], str],
execution_result: Optional[ExecutionResult]
) -> bytes:
if execution_result is None:
return b'\n'
payload = dumps({
'data': execution_result.data,
'errors': [
error.formatted
for error in execution_result.errors
] if execution_result.errors else None
}) + '\n'
return payload.encode('utf-8')
class GraphQLControllerBase(metaclass=ABCMeta):
def __init__(
self,
path_prefix: str,
middleware: Optional[Union[Tuple, List, MiddlewareManager]],
ping_interval: float,
loads: Callable[[str], Any],
dumps: Callable[[Any], str]
) -> None:
self.path_prefix = path_prefix
self.middleware = middleware
self.ping_interval = ping_interval
self.loads = loads
self.dumps = dumps
self.cancellation_event = asyncio.Event()
self.subscription_count = ZeroEvent()
def add_routes(
self,
app: Application,
path_prefix: str = '',
rest_middleware: Optional[HttpMiddlewareCallback] = None,
view_middleware: Optional[HttpMiddlewareCallback] = None
) -> Application:
app.http_router.add(
{'GET'},
path_prefix + '/graphql',
wrap_middleware(rest_middleware, self.handle_graphql)
)
app.http_router.add(
{'POST', 'OPTIONS'},
path_prefix + '/graphql',
wrap_middleware(rest_middleware, self.handle_graphql)
)
app.http_router.add(
{'GET'},
path_prefix + '/subscriptions',
wrap_middleware(rest_middleware, self.handle_subscription_get)
)
app.http_router.add(
{'POST', 'OPTIONS'},
path_prefix + '/subscriptions',
wrap_middleware(rest_middleware, self.handle_subscription_post)
)
app.ws_router.add(
path_prefix + '/subscriptions',
self.handle_websocket_subscription
)
app.http_router.add(
{'GET'},
path_prefix + '/graphiql',
wrap_middleware(view_middleware, self.view_graphiql)
)
return app
async def shutdown(self) -> None:
self.cancellation_event.set()
await self.subscription_count.wait()
async def view_graphiql(self, request: HttpRequest) -> HttpResponse:
try:
host = get_host(request)
scheme = get_scheme(request)
query_path = f'{scheme}://{host}{self.path_prefix}/graphql'
ws_scheme = 'ws' if scheme == 'http' else 'wss'
subscription_path = f'{ws_scheme}://{host}{self.path_prefix}/subscriptions'
body = make_template(
host,
query_path,
subscription_path
)
headers = [
(b'content-type', b'text/html'),
(b'content-length', str(len(body)).encode())
]
return HttpResponse(response_code.OK, headers, text_writer(body))
except:
LOGGER.exception("Failed to handle grahphiql request")
text = 'Internal server error'
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(
response_code.INTERNAL_SERVER_ERROR,
headers,
text_writer(text)
)
@abstractmethod
async def handle_websocket_subscription(self, request: WebSocketRequest) -> None:
async def handle_graphql(self, request: HttpRequest) -> HttpResponse:
try:
body = await self._get_query_document(request)
query: str = body['query']
variables: Optional[Dict[str, Any]] = body.get('variables')
operation_name: Optional[str] = body.get('operationName')
query_document = graphql.parse(query)
if not has_subscription(query_document):
return await self._handle_query_or_mutation(
request,
query,
variables,
operation_name
)
allow = header.find(b'allow', request.scope['headers'], b'GET')
if allow == b'GET':
return self._handle_subscription_redirect(request, body)
return await self._handle_streaming_subscription(
request,
query,
variables,
operation_name
)
except:
LOGGER.exception("Failed to handle graphql query request")
text = 'Internal server error'
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(
response_code.INTERNAL_SERVER_ERROR,
headers,
text_writer(text)
)
async def handle_subscription_get(self, request: HttpRequest) -> HttpResponse:
try:
LOGGER.debug(
"Received GET streaming subscription request: http_version='%s'.",
request.scope['http_version']
)
body = {
name.decode('utf-8'): self.loads(value[0].decode('utf-8'))
for name, value in cast(
Dict[bytes, List[bytes]],
parse_qs(request.scope['query_string'])
).items()
}
query: str = body['query']
variables: Optional[Dict[str, Any]] = body.get('variables')
operation_name: Optional[str] = body.get('operationName')
return await self._handle_streaming_subscription(
request,
query,
variables,
operation_name
)
except:
LOGGER.exception("Failed to handle graphql GET subscription")
text = 'Internal server error'
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(
response_code.INTERNAL_SERVER_ERROR,
headers,
text_writer(text)
)
async def handle_subscription_post(self, request: HttpRequest) -> HttpResponse:
try:
LOGGER.debug(
"Received POST streaming subscription request: http_version='%s'.",
request.scope['http_version']
)
text = await text_reader(request.body)
body = self.loads(text)
query: str = body['query']
variables: Optional[Dict[str, Any]] = body.get('variables')
operation_name: Optional[str] = body.get('operationName')
return await self._handle_streaming_subscription(
request,
query,
variables,
operation_name
)
except:
LOGGER.exception("Failed to handle graphql POST subscription")
text = 'Internal server error'
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(
response_code.INTERNAL_SERVER_ERROR,
headers,
text_writer(text)
)
async def _get_query_document(self, request: HttpRequest) -> Mapping[str, Any]:
content_type = header.content_type(request.scope['headers'])
if content_type is None:
raise ValueError('Content type not specified')
media_type, parameters = content_type
if media_type == b'application/graphql':
return {'query': await text_reader(request.body)}
elif media_type in (b'application/json', b'text/plain'):
return self.loads(await text_reader(request.body))
elif media_type == b'application/x-www-form-urlencoded':
body = parse_qs(await text_reader(request.body))
return {name: value[0] for name, value in body.items()}
elif media_type == b'multipart/form-data':
if parameters is None:
raise ValueError(
'Missing content type parameters for multipart/form-data'
)
param_dict = {
key.decode('utf-8'): val
for key, val in parameters.items()
}
multipart_dict = parse_multipart(
io.StringIO(await text_reader(request.body)),
param_dict
)
return {
name: value[0]
for name, value in multipart_dict.items()
}
else:
raise RuntimeError(
f"Unsupported content type: {media_type.decode('ascii')}"
)
async def _handle_query_or_mutation(
self,
request: HttpRequest,
query: str,
variables: Optional[Dict[str, Any]],
operation_name: Optional[str]
) -> HttpResponse:
LOGGER.debug("Processing a query or mutation.")
result = await self.query(request, query, variables, operation_name)
response: Dict[str, Any] = {'data': result.data}
if result.errors:
response['errors'] = [
error.formatted for error in result.errors]
text = self.dumps(response)
headers = [
(b'content-type', b'application/json'),
(b'content-length', str(len(text)).encode())
]
return HttpResponse(response_code.OK, headers, text_writer(text))
def _handle_subscription_redirect(
self,
request: HttpRequest,
body: Mapping[str, Any]
) -> HttpResponse:
LOGGER.debug("Redirecting subscription request.")
scheme = request.scope['scheme']
host = cast(
bytes,
header.find(
b'host',
request.scope['headers'],
b'localhost'
)
).decode()
path = self.path_prefix + '/subscriptions'
query_string = urlencode(
{
name.encode('utf-8'): self.dumps(value).encode('utf-8')
for name, value in body.items()
}
)
location = f'{scheme}://{host}{path}?{query_string}'.encode('ascii')
headers = [
(b'access-control-expose-headers', b'location'),
(b'location', location)
]
return HttpResponse(response_code.CREATED, headers)
async def _handle_streaming_subscription(
self,
request: HttpRequest,
query: str,
variables: Optional[Dict[str, Any]],
operation_name: Optional[str]
) -> HttpResponse:
accept = cast(
bytes,
header.find(
b'accept', request.scope['headers'], b'text/event-stream')
)
content_type = (
b'application/stream+json'
if accept == b'application/json'
else accept
)
result = await self.subscribe(request, query, variables, operation_name)
is_sse = content_type == b'text/event-stream'
encode = partial(_encode_sse if is_sse else _encode_json, self.dumps)
nudge = b':\n\n' if is_sse else b'\n'
async def send_events(zero_event: ZeroEvent) -> AsyncIterable[bytes]:
LOGGER.debug('Streaming subscription started.')
try:
zero_event.increment()
async for val in cancellable_aiter(
result,
self.cancellation_event,
timeout=self.ping_interval
):
yield encode(val)
yield nudge
except asyncio.CancelledError:
LOGGER.debug("Streaming subscription cancelled.")
except Exception as error:
LOGGER.exception("Streaming subscription failed.")
if not isinstance(error, GraphQLError):
error = GraphQLError(
'Execution error',
original_error=error
)
val = ExecutionResult(None, [error])
yield encode(val)
yield nudge
finally:
zero_event.decrement()
LOGGER.debug("Streaming subscription stopped.")
headers = [
(b'cache-control', b'no-cache'),
(b'content-type', content_type),
(b'connection', b'keep-alive')
]
return HttpResponse(
response_code.OK,
headers,
send_events(self.subscription_count)
)
@abstractmethod
async def subscribe(
self,
request: HttpRequest,
query: str,
variables: Optional[Dict[str, Any]],
operation_name: Optional[str],
) -> MapAsyncIterator:
@abstractmethod
async def query(
self,
request: HttpRequest,
query: str,
variables: Optional[Dict[str, Any]],
operation_name: Optional[str],
) -> ExecutionResult:
| true | true |
f7f4c6f753830bb7bd47d2fbd61d5c668520ec93 | 3,291 | py | Python | benchmark/startCirq2847.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq2847.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq2847.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=42
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the fixed 4-qubit benchmark circuit over *input_qubit*.

    The gate list below is hard-coded for four qubits; ``n`` is accepted
    but not read in the body — presumably kept for API symmetry with the
    sibling benchmark scripts (TODO confirm).

    Gates are appended one at a time, in order; the trailing
    ``# number=K`` comments are generator bookkeeping for each appended
    operation.
    """
    c = cirq.Circuit()  # circuit begin

    # Initial layer: Hadamards on all qubits, plus scattered Y gates.
    c.append(cirq.H.on(input_qubit[0])) # number=9
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.Y.on(input_qubit[3])) # number=12
    c.append(cirq.H.on(input_qubit[0])) # number=5
    c.append(cirq.H.on(input_qubit[1])) # number=6
    c.append(cirq.Y.on(input_qubit[1])) # number=29
    c.append(cirq.H.on(input_qubit[2])) # number=7
    c.append(cirq.H.on(input_qubit[1])) # number=30
    c.append(cirq.H.on(input_qubit[3])) # number=8
    # H-CZ-H sandwiches between qubits 0 and 3 (each equivalent to a CNOT),
    # interleaved with explicit CNOTs and an X on qubit 3.
    c.append(cirq.H.on(input_qubit[3])) # number=19
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=20
    c.append(cirq.H.on(input_qubit[3])) # number=21
    c.append(cirq.H.on(input_qubit[3])) # number=24
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=25
    c.append(cirq.H.on(input_qubit[3])) # number=26
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=31
    c.append(cirq.X.on(input_qubit[3])) # number=32
    c.append(cirq.H.on(input_qubit[3])) # number=39
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=40
    c.append(cirq.H.on(input_qubit[3])) # number=41
    c.append(cirq.H.on(input_qubit[3])) # number=36
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=37
    c.append(cirq.H.on(input_qubit[3])) # number=38
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=15
    c.append(cirq.Y.on(input_qubit[2])) # number=10
    c.append(cirq.Y.on(input_qubit[2])) # number=11
    # Paired SWAPs (each pair is an identity overall).
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=22
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=23
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=27
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=28
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=34
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=35
    # circuit end

    # Measure every qubit under a single 'result' key.
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Join measurement outcomes into a string of '0'/'1' characters."""
    return ''.join(map(str, map(int, bits)))
if __name__ == '__main__':
    # Build and run the 4-qubit benchmark circuit, then record the
    # measurement histogram and the optimized circuit to a CSV report.
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Lower the circuit to Sycamore-native sqrt-iSWAP gates.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    frequencies = result.histogram(key='result', fold_func=bitstring)

    # Use a context manager so the report file is closed even if a
    # print raises; the original open()/close() pair leaked the handle
    # on error.
    with open("../data/startCirq2847.csv", "w+") as writefile:
        print(format(frequencies), file=writefile)
        print("results end", file=writefile)

        print(circuit.__len__(), file=writefile)
        print(circuit, file=writefile)
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
from cirq.contrib.svg import SVGCircuit
def make_circuit(n: int, input_qubit):
    """Build the fixed 4-qubit demo circuit, terminated by a full measurement.

    Note: `n` is not used by the gate sequence; the circuit always acts on
    the first four entries of `input_qubit`. The measurement is recorded
    under the key 'result'.
    """
    gate_sequence = [
        cirq.H.on(input_qubit[0]),
        cirq.H.on(input_qubit[1]),
        cirq.H.on(input_qubit[2]),
        cirq.H.on(input_qubit[3]),
        cirq.Y.on(input_qubit[3]),
        cirq.H.on(input_qubit[0]),
        cirq.H.on(input_qubit[1]),
        cirq.Y.on(input_qubit[1]),
        cirq.H.on(input_qubit[2]),
        cirq.H.on(input_qubit[1]),
        cirq.H.on(input_qubit[3]),
        cirq.H.on(input_qubit[3]),
        cirq.CZ.on(input_qubit[0],input_qubit[3]),
        cirq.H.on(input_qubit[3]),
        cirq.H.on(input_qubit[3]),
        cirq.CZ.on(input_qubit[0],input_qubit[3]),
        cirq.H.on(input_qubit[3]),
        cirq.CNOT.on(input_qubit[0],input_qubit[3]),
        cirq.X.on(input_qubit[3]),
        cirq.H.on(input_qubit[3]),
        cirq.CZ.on(input_qubit[0],input_qubit[3]),
        cirq.H.on(input_qubit[3]),
        cirq.H.on(input_qubit[3]),
        cirq.CZ.on(input_qubit[0],input_qubit[3]),
        cirq.H.on(input_qubit[3]),
        cirq.CNOT.on(input_qubit[0],input_qubit[3]),
        cirq.Y.on(input_qubit[2]),
        cirq.Y.on(input_qubit[2]),
        cirq.SWAP.on(input_qubit[3],input_qubit[0]),
        cirq.SWAP.on(input_qubit[3],input_qubit[0]),
        cirq.SWAP.on(input_qubit[1],input_qubit[0]),
        cirq.SWAP.on(input_qubit[1],input_qubit[0]),
        cirq.SWAP.on(input_qubit[3],input_qubit[0]),
        cirq.SWAP.on(input_qubit[3],input_qubit[0]),
    ]
    c = cirq.Circuit()
    # Append one operation at a time, exactly matching a sequential build
    for op in gate_sequence:
        c.append(op)
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Fold a sequence of measured bits into a string of decimal digits."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    qubit_count = 4
    # One column of grid qubits: (0,0) .. (3,0)
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile the circuit for Google's Sycamore device using the sqrt-iswap gate set
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    # Histogram of measured bitstrings over all repetitions (key set in make_circuit)
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Dump histogram, circuit length (moment count), and circuit diagram to CSV
    writefile = open("../data/startCirq2847.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
f7f4c74b0831cf200b31baf17e151bdd4d169d7d | 8,841 | py | Python | shotgunCache/validateFields.py | moonbot/shotgun-cache-server | 1a4d287e89cf6422b564accc5db4e7879aaad51d | [
"MIT"
] | 11 | 2015-07-15T09:29:45.000Z | 2020-07-20T03:45:57.000Z | shotgunCache/validateFields.py | moonbot/shotgun-cache-server | 1a4d287e89cf6422b564accc5db4e7879aaad51d | [
"MIT"
] | 1 | 2016-03-11T16:58:51.000Z | 2021-04-04T18:08:05.000Z | shotgunCache/validateFields.py | moonbot/shotgun-cache-server | 1a4d287e89cf6422b564accc5db4e7879aaad51d | [
"MIT"
] | 3 | 2016-03-10T08:51:37.000Z | 2018-02-16T07:04:17.000Z | import os
import time
import logging
import multiprocessing
import difflib
import Queue
import rethinkdb
import utils
__all__ = [
'FieldValidator'
]
LOG = logging.getLogger(__name__)
class FieldValidator(object):
    """Compares cached entity fields against live Shotgun data.

    Work items (one per entity config) are fanned out to
    `FieldValidateWorker` subprocesses via a JoinableQueue; per-type
    results are drained from a result queue once all items are done.
    """
    def __init__(self, config, entityConfigManager, entityConfigs, filters, filterOperator, fields, allCachedFields=False):
        super(FieldValidator, self).__init__()
        self.config = config
        self.entityConfigManager = entityConfigManager
        self.entityConfigs = entityConfigs
        self.filters = filters
        self.fields = fields
        self.filterOperator = filterOperator
        # When True, workers validate every cached field instead of `fields`
        self.allCachedFields = allCachedFields
        self.workQueue = multiprocessing.JoinableQueue()
        self.resultQueue = multiprocessing.Queue()
        self.processes = []
        self.results = []
    def start(self, raiseExc=True):
        """Run the full validation cycle and return the result list.

        Raises RuntimeError if any entity type failed and `raiseExc` is True.
        """
        LOG.info("Starting Validate Counts")
        self.launchWorkers()
        self.run()
        self.terminateWorkers()
        if raiseExc:
            failed = []
            for result in self.results:
                if result['failed']:
                    failed.append(result)
            if len(failed):
                raise RuntimeError("Validation Failed, {0} cached entity type(s) do not match".format(len(failed)))
        return self.results
    def launchWorkers(self):
        """Spawn worker processes; never more than one per entity config."""
        processCount = min(len(self.entityConfigs), self.config['validate_counts.processes'])
        LOG.debug("Launching {0} validate workers".format(processCount))
        for n in range(processCount):
            worker = FieldValidateWorker(
                self.workQueue,
                self.resultQueue,
                self.config,
                self.entityConfigManager,
                self.entityConfigs,
                filters=self.filters,
                filterOperator=self.filterOperator,
                fields=self.fields,
                allCachedFields=self.allCachedFields,
            )
            # worker.start opens its own connections inside the child process
            proc = multiprocessing.Process(target=worker.start)
            proc.start()
            self.processes.append(proc)
    def run(self):
        """Queue one work item per entity config, then collect all results."""
        LOG.debug("Adding items to validate queue")
        for config in self.entityConfigs:
            data = {'configType': config.type}
            self.workQueue.put(data)
        # Block until every queued item has been task_done()'d by a worker
        self.workQueue.join()
        results = []
        while True:
            try:
                result = self.resultQueue.get(False)
            except Queue.Empty:
                break
            else:
                if result:
                    results.append(result)
        self.results = results
    def terminateWorkers(self):
        """Forcibly terminate all worker processes (they loop forever)."""
        LOG.debug("Terminating validate workers")
        for proc in self.processes:
            proc.terminate()
        self.processes = []
class ValidateWorker(object):
    """Base class for validation worker processes.

    Subclasses implement `run()`. `start()` is the process entry point:
    it opens the Shotgun and rethinkdb connections (inside the child
    process) and then hands control to `run()`.
    """
    def __init__(self, workQueue, resultQueue, config, entityConfigManager, entityConfigs, **kwargs):
        super(ValidateWorker, self).__init__()
        self.workQueue = workQueue
        self.resultQueue = resultQueue
        self.config = config
        self.entityConfigManager = entityConfigManager
        # Index configs by entity type for O(1) lookup in run()
        self.entityConfigs = dict([(c.type, c) for c in entityConfigs])
        # Extra keyword options (filters, fields, ...) become attributes
        for k, v in kwargs.items():
            setattr(self, k, v)
        self.sg = None       # Shotgun connection, created in start()
        self.rethink = None  # rethinkdb connection, created in start()
    def start(self):
        """Open connections and enter the worker loop."""
        self.sg = self.config.createShotgunConnection(convert_datetimes_to_utc=False)
        self.rethink = self.config.createRethinkConnection()
        self.run()
    def run(self):
        # BUGFIX: was `raise NotImplemented()`, which raises TypeError
        # because the NotImplemented singleton is not callable.
        raise NotImplementedError
class FieldValidateWorker(ValidateWorker):
    """Worker process that diffs cached entity data against live Shotgun data.

    For each work item it fetches the matching entities from Shotgun and
    from the rethinkdb cache, strips linked entities down to {type, id},
    diffs the JSON representations field-by-field, and posts one result
    dict on the result queue.
    """
    def stripNestedEntities(self, entityConfig, entities):
        """Reduce linked entities to bare {type, id} dicts, in place.

        Shotgun returns extra display fields on linked entities; the cache
        stores only the base entity, so strip before comparing.
        """
        for entity in entities:
            entitySchema = self.entityConfigManager.schema[entityConfig.type]
            for field, val in entity.items():
                if field not in entitySchema:
                    continue
                if field in ['type', 'id']:
                    continue
                fieldDataType = entitySchema[field].get('data_type', {}).get('value', None)
                if fieldDataType == 'multi_entity':
                    entity[field] = [utils.getBaseEntity(e) for e in val]
                elif fieldDataType == 'entity':
                    entity[field] = utils.getBaseEntity(val)
    def run(self):
        """Consume work items forever, posting one result per entity type."""
        workerPID = os.getpid()
        LOG.debug("Field Validate Worker Running: {0}".format(workerPID))
        while True:
            try:
                work = self.workQueue.get()
            # BUGFIX: was `Queue.Emtpy`, which would raise AttributeError
            # the moment the handler was entered.
            except Queue.Empty:
                continue
            time.sleep(0.1)
            entityConfig = self.entityConfigs[work['configType']]
            if self.allCachedFields:
                # list() keeps this working on Python 3 dict views too
                fields = list(entityConfig['fields'].keys())
            else:
                fields = self.fields[:]
            fields.append('id')
            fields.append('type')
            fields = list(set(fields))
            LOG.debug("Getting fields from Shotgun for type: {0}".format(work['configType']))
            shotgunResult = self.sg.find(
                entityConfig.type,
                filter_operator=self.filterOperator,
                filters=self.filters,
                fields=fields,
                order=[{'field_name': 'id', 'direction': 'asc'}]
            )
            # Convert any nested entities to base entities (type and id only)
            self.stripNestedEntities(entityConfig, shotgunResult)
            # Group into a dictionary with the id as key to match with cache
            shotgunMap = dict([(e['id'], e) for e in shotgunResult])
            LOG.debug("Getting fields from cache for type: {0}".format(work['configType']))
            cacheMatches = list(rethinkdb.table(entityConfig['table'])
                                .filter(lambda e: e['id'] in shotgunMap.keys())
                                .pluck(fields)
                                .run(self.rethink))
            cacheMap = dict([(e['id'], e) for e in cacheMatches])
            # Check for ids present on one side only
            missingFromCache = {}
            missingFromShotgun = {}
            if len(cacheMap) != len(shotgunMap):
                cacheIDSet = set(cacheMap)
                shotgunIDSet = set(shotgunMap)
                # BUGFIX: entities missing from the cache exist only in Shotgun,
                # so their ids are shotgun - cache and their data comes from
                # shotgunMap. The old code had the direction inverted and
                # indexed cacheMap with ids guaranteed absent (KeyError).
                missingFromCache = dict(
                    [(_id, shotgunMap[_id]) for _id in shotgunIDSet.difference(cacheIDSet)])
                missingFromShotgun = dict(
                    [(_id, cacheMap[_id]) for _id in cacheIDSet.difference(shotgunIDSet)])
            # Compare the data for each entity present on both sides
            diffs = []
            for _id, shotgunData in shotgunMap.items():
                if _id not in cacheMap:
                    continue
                cacheData = cacheMap[_id]
                # Sort nested multi-entity fields by id; Shotgun does not
                # guarantee a stable order for them.
                shotgunData = utils.sortMultiEntityFieldsByID(self.entityConfigManager.schema, shotgunData)
                cacheData = utils.sortMultiEntityFieldsByID(self.entityConfigManager.schema, cacheData)
                shotgunJson = utils.prettyJson(shotgunData)
                cacheJson = utils.prettyJson(cacheData)
                if shotgunJson != cacheJson:
                    diff = difflib.unified_diff(
                        str(shotgunJson).split('\n'),
                        str(cacheJson).split('\n'),
                        lineterm="",
                        n=5,
                    )
                    header = '{type}:{id}\n'.format(type=work['configType'], id=_id)
                    # Drop the first 3 lines ('---', '+++', '@@' header).
                    # BUGFIX: was `diff.next()`, which is Python-2-only.
                    diff = header + '\n'.join(list(diff)[3:])
                    diffs.append(diff)
            # BUGFIX: `failed` was never set to True; FieldValidator.start()
            # relies on it to decide whether validation failed.
            failed = bool(diffs or missingFromCache or missingFromShotgun)
            result = {
                'work': work,
                'entityType': work['configType'],
                'failed': failed,
                'shotgunMatchCount': len(shotgunMap),
                'cacheMatchCount': len(cacheMap),
                'missingFromCache': missingFromCache,
                'missingFromShotgun': missingFromShotgun,
                'diffs': diffs,
            }
            self.resultQueue.put(result)
            self.workQueue.task_done()
| 36.533058 | 123 | 0.557516 | import os
import time
import logging
import multiprocessing
import difflib
import Queue
import rethinkdb
import utils
__all__ = [
'FieldValidator'
]
LOG = logging.getLogger(__name__)
class FieldValidator(object):
def __init__(self, config, entityConfigManager, entityConfigs, filters, filterOperator, fields, allCachedFields=False):
super(FieldValidator, self).__init__()
self.config = config
self.entityConfigManager = entityConfigManager
self.entityConfigs = entityConfigs
self.filters = filters
self.fields = fields
self.filterOperator = filterOperator
self.allCachedFields = allCachedFields
self.workQueue = multiprocessing.JoinableQueue()
self.resultQueue = multiprocessing.Queue()
self.processes = []
self.results = []
def start(self, raiseExc=True):
LOG.info("Starting Validate Counts")
self.launchWorkers()
self.run()
self.terminateWorkers()
if raiseExc:
failed = []
for result in self.results:
if result['failed']:
failed.append(result)
if len(failed):
raise RuntimeError("Validation Failed, {0} cached entity type(s) do not match".format(len(failed)))
return self.results
def launchWorkers(self):
processCount = min(len(self.entityConfigs), self.config['validate_counts.processes'])
LOG.debug("Launching {0} validate workers".format(processCount))
for n in range(processCount):
worker = FieldValidateWorker(
self.workQueue,
self.resultQueue,
self.config,
self.entityConfigManager,
self.entityConfigs,
filters=self.filters,
filterOperator=self.filterOperator,
fields=self.fields,
allCachedFields=self.allCachedFields,
)
proc = multiprocessing.Process(target=worker.start)
proc.start()
self.processes.append(proc)
def run(self):
LOG.debug("Adding items to validate queue")
for config in self.entityConfigs:
data = {'configType': config.type}
self.workQueue.put(data)
self.workQueue.join()
results = []
while True:
try:
result = self.resultQueue.get(False)
except Queue.Empty:
break
else:
if result:
results.append(result)
self.results = results
def terminateWorkers(self):
LOG.debug("Terminating validate workers")
for proc in self.processes:
proc.terminate()
self.processes = []
class ValidateWorker(object):
def __init__(self, workQueue, resultQueue, config, entityConfigManager, entityConfigs, **kwargs):
super(ValidateWorker, self).__init__()
self.workQueue = workQueue
self.resultQueue = resultQueue
self.config = config
self.entityConfigManager = entityConfigManager
self.entityConfigs = dict([(c.type, c) for c in entityConfigs])
for k, v in kwargs.items():
setattr(self, k, v)
self.sg = None
self.rethink = None
def start(self):
self.sg = self.config.createShotgunConnection(convert_datetimes_to_utc=False)
self.rethink = self.config.createRethinkConnection()
self.run()
def run(self):
raise NotImplemented()
class FieldValidateWorker(ValidateWorker):
def stripNestedEntities(self, entityConfig, entities):
for entity in entities:
entitySchema = self.entityConfigManager.schema[entityConfig.type]
for field, val in entity.items():
if field not in entitySchema:
continue
if field in ['type', 'id']:
continue
fieldDataType = entitySchema[field].get('data_type', {}).get('value', None)
if fieldDataType == 'multi_entity':
val = [utils.getBaseEntity(e) for e in val]
entity[field] = val
elif fieldDataType == 'entity':
val = utils.getBaseEntity(val)
entity[field] = val
def run(self):
workerPID = os.getpid()
LOG.debug("Field Validate Worker Running: {0}".format(workerPID))
while True:
try:
work = self.workQueue.get()
except Queue.Emtpy:
continue
time.sleep(0.1)
entityConfig = self.entityConfigs[work['configType']]
if self.allCachedFields:
fields = entityConfig['fields'].keys()
else:
fields = self.fields[:]
fields.append('id')
fields.append('type')
fields = list(set(fields))
LOG.debug("Getting fields from Shotgun for type: {0}".format(work['configType']))
shotgunResult = self.sg.find(
entityConfig.type,
filter_operator=self.filterOperator,
filters=self.filters,
fields=fields,
order=[{'field_name': 'id', 'direction': 'asc'}]
)
self.stripNestedEntities(entityConfig, shotgunResult)
# Group into a dictionary with the id as key
shotgunMap = dict([(e['id'], e) for e in shotgunResult])
LOG.debug("Getting fields from cache for type: {0}".format(work['configType']))
# Have to batch requests to shotgun in groups of 1024
cacheMatches = []
LOG.debug("Getting total match count from cache for type: {0}".format(work['configType']))
cacheMatches = list(rethinkdb.table(entityConfig['table'])
.filter(lambda e: e['id'] in shotgunMap.keys())
.pluck(fields)
.run(self.rethink))
# Check for missing ids
missingFromCache = []
missingFromShotgun = []
# print("cacheMatches: {0}".format(cacheMatches)) # TESTING
cacheMap = dict([(e['id'], e) for e in cacheMatches])
if len(cacheMap) != len(shotgunMap):
cacheIDSet = set(cacheMap)
shotgunIDSet = set(shotgunMap.keys())
missingIDsFromCache = cacheIDSet.difference(shotgunIDSet)
missingFromCache = dict([(_id, cacheMap[_id]) for _id in missingIDsFromCache])
missingIDsFromShotgun = shotgunIDSet.difference(cacheIDSet)
missingFromShotgun = dict([(_id, cacheMap[_id]) for _id in missingIDsFromShotgun])
# Compare the data for each
failed = False
diffs = []
for _id, shotgunData in shotgunMap.items():
if _id not in cacheMap:
continue
cacheData = cacheMap[_id]
# Sort the nested entities by ID
# Their sort order is not enforced by shotgun
# So we can't count on it staying consistent
shotgunData = utils.sortMultiEntityFieldsByID(self.entityConfigManager.schema, shotgunData)
cacheData = utils.sortMultiEntityFieldsByID(self.entityConfigManager.schema, cacheData)
shotgunJson = utils.prettyJson(shotgunData)
cacheJson = utils.prettyJson(cacheData)
if shotgunJson != cacheJson:
diff = difflib.unified_diff(
str(shotgunJson).split('\n'),
str(cacheJson).split('\n'),
lineterm="",
n=5,
)
header = '{type}:{id}\n'.format(type=work['configType'], id=_id)
[diff.next() for x in range(3)]
diff = header + '\n'.join(diff)
diffs.append(diff)
result = {
'work': work,
'entityType': work['configType'],
'failed': failed,
'shotgunMatchCount': len(shotgunMap),
'cacheMatchCount': len(cacheMap),
'missingFromCache': missingFromCache,
'missingFromShotgun': missingFromShotgun,
'diffs': diffs,
}
self.resultQueue.put(result)
self.workQueue.task_done()
| true | true |
f7f4c7d8c5f1a0714e9af14419ae462deb0470c5 | 15,353 | py | Python | pdf_operations.py | TapirLab/pdf-watermarkin | f4e07f068ebb17e36fa2c8065f432ebd0d92a804 | [
"MIT"
] | 2 | 2021-02-23T20:00:18.000Z | 2021-04-24T21:38:01.000Z | pdf_operations.py | TapirLab/pdf-watermarking | f4e07f068ebb17e36fa2c8065f432ebd0d92a804 | [
"MIT"
] | null | null | null | pdf_operations.py | TapirLab/pdf-watermarking | f4e07f068ebb17e36fa2c8065f432ebd0d92a804 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This program includes functions to add watermark to A4 PDFs. Also, miscellaneous
functions are provided to harden OCR (Optical Character Recognition) process and
make encryption possible.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% A PDF Watermarking Script
%% -------------------
%% $Author: Halil Said Cankurtaran$,
%% $Date: January 10th, 2020$,
%% $Revision: 1.0$
%% Tapir Lab.
%% Copyright: Tapir Lab.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
import os
import glob
from datetime import datetime
import cv2
import numpy as np
import pikepdf
from pdf2image import convert_from_path
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4, landscape
from PyPDF2 import PdfFileWriter, PdfFileReader, PdfFileMerger
def set_page(name, orientation):
    """Return a fresh reportlab canvas for file `name`.

    The page is A4 portrait unless `orientation` is 'landscape'.
    """
    page_size = landscape(A4) if orientation == 'landscape' else A4
    return canvas.Canvas(name, pagesize=page_size)
def draw_image(canvas_, image, orientation):
    """Draw `image` onto `canvas_`, scaled to fill a full A4 page.

    `canvas_` is a page created with `set_page`. Because the image is
    stretched to A4 dimensions, a canvas of any other size will yield a
    mis-scaled drawing.
    """
    if orientation == 'landscape':
        width, height = A4[1], A4[0]
    else:
        width, height = A4
    canvas_.drawImage(image, 0, 0, width=width, height=height)
def blur_image(page, kernel=(5,5), sigma=1):
    """Add uint8 Gaussian noise (std `sigma`) to `page`, then Gaussian-blur it.

    If `sigma=0`, OpenCV derives it from the kernel size as
    0.3*((ksize-1)*0.5 - 1) (~1.1 for ksize=5).

    Args:
        page (PIL.PngImagePlugin.PngImageFile):
            A PDF page converted to PNG via `pdf2image.convert_from_path`.
        kernel (tuple, optional): Gaussian blur kernel size. Defaults to (5,5).
        sigma (float, optional): Gaussian blur sigma value. Defaults to 1.

    Returns:
        np.ndarray, dtype=np.uint8: The blurred image.
    """
    img = np.asarray(page)
    noise = np.random.normal(0, sigma, img.size)
    noise = noise.reshape(img.shape[0], img.shape[1], img.shape[2]).astype('uint8')
    noisy = cv2.add(img, noise)
    return cv2.GaussianBlur(noisy, kernel, sigma)
def pdf_to_image(path_to_pdf, output_folder, dpi=100, blur=True, kernel=(5,5), sigma=1):
    """Convert each page of a PDF to a PNG, optionally blurring it first.

    Pages are written into `output_folder` as '_0.png', '_1.png', ...
    Blurring (noise + Gaussian blur) is used to harden OCR of the output.

    Args:
        path_to_pdf (str): Path to the input PDF.
        output_folder (str): Folder the page images are saved into.
        dpi (int, optional): Dots per inch for rasterization. Defaults to 100.
        blur (bool, optional): Whether to blur each page. Defaults to True.
        kernel (tuple, optional): Gaussian blur kernel size. Defaults to (5,5).
        sigma (float, optional): Gaussian noise/blur sigma. Defaults to 1.
    """
    pages = convert_from_path(path_to_pdf, dpi, fmt='PNG')
    # enumerate() replaces the original zip(pages, range(len(pages))) idiom
    for j, page in enumerate(pages):
        png_output = os.path.join('.', os.path.join(output_folder, f'_{j}.png'))
        if blur:
            # Required to harden optical character recognition (OCR)
            cv2.imwrite(png_output, blur_image(page, kernel, sigma))
        else:
            page.save(png_output, 'PNG')
def image_to_pdf(images_folder, output_folder, orientation, remove_artifacts=False):
    """Write each '*.png' in `images_folder` onto its own A4 PDF page.

    Images are scaled to fill the page, so images whose aspect ratio does
    not match A4 will be distorted. Output files are named 'tmp_<n>.pdf'.

    Args:
        images_folder (str): Folder containing the input PNG images.
        output_folder (str): Folder the single-page PDFs are written to.
        orientation (str): Page orientation, 'landscape' or 'portrait'.
        remove_artifacts (bool, optional): Remove each input image after it
            has been written to a PDF. Defaults to False.
    """
    path_to_images = sorted(glob.glob(os.path.join(images_folder, '*.png')))
    # enumerate() replaces the original zip(list, range(len(list))) idiom
    for j, image in enumerate(path_to_images):
        canvas_ = set_page(os.path.join(output_folder, f'tmp_{j}.pdf'), orientation)
        draw_image(canvas_, image, orientation)
        canvas_.save()
        if remove_artifacts:
            os.remove(image)
def merge_pdfs(input_folder, path_to_output_pdf, remove_artifacts=False):
    """Merge every '*.pdf' in `input_folder` (sorted) into one output PDF.

    Args:
        input_folder (str): Folder containing the PDFs to merge.
        path_to_output_pdf (str): Full path (folder + name) of the merged PDF.
        remove_artifacts (bool, optional): Remove the input PDFs after the
            merged file has been written. Defaults to False.
    """
    input_pdfs = sorted(glob.glob(os.path.join(input_folder, "*.pdf")))
    merger = PdfFileMerger()
    for pdf_path in input_pdfs:
        merger.append(pdf_path)
    with open(path_to_output_pdf, 'wb') as merged_file:
        merger.write(merged_file)
    merger.close()
    if remove_artifacts:
        for pdf_path in input_pdfs:
            os.remove(pdf_path)
def pdf_to_image_to_pdf(input_pdf,
                        tmp_folder,
                        output_folder,
                        orientation,
                        remove_original=False,
                        remove_artifacts=False):
    """Round-trip a PDF through images (no blurring) and back into one PDF.

    Pipeline: rasterize pages into `tmp_folder`, wrap each image in an A4
    page, merge them into '<name>_im2pdf.pdf' inside `output_folder`.

    Args:
        input_pdf (str): Path to the input PDF.
        tmp_folder (str): Folder for intermediate images/PDFs.
        output_folder (str): Folder the processed PDF is saved into.
        orientation (str): Page orientation, 'landscape' or 'portrait'.
        remove_original (bool, optional): Remove `input_pdf` when done.
            Defaults to False.
        remove_artifacts (bool, optional): Remove intermediate files created
            during conversion. Defaults to False.

    Returns:
        str: Path of the processed PDF.
    """
    base_name = input_pdf.split(os.sep)[-1].split('.')[0]
    output_pdf = os.path.join(output_folder, base_name + '_im2pdf' + '.pdf')
    pdf_to_image(input_pdf, tmp_folder, blur=False)
    image_to_pdf(tmp_folder, tmp_folder, orientation, remove_artifacts)
    merge_pdfs(tmp_folder, output_pdf, remove_artifacts)
    if remove_original:
        os.remove(input_pdf)
    return output_pdf
def blur_pages_of_pdf(input_pdf,
                      orientation,
                      tmp_folder,
                      output_folder,
                      dpi=100,
                      kernel=(5,5),
                      sigma=1,
                      remove_artifacts=False,
                      ):
    """Rasterize a PDF's pages, blur them, and merge them back into one PDF.

    The result is saved as '<name>_blurred.pdf' inside `output_folder`.
    Set `remove_artifacts` to True to clear the temporary files created
    during conversion.

    Args:
        input_pdf (str): Path to input PDF.
        orientation (str): Orientation of page, 'landscape' or 'portrait'.
        tmp_folder (str): Folder the intermediate images/PDFs are saved to.
        output_folder (str): Folder the processed PDF is saved to.
        dpi (int, optional): Dots Per Inch, conversion parameter. Default = 100.
        kernel (tuple, optional): Gaussian blur kernel size. Defaults to (5,5).
        sigma (float, optional): Gaussian blur sigma value. Defaults to 1.
        remove_artifacts (bool, optional):
            Whether to remove the intermediate file(s) or not. Default=False.

    Returns:
        str: Path of the output PDF.
    """
    file_name = input_pdf.split(os.sep)[-1].split('.')[0]
    output_pdf = os.path.join(output_folder, file_name + '_blurred' + '.pdf')
    # Convert pages of PDF to blurred images and save to `tmp_folder`
    pdf_to_image(input_pdf, tmp_folder, dpi, True, kernel, sigma)
    # Write images to A4 PDF pages with `orientation` and save to `tmp_folder`
    image_to_pdf(tmp_folder, tmp_folder, orientation, remove_artifacts)
    # Merge PDFs in `tmp_folder` and write to `output_folder`;
    # removes the per-page PDFs afterwards when remove_artifacts is True
    merge_pdfs(tmp_folder, output_pdf, remove_artifacts)
    return output_pdf
def add_watermark(input_pdf, watermark, output_folder, remove_original=False):
    """Stamp the first page of `watermark` onto every page of `input_pdf`.

    The result is saved as '<name>_watermarked.pdf' inside `output_folder`.

    Args:
        input_pdf (str): Path to the input PDF.
        watermark (str): Path to the watermark PDF (first page is used).
        output_folder (str): Folder the watermarked PDF is saved into.
        remove_original (bool, optional): Remove `input_pdf` after
            watermarking. Defaults to False.

    Returns:
        str: Path of the watermarked PDF.
    """
    base_name = input_pdf.split(os.sep)[-1].split('.')[0]  # drop '.pdf'
    output_pdf = os.path.join(output_folder, base_name + '_watermarked' + '.pdf')
    stamp = PdfFileReader(watermark).getPage(0)
    reader = PdfFileReader(input_pdf)
    writer = PdfFileWriter()
    for page_index in range(reader.getNumPages()):
        page = reader.getPage(page_index)
        page.mergePage(stamp)  # overlay the watermark
        writer.addPage(page)
    with open(output_pdf, 'wb') as out:
        writer.write(out)
    if remove_original:
        os.remove(input_pdf)
    return output_pdf
def move_processed_pdf(input_pdf, processed_folder):
    """Move `input_pdf` into `processed_folder`.

    If a PDF with the same name already exists in `processed_folder`, the
    moved file gets a postfix constructed as "_exists_<rnd>.pdf" instead of
    overwriting, where `rnd` is drawn uniformly from [0, 100) — the original
    docstring claimed [0,100], but np.random.randint's upper bound is
    exclusive.

    Args:
        input_pdf (str): Path to input PDF.
        processed_folder (str): Path to the folder the PDF will be moved to.
    """
    file_name = input_pdf.split(os.sep)[-1].split('.')[0]
    new_path_to_input_pdf = os.path.join(processed_folder, file_name + '.pdf')
    if os.path.exists(new_path_to_input_pdf):
        rnd = np.random.randint(0, 100)
        try:
            file_name = file_name + "_exists_" + f'{rnd}' + ".pdf"
            if_input_pdf_is_exists = os.path.join(processed_folder, file_name)
            os.rename(input_pdf, if_input_pdf_is_exists)
        # BUGFIX: was a broad `except Exception` with a message blaming a
        # random-name collision; os.rename actually fails with OSError and
        # the deduplicated name is freshly random, so report the real error.
        except OSError as error:
            print("Could not move {0}: {1}\n".format(input_pdf, error))
            raise error
    else:
        os.rename(input_pdf, new_path_to_input_pdf)
def encrypt_and_add_metadata(input_pdf,
                             output_folder,
                             usr_pass,
                             owner_pass,
                             remove_original=False):
    """Encrypts PDF, changes permissions and adds metadata to PDF.
    Default permissions let the user print the PDF, but all other operations
    are restricted. To require a password just to open the PDF, pass a
    non-empty `usr_pass`; an empty string means no password is needed to
    read. To remove the original PDF after encryption, set `remove_original`
    to `True`. The result is saved as '<name>_final.pdf' in `output_folder`.
    Args:
        input_pdf (str): path to input PDF
        output_folder (str): path to output folder
        usr_pass (str): user password to open PDF, if "", no pass required.
        owner_pass (str): owner password to edit PDF
        remove_original (bool, optional):
            Whether remove prior processed file(s) or not. Defaults to False.
    """
    # Extract file_name from the path
    file_name = input_pdf.split(os.sep)[-1].split('.')[0]
    # Set output path of PDF
    output_pdf = os.path.join(output_folder, file_name + '_final' + '.pdf')
    # Metadata sections of PDF. For more information visit the link below.
    # https://www.adobe.io/open/standards/xmp.html#!adobe/xmp-docs/master/Namespaces.md
    # Dublin Core namespace: dc:title, dc:creator, dc:description, dc:subject, dc:format, dc:rights
    # XMP basic namespace: xmp:CreateDate, xmp:CreatorTool, xmp:ModifyDate, xmp:MetadataDate
    # XMP rights management namespace: xmpRights:WebStatement, xmpRights:Marked
    # XMP media management namespace: xmpMM:DocumentID
    pdf = pikepdf.Pdf.open(input_pdf)  # Read PDF
    with pdf.open_metadata() as meta:  # Add Metadata
        meta['dc:title'] = 'Lecture Notes'
        meta['dc:creator'] = 'Serhan Yarkan, Tapir Lab.'  # Author
        meta['dc:description'] = 'Tapir Lab. Fall-2020 Lecture Notes'
        meta['dc:subject'] = 'Probability, statistics, communications...\n\
                              ALL HAIL TAPIR!\n\
                              tapirlab.com' # Keywords
        meta['dc:rights'] = 'Tapir Lab. License'
        meta['xmp:CreateDate'] = datetime.today().isoformat()
        meta['xmp:ModifyDate'] = datetime.today().isoformat()
        meta['xmp:CreatorTool'] = "Tapir Lab.'s Automatic Watermarking Script"
        meta['xmpRights:WebStatement'] = "http://www.tapirlab.com"
    # Set permissions of user: printing allowed, everything else restricted
    permissions = pikepdf.Permissions(
        accessibility=False,
        extract=False,
        modify_annotation=False,
        modify_assembly=False,
        modify_form=False,
        modify_other=False,
        print_lowres=True,
        print_highres=True,
    )
    # Save PDF with added metadata and restricted permissions.
    pdf.save(output_pdf, encryption=pikepdf.Encryption(user=usr_pass,
                                                       owner=owner_pass,
                                                       allow=permissions,
                                                       ))
    # Close PDF object
    pdf.close()
    if remove_original:  # Remove original file if True
        os.remove(input_pdf)
| 41.607046 | 99 | 0.650557 |
import os
import glob
from datetime import datetime
import cv2
import numpy as np
import pikepdf
from pdf2image import convert_from_path
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4, landscape
from PyPDF2 import PdfFileWriter, PdfFileReader, PdfFileMerger
def set_page(name, orientation):
if orientation == 'landscape':
empty_page = canvas.Canvas(name, pagesize=landscape(A4))
else:
empty_page = canvas.Canvas(name, pagesize=A4)
return empty_page
def draw_image(canvas_, image, orientation):
if orientation == 'landscape':
canvas_.drawImage(image, 0, 0, width=A4[1], height=A4[0])
else:
canvas_.drawImage(image, 0, 0, width=A4[0], height=A4[1])
def blur_image(page, kernel=(5,5), sigma=1):
img = np.asarray(page)
gauss = np.random.normal(0, sigma, img.size)
gauss = gauss.reshape(img.shape[0], img.shape[1], img.shape[2]).astype('uint8')
img_gauss = cv2.add(img,gauss)
blurred_image = cv2.GaussianBlur(img_gauss, kernel, sigma)
return blurred_image
def pdf_to_image(path_to_pdf, output_folder, dpi=100, blur=True, kernel=(5,5), sigma=1):
pages = convert_from_path(path_to_pdf, dpi, fmt='PNG')
for (page, j) in zip(pages, range(len(pages))):
png_output = os.path.join('.', os.path.join(output_folder, f'_{j}.png'))
if blur:
blurred_image = blur_image(page, kernel, sigma)
cv2.imwrite(png_output, blurred_image)
else:
page.save(png_output, 'PNG')
def image_to_pdf(images_folder, output_folder, orientation, remove_artifacts=False):
path_to_images = sorted(glob.glob(os.path.join(images_folder,'*.png')))
for (image,j) in zip(path_to_images, range(len(path_to_images))):
canvas_ = set_page(os.path.join(output_folder,f'tmp_{j}.pdf'), orientation)
draw_image(canvas_, image, orientation)
canvas_.save()
if remove_artifacts:
os.remove(image)
def merge_pdfs(input_folder, path_to_output_pdf, remove_artifacts=False):
pdf_merger = PdfFileMerger()
input_pdfs = sorted(glob.glob(os.path.join(input_folder, "*.pdf")))
for path in input_pdfs:
pdf_merger.append(path)
with open(path_to_output_pdf, 'wb') as output_pdf:
pdf_merger.write(output_pdf)
pdf_merger.close()
if remove_artifacts:
for pdf in input_pdfs:
os.remove(pdf)
def pdf_to_image_to_pdf(input_pdf,
tmp_folder,
output_folder,
orientation,
remove_original=False,
remove_artifacts=False):
file_name = input_pdf.split(os.sep)[-1].split('.')[0]
output_pdf = os.path.join(output_folder, file_name + '_im2pdf' + '.pdf')
pdf_to_image(input_pdf, tmp_folder, blur=False)
image_to_pdf(tmp_folder, tmp_folder, orientation, remove_artifacts)
merge_pdfs(tmp_folder, output_pdf, remove_artifacts)
if remove_original:
os.remove(input_pdf)
return output_pdf
def blur_pages_of_pdf(input_pdf,
orientation,
tmp_folder,
output_folder,
dpi=100,
kernel=(5,5),
sigma=1,
remove_artifacts=False,
):
file_name = input_pdf.split(os.sep)[-1].split('.')[0]
output_pdf = os.path.join(output_folder, file_name + '_blurred' + '.pdf')
pdf_to_image(input_pdf, tmp_folder, dpi, True, kernel, sigma)
image_to_pdf(tmp_folder, tmp_folder, orientation, remove_artifacts)
merge_pdfs(tmp_folder, output_pdf, remove_artifacts)
return output_pdf
def add_watermark(input_pdf, watermark, output_folder, remove_original=False):
    """Stamp the first page of `watermark` onto every page of `input_pdf`.

    The result is written to `<output_folder>/<name>_watermarked.pdf` and
    that path is returned.  When `remove_original` is True the source PDF
    is deleted afterwards.
    NOTE(review): uses the PyPDF2 1.x API (PdfFileReader, getPage,
    mergePage); PyPDF2 >= 3 renamed these — confirm the pinned version.
    """
    file_name = input_pdf.split(os.sep)[-1].split('.')[0]
    output_pdf = os.path.join(output_folder, file_name + '_watermarked' + '.pdf')
    # The watermark document is expected to be a single-page PDF.
    watermark_page = PdfFileReader(watermark).getPage(0)
    pdf_reader = PdfFileReader(input_pdf)
    pdf_writer = PdfFileWriter()
    # Merge the watermark page into every page, then collect the pages.
    for i in range(pdf_reader.getNumPages()):
        page = pdf_reader.getPage(i)
        page.mergePage(watermark_page)
        pdf_writer.addPage(page)
    with open(output_pdf, 'wb') as out:
        pdf_writer.write(out)
    if remove_original:
        os.remove(input_pdf)
    return output_pdf
def move_processed_pdf(input_pdf, processed_folder):
    """Move `input_pdf` into `processed_folder` without clobbering files.

    If `<name>.pdf` already exists at the destination, an `_exists_<k>`
    postfix is appended, with `k` incremented until a free name is found.

    BUG FIX: the original drew a single random number in [0, 100) for the
    postfix, so it could still collide; additionally, on POSIX os.rename
    silently overwrites an existing target, so its try/except never fired.
    The deterministic increment loop below cannot collide.

    Parameters
    ----------
    input_pdf : str
        Path of the PDF to move.
    processed_folder : str
        Destination directory (must already exist).
    """
    file_name = input_pdf.split(os.sep)[-1].split('.')[0]
    destination = os.path.join(processed_folder, file_name + '.pdf')
    suffix_index = 0
    # Probe destination names until one is free.
    while os.path.exists(destination):
        destination = os.path.join(
            processed_folder, file_name + "_exists_" + f'{suffix_index}' + ".pdf")
        suffix_index += 1
    os.rename(input_pdf, destination)
def encrypt_and_add_metadata(input_pdf,
                             output_folder,
                             usr_pass,
                             owner_pass,
                             remove_original=False):
    """Write an encrypted copy of `input_pdf` carrying Tapir Lab. metadata.

    XMP metadata (title, creator, dates, rights) is attached, user
    permissions are restricted to printing only, and the result is saved
    as `<output_folder>/<name>_final.pdf`.

    Parameters
    ----------
    input_pdf : str
        Path of the PDF to process.
    output_folder : str
        Directory that receives the encrypted copy.
    usr_pass : str
        User (open) password.
    owner_pass : str
        Owner (permissions) password.
    remove_original : bool
        Delete `input_pdf` after the encrypted copy has been saved.

    Returns
    -------
    str
        Path of the encrypted output PDF.
    """
    file_name = input_pdf.split(os.sep)[-1].split('.')[0]
    output_pdf = os.path.join(output_folder, file_name + '_final' + '.pdf')
    # BUG FIX: the original line read `df.Pdf.open(input_pdf)`, which raised
    # a NameError (`df` is undefined) and never bound `pdf` for the code below.
    pdf = pikepdf.Pdf.open(input_pdf)
    with pdf.open_metadata() as meta:
        meta['dc:title'] = 'Lecture Notes'
        meta['dc:creator'] = 'Serhan Yarkan, Tapir Lab.'
        meta['dc:description'] = 'Tapir Lab. Fall-2020 Lecture Notes'
        meta['dc:subject'] = 'Probability, statistics, communications...\n\
        ALL HAIL TAPIR!\n\
        tapirlab.com'
        meta['dc:rights'] = 'Tapir Lab. License'
        meta['xmp:CreateDate'] = datetime.today().isoformat()
        meta['xmp:ModifyDate'] = datetime.today().isoformat()
        meta['xmp:CreatorTool'] = "Tapir Lab.'s Automatic Watermarking Script"
        meta['xmpRights:WebStatement'] = "http://www.tapirlab.com"
    # Set permissions of user: everything disabled except printing.
    permissions = pikepdf.Permissions(
        accessibility=False,
        extract=False,
        modify_annotation=False,
        modify_assembly=False,
        modify_form=False,
        modify_other=False,
        print_lowres=True,
        print_highres=True,
    )
    # Save PDF with added metadata and restricted permissions.
    pdf.save(output_pdf, encryption=pikepdf.Encryption(user=usr_pass,
                                                       owner=owner_pass,
                                                       allow=permissions,
                                                       ))
    # Close PDF object
    pdf.close()
    if remove_original:  # Remove original file if True
        os.remove(input_pdf)
    # Return the output path for consistency with the sibling helpers.
    return output_pdf
| true | true |
f7f4c9866b1d036fbb9af1f9bf84345bf1046243 | 1,888 | py | Python | tests/test_downloadqueue.py | runrin/castero | 4f516766d126c37a50b02b47676e11a48ed2800d | [
"MIT"
] | null | null | null | tests/test_downloadqueue.py | runrin/castero | 4f516766d126c37a50b02b47676e11a48ed2800d | [
"MIT"
] | null | null | null | tests/test_downloadqueue.py | runrin/castero | 4f516766d126c37a50b02b47676e11a48ed2800d | [
"MIT"
] | null | null | null | import os
from unittest import mock
from castero.downloadqueue import DownloadQueue
from castero.episode import Episode
from castero.feed import Feed
# Shared fixtures: one minimal valid feed and two distinct episodes of it.
my_dir = os.path.dirname(os.path.realpath(__file__))
feed = Feed(file=my_dir + "/feeds/valid_basic.xml")
episode1 = Episode(feed=feed, title="episode1 title")
episode2 = Episode(feed=feed, title="episode2 title")
def test_downloadqueue_init():
    # Bare construction succeeds and yields a DownloadQueue instance.
    mydownloadqueue = DownloadQueue()
    assert isinstance(mydownloadqueue, DownloadQueue)
def test_downloadqueue_add():
    # Adding the same episode twice must not create a duplicate entry.
    mydownloadqueue = DownloadQueue()
    assert mydownloadqueue.length == 0
    mydownloadqueue.add(episode1)
    assert mydownloadqueue.length == 1
    mydownloadqueue.add(episode1)
    assert mydownloadqueue.length == 1
    mydownloadqueue.add(episode2)
    assert mydownloadqueue.length == 2
def test_downloadqueue_start():
    # start() must kick off the first queued episode's download, passing
    # the queue itself and its display through to Episode.download.
    mydownloadqueue = DownloadQueue()
    mydownloadqueue._display = mock.MagicMock()
    mydownloadqueue.add(episode1)
    episode1.download = mock.MagicMock(name="download")
    mydownloadqueue.start()
    episode1.download.assert_called_with(mydownloadqueue,
                                         mydownloadqueue._display, )
def test_downloadqueue_first():
    # first exposes the head of the queue without removing it.
    mydownloadqueue = DownloadQueue()
    mydownloadqueue.add(episode1)
    assert mydownloadqueue.first == episode1
def test_downloadqueue_next():
    # next() drops the head of the queue and restarts downloading.
    mydownloadqueue = DownloadQueue()
    mydownloadqueue.add(episode1)
    mydownloadqueue.add(episode2)
    mydownloadqueue.start = mock.MagicMock(name="start")
    mydownloadqueue.next()
    assert mydownloadqueue.length == 1
    assert mydownloadqueue.start.call_count == 1
def test_downloadqueue_update():
    # update() simply (re)starts the queue.
    mydownloadqueue = DownloadQueue()
    mydownloadqueue.add(episode1)
    mydownloadqueue.start = mock.MagicMock(name="start")
    mydownloadqueue.update()
    assert mydownloadqueue.start.call_count == 1
| 29.968254 | 68 | 0.746292 | import os
from unittest import mock
from castero.downloadqueue import DownloadQueue
from castero.episode import Episode
from castero.feed import Feed
my_dir = os.path.dirname(os.path.realpath(__file__))
feed = Feed(file=my_dir + "/feeds/valid_basic.xml")
episode1 = Episode(feed=feed, title="episode1 title")
episode2 = Episode(feed=feed, title="episode2 title")
def test_downloadqueue_init():
mydownloadqueue = DownloadQueue()
assert isinstance(mydownloadqueue, DownloadQueue)
def test_downloadqueue_add():
mydownloadqueue = DownloadQueue()
assert mydownloadqueue.length == 0
mydownloadqueue.add(episode1)
assert mydownloadqueue.length == 1
mydownloadqueue.add(episode1)
assert mydownloadqueue.length == 1
mydownloadqueue.add(episode2)
assert mydownloadqueue.length == 2
def test_downloadqueue_start():
mydownloadqueue = DownloadQueue()
mydownloadqueue._display = mock.MagicMock()
mydownloadqueue.add(episode1)
episode1.download = mock.MagicMock(name="download")
mydownloadqueue.start()
episode1.download.assert_called_with(mydownloadqueue,
mydownloadqueue._display, )
def test_downloadqueue_first():
mydownloadqueue = DownloadQueue()
mydownloadqueue.add(episode1)
assert mydownloadqueue.first == episode1
def test_downloadqueue_next():
mydownloadqueue = DownloadQueue()
mydownloadqueue.add(episode1)
mydownloadqueue.add(episode2)
mydownloadqueue.start = mock.MagicMock(name="start")
mydownloadqueue.next()
assert mydownloadqueue.length == 1
assert mydownloadqueue.start.call_count == 1
def test_downloadqueue_update():
mydownloadqueue = DownloadQueue()
mydownloadqueue.add(episode1)
mydownloadqueue.start = mock.MagicMock(name="start")
mydownloadqueue.update()
assert mydownloadqueue.start.call_count == 1
| true | true |
f7f4ca25724ed442b6bef372ecff9c9deeea380d | 546 | py | Python | quietude/quietude/menus/AddQCollectionModifierPieMenu.py | x-serenity/quietude | abd9a0eeb8bb5962f172396ed3db37b30b226c96 | [
"MIT"
] | null | null | null | quietude/quietude/menus/AddQCollectionModifierPieMenu.py | x-serenity/quietude | abd9a0eeb8bb5962f172396ed3db37b30b226c96 | [
"MIT"
] | null | null | null | quietude/quietude/menus/AddQCollectionModifierPieMenu.py | x-serenity/quietude | abd9a0eeb8bb5962f172396ed3db37b30b226c96 | [
"MIT"
] | null | null | null | import bpy
class AddQCollectionModifierPieMenu(bpy.types.Menu):
    """Blender pie menu listing every QCollection modifier type for quick insertion."""

    # label is displayed at the center of the pie menu.
    bl_idname = "VIEW3D_MT_AddQCollectionModifierPieMenu"
    bl_label = "QCollection Modifiers"

    def draw(self, context):
        """Populate the pie with one entry per available modifier type."""
        layout = self.layout
        pie = layout.menu_pie()
        # operator_enum will just spread all available options
        # for the type enum of the operator on the pie
        pie.operator_enum(operator="quietude.add_qcollection_modifier", property="modifier_type")
| 39 | 98 | 0.694139 | import bpy
class AddQCollectionModifierPieMenu(bpy.types.Menu):
bl_idname = "VIEW3D_MT_AddQCollectionModifierPieMenu"
bl_label = "QCollection Modifiers"
def draw(self, context):
layout = self.layout
pie = layout.menu_pie()
pie.operator_enum(operator="quietude.add_qcollection_modifier", property="modifier_type")
| true | true |
f7f4cb01e96b7a34de544b928514048df369208a | 179 | py | Python | request.py | wallik2/Deploy-model-Iris.csv | 55348b96ebdf77a33818eca732ef4a5305a85a1d | [
"MIT"
] | 5 | 2021-12-28T07:59:52.000Z | 2022-01-09T11:31:02.000Z | request.py | wallik2/Deploy-model-Iris.csv | 55348b96ebdf77a33818eca732ef4a5305a85a1d | [
"MIT"
] | 7 | 2021-12-28T07:50:00.000Z | 2022-01-09T09:57:48.000Z | request.py | wallik2/Deploy-model-Iris.csv | 55348b96ebdf77a33818eca732ef4a5305a85a1d | [
"MIT"
] | 1 | 2021-12-17T13:21:48.000Z | 2021-12-17T13:21:48.000Z | import requests
url = 'http://localhost:5000/predict_api'
r = requests.post(url,json={'sepal_length':2, 'sepal_width':9, 'petal_length':6, 'petal_width':2})
print(r.json()) | 29.833333 | 99 | 0.698324 | import requests
url = 'http://localhost:5000/predict_api'
r = requests.post(url,json={'sepal_length':2, 'sepal_width':9, 'petal_length':6, 'petal_width':2})
print(r.json()) | true | true |
f7f4cb246eaccee207dfb0854e393bf8840dae2f | 10,323 | py | Python | dcCustom/data/data_loader.py | simonfqy/DTI_prediction | e01c592cc06c4de04b3ed6db35da5af5ff7f863f | [
"MIT"
] | 31 | 2018-08-15T13:35:24.000Z | 2022-02-18T08:11:12.000Z | dcCustom/data/data_loader.py | simonfqy/DTI_prediction | e01c592cc06c4de04b3ed6db35da5af5ff7f863f | [
"MIT"
] | 14 | 2018-07-13T03:56:19.000Z | 2020-05-22T23:25:34.000Z | dcCustom/data/data_loader.py | simonfqy/DTI_prediction | e01c592cc06c4de04b3ed6db35da5af5ff7f863f | [
"MIT"
] | 13 | 2018-07-13T03:56:26.000Z | 2021-02-24T10:58:37.000Z | """
Process an input dataset into a format suitable for machine learning.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import gzip
import pandas as pd
import numpy as np
import csv
import numbers
import tempfile
from rdkit.Chem import rdmolfiles
from rdkit.Chem import rdmolops
from rdkit import Chem
import time
import sys
import pdb
from deepchem.utils.save import log
from deepchem.utils.save import load_csv_files
#from deepchem.utils.save import load_sdf_files
#from deepchem.utils.save import encode_fasta_sequence
from deepchem.feat import UserDefinedFeaturizer
from dcCustom.data import DiskDataset
from dcCustom.feat import Protein
def convert_df_to_numpy(df, tasks, verbose=False):
  """Transform a dataframe containing deepchem input into (y, w) arrays.

  Parameters
  ----------
  df: pandas.DataFrame
    Dataframe holding one column per task.
  tasks: list of str
    Names of the task columns to extract.
  verbose: bool
    Unused; kept for backward compatibility with existing callers.

  Returns
  -------
  (y, w): tuple of float ndarrays, each of shape (n_samples, n_tasks).
    `y` holds the task values (0.0 where the entry was missing) and `w`
    the weights (0.0 where missing, 1.0 otherwise).
  """
  n_samples = df.shape[0]
  n_tasks = len(tasks)
  y = np.hstack(
      [np.reshape(np.array(df[task].values), (n_samples, 1)) for task in tasks])
  w = np.ones((n_samples, n_tasks))
  # Single pass: an empty-string entry marks a missing label -> zero target,
  # zero weight.  (The original built an intermediate `missing` matrix, kept
  # an unused `feature_shape` variable and two dead timing variables, and
  # swept the full matrix twice.)
  for ind in range(n_samples):
    for task in range(n_tasks):
      if y[ind, task] == "":
        y[ind, task] = 0.
        w[ind, task] = 0.
  return y.astype(float), w.astype(float)
def featurize_protein(df, field, source_field, prot_seq_dict, log_every_N=500, verbose=True):
  '''Construct Protein objects for every row of `df`.

  Matches the shape of the molecule-featurization helpers: each row yields
  a one-element list inside the returned ndarray.  No real featurization
  happens here — the (source, name) pair is simply looked up in
  `prot_seq_dict` to recover the protein sequence.'''
  names = df[field].tolist()
  origins = df[source_field].tolist()
  wrapped = [
      [Protein(name, source=origin, sequence=prot_seq_dict[(origin, name)])]
      for name, origin in zip(names, origins)
  ]
  return np.array(wrapped)
def featurize_smiles_df(df, featurizer, field, log_every_N=1000, verbose=True):
  """Featurize individual compounds in dataframe.

  Given a featurizer that operates on individual chemical compounds
  or macromolecules, compute & add features for that compound to the
  features dataframe.

  Returns
  -------
  (features, valid_inds): ndarray of per-row features and a boolean mask
    marking the rows whose featurization produced output.
  """
  sample_elems = df[field].tolist()

  features = []
  # RDKit writes parse warnings to the C-level stderr; redirect it into a
  # log file so long runs do not flood the console.
  # BUG FIX: the original assumed ./logs existed, never restored stderr if
  # featurization raised, and leaked the duplicated descriptor.
  os.makedirs('./logs', exist_ok=True)
  stderr_fileno = sys.stderr.fileno()
  stderr_save = os.dup(stderr_fileno)
  stderr_fd = open('./logs/error.log', 'a')
  os.dup2(stderr_fd.fileno(), stderr_fileno)
  try:
    for ind, elem in enumerate(sample_elems):
      mol = Chem.MolFromSmiles(elem)
      # TODO (ytz) this is a bandage solution to reorder the atoms so
      # that they're always in the same canonical order. Presumably this
      # should be correctly implemented in the future for graph mols.
      if mol:
        new_order = rdmolfiles.CanonicalRankAtoms(mol)
        mol = rdmolops.RenumberAtoms(mol, new_order)
      if ind % log_every_N == 0:
        log("Featurizing sample %d" % ind, verbose)
      features.append(featurizer.featurize([mol], smiles=elem))
  finally:
    # Always restore the real stderr and release both descriptors.
    stderr_fd.close()
    os.dup2(stderr_save, stderr_fileno)
    os.close(stderr_save)
  valid_inds = np.array(
      [1 if elt.size > 0 else 0 for elt in features], dtype=bool)
  features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
  return np.array(features), valid_inds
def featurize_smiles_np(arr, featurizer, log_every_N=1000, verbose=True):
  """Featurize individual compounds in a numpy array.

  Given a featurizer that operates on individual chemical compounds
  or macromolecules, compute & add features for that compound to the
  features array.

  Parameters
  ----------
  arr: ndarray
    Array of SMILES strings.
  featurizer: Featurizer
    Applied to each parsed molecule.
  log_every_N: int
    Progress-log interval.
  verbose: bool
    Enables progress logging.

  Returns
  -------
  1-D ndarray holding features only for the molecules whose featurization
  produced output.
  """
  features = []
  for ind, elem in enumerate(arr.tolist()):
    mol = Chem.MolFromSmiles(elem)
    if mol:
      # Re-number atoms into RDKit's canonical order so featurization does
      # not depend on the input atom ordering.
      new_order = rdmolfiles.CanonicalRankAtoms(mol)
      mol = rdmolops.RenumberAtoms(mol, new_order)
    if ind % log_every_N == 0:
      log("Featurizing sample %d" % ind, verbose)
    features.append(featurizer.featurize([mol]))
  valid_inds = np.array(
      [1 if elt.size > 0 else 0 for elt in features], dtype=bool)
  # Drop entries whose featurization produced nothing, then flatten.
  features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
  features = np.squeeze(np.array(features))
  return features.reshape(-1,)
def get_user_specified_features(df, featurizer, verbose=True):
  """Extract and merge user specified features.

  Merge features included in dataset provided by user
  into final features dataframe.

  Parameters
  ----------
  df: pandas.DataFrame
    Shard holding the user-specified feature columns.
  featurizer: UserDefinedFeaturizer
    Supplies `feature_fields`, the names of the columns to extract.
  verbose: bool
    Controls timing log output.

  Returns
  -------
  ndarray of shape (n_samples, n_features).
  """
  time1 = time.time()
  df[featurizer.feature_fields] = df[featurizer.feature_fields].apply(
      pd.to_numeric)
  # BUG FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed
  # in pandas 1.0; selecting the columns and taking `.values` is equivalent.
  X_shard = df[featurizer.feature_fields].values
  time2 = time.time()
  log("TIMING: user specified processing took %0.3f s" % (time2 - time1),
      verbose)
  return X_shard
def featurize_mol_df(df, featurizer, field, verbose=True, log_every_N=1000):
  """Featurize individual compounds in dataframe.

  Featurizes .sdf files, so the 3-D structure should be preserved
  so we use the rdkit "mol" object created from .sdf instead of smiles
  string. Some featurizers such as CoulombMatrix also require a 3-D
  structure. Featurizing from .sdf is currently the only way to
  perform CM feautization.

  Parameters
  ----------
  df: pandas.DataFrame
    Dataframe whose `field` column holds RDKit mol objects.
  featurizer: Featurizer
    Applied to each molecule.
  field: str
    Name of the column holding the mol objects.

  Returns
  -------
  (features, valid_inds): squeezed ndarray of features plus a boolean mask
    of the rows whose featurization produced output.
  """
  sample_elems = df[field].tolist()

  features = []
  for ind, mol in enumerate(sample_elems):
    if ind % log_every_N == 0:
      log("Featurizing sample %d" % ind, verbose)
    features.append(featurizer.featurize([mol]))
  valid_inds = np.array(
      [1 if elt.size > 0 else 0 for elt in features], dtype=bool)
  # Keep only the rows that actually featurized.
  features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
  return np.squeeze(np.array(features)), valid_inds
class DataLoader(object):
  """
  Handles loading/featurizing of chemical samples (datapoints).

  Currently knows how to load csv-files/pandas-dataframes/SDF-files. Writes a
  dataframe object to disk as output.
  """

  def __init__(self,
               tasks,
               smiles_field=None,
               id_field=None,
               mol_field=None,
               featurizer=None,
               protein_field=None,
               source_field=None,
               verbose=True,
               prot_seq_dict=None,
               log_every_n=1000,
               input_protein=True):
    """Extracts data from input as Pandas data frame.

    Parameters
    ----------
    tasks: list of str
      Names of the label columns.
    smiles_field: str
      Column holding SMILES strings.
    id_field: str
      Column holding unique ids; falls back to `smiles_field` when omitted.
    mol_field: str
      Column holding RDKit mol objects (for SDF-style inputs).
    featurizer: Featurizer
      Featurizer applied to each compound.
    protein_field, source_field: str
      Columns holding protein names and their data sources.
    prot_seq_dict: dict
      Maps (source, protein-name) pairs to sequences.
    input_protein: bool
      Whether shards also carry protein information.
    """
    if not isinstance(tasks, list):
      raise ValueError("tasks must be a list.")
    self.verbose = verbose
    self.tasks = tasks
    self.smiles_field = smiles_field
    # Fall back to the SMILES column as the unique-id column when no
    # dedicated id column is supplied.
    if id_field is None:
      self.id_field = smiles_field
    else:
      self.id_field = id_field
    self.mol_field = mol_field
    self.protein_field = protein_field
    self.source_field = source_field
    self.prot_seq_dict = prot_seq_dict
    self.user_specified_features = None
    if isinstance(featurizer, UserDefinedFeaturizer):
      self.user_specified_features = featurizer.feature_fields
    self.featurizer = featurizer
    self.log_every_n = log_every_n
    self.input_protein = input_protein

  def featurize(self, input_files, data_dir=None, shard_size=8192):
    """Featurize provided files and write to specified location.

    For large datasets, automatically shards into smaller chunks
    for convenience.

    Parameters
    ----------
    input_files: list
      List of input filenames.
    data_dir: str
      (Optional) Directory to store featurized dataset.
    shard_size: int
      (Optional) Number of examples stored in each shard.
    """
    log("Loading raw samples now.", self.verbose)
    log("shard_size: %d" % shard_size, self.verbose)

    if not isinstance(input_files, list):
      input_files = [input_files]

    def shard_generator():
      # Lazily featurize one shard at a time so arbitrarily large inputs
      # never have to fit in memory at once.
      for shard_num, shard in enumerate(
          self.get_shards(input_files, shard_size)):
        time1 = time.time()
        X, valid_inds = self.featurize_shard(shard)
        ids = shard[self.id_field].values
        # Drop the ids of rows whose featurization failed.
        ids = ids[valid_inds]
        if len(self.tasks) > 0:
          # Featurize task results iff they exist.
          # NOTE(review): the third positional argument here lands in
          # convert_df_to_numpy's `verbose` parameter, not an id field —
          # confirm intent.
          y, w = convert_df_to_numpy(shard, self.tasks, self.id_field)
          # Filter out examples where featurization failed.
          y, w = (y[valid_inds], w[valid_inds])
          assert len(X) == len(ids) == len(y) == len(w)
        else:
          # For prospective data where results are unknown, it makes
          # no sense to have y values or weights.
          y, w = (None, None)
          assert len(X) == len(ids)

        time2 = time.time()
        log("TIMING: featurizing shard %d took %0.3f s" %
            (shard_num, time2 - time1), self.verbose)
        yield X, y, w, ids

    return DiskDataset.create_dataset(
        shard_generator(), data_dir, self.tasks, verbose=self.verbose)

  def get_shards(self, input_files, shard_size):
    """Stub for children classes."""
    raise NotImplementedError

  def featurize_shard(self, shard):
    """Featurizes a shard of an input dataframe."""
    raise NotImplementedError
class CSVLoader(DataLoader):
  """
  Handles loading of CSV files.
  """

  def get_shards(self, input_files, shard_size, verbose=True):
    """Defines a generator which returns data for each shard"""
    return load_csv_files(input_files, shard_size, verbose=verbose)

  def featurize_shard(self, shard):
    """Featurizes a shard of an input dataframe."""
    # valid_inds marks the rows whose SMILES could be parsed/featurized.
    mol_features, valid_inds = featurize_smiles_df(shard, self.featurizer, field=self.smiles_field)
    if len(mol_features.shape) > 2:
      mol_features = np.squeeze(mol_features)
    if self.input_protein:
      proteins = featurize_protein(shard, field=self.protein_field, source_field=self.source_field,
        prot_seq_dict=self.prot_seq_dict)
      # Note: for ECFP with 1024 entries, mol_features is a (8192, 1024) sized array.
      # NOTE(review): `proteins` is built for ALL rows while `mol_features`
      # keeps only valid rows — the concatenation breaks if any SMILES fails
      # to parse; confirm upstream guarantees parseable molecules.
      return np.concatenate((mol_features, proteins), axis=1), valid_inds
    else:
      return mol_features, valid_inds
| 34.069307 | 99 | 0.694275 | from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import gzip
import pandas as pd
import numpy as np
import csv
import numbers
import tempfile
from rdkit.Chem import rdmolfiles
from rdkit.Chem import rdmolops
from rdkit import Chem
import time
import sys
import pdb
from deepchem.utils.save import log
from deepchem.utils.save import load_csv_files
from deepchem.feat import UserDefinedFeaturizer
from dcCustom.data import DiskDataset
from dcCustom.feat import Protein
def convert_df_to_numpy(df, tasks, verbose=False):
n_samples = df.shape[0]
n_tasks = len(tasks)
time1 = time.time()
y = np.hstack(
[np.reshape(np.array(df[task].values), (n_samples, 1)) for task in tasks])
time2 = time.time()
w = np.ones((n_samples, n_tasks))
missing = np.zeros_like(y).astype(int)
feature_shape = None
for ind in range(n_samples):
for task in range(n_tasks):
if y[ind, task] == "":
missing[ind, task] = 1
for ind in range(n_samples):
for task in range(n_tasks):
if missing[ind, task]:
y[ind, task] = 0.
w[ind, task] = 0.
return y.astype(float), w.astype(float)
def featurize_protein(df, field, source_field, prot_seq_dict, log_every_N=500, verbose=True):
elems = df[field].tolist()
sources = df[source_field].tolist()
proteins = []
for ind, prot in enumerate(elems):
source = sources[ind]
pair = (source, prot)
sequence = prot_seq_dict[pair]
proteins.append([Protein(prot, source = source, sequence = sequence)])
return np.array(proteins)
def featurize_smiles_df(df, featurizer, field, log_every_N=1000, verbose=True):
sample_elems = df[field].tolist()
features = []
stderr_fileno = sys.stderr.fileno()
stderr_save = os.dup(stderr_fileno)
stderr_fd = open('./logs/error.log', 'a')
os.dup2(stderr_fd.fileno(), stderr_fileno)
for ind, elem in enumerate(sample_elems):
mol = Chem.MolFromSmiles(elem)
# should be correctly implemented in the future for graph mols.
if mol:
new_order = rdmolfiles.CanonicalRankAtoms(mol)
mol = rdmolops.RenumberAtoms(mol, new_order)
if ind % log_every_N == 0:
log("Featurizing sample %d" % ind, verbose)
features.append(featurizer.featurize([mol], smiles=elem))
stderr_fd.close()
os.dup2(stderr_save, stderr_fileno)
valid_inds = np.array(
[1 if elt.size > 0 else 0 for elt in features], dtype=bool)
features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
#return np.squeeze(np.array(features), axis=1), valid_inds
return np.array(features), valid_inds
def featurize_smiles_np(arr, featurizer, log_every_N=1000, verbose=True):
features = []
for ind, elem in enumerate(arr.tolist()):
mol = Chem.MolFromSmiles(elem)
if mol:
new_order = rdmolfiles.CanonicalRankAtoms(mol)
mol = rdmolops.RenumberAtoms(mol, new_order)
if ind % log_every_N == 0:
log("Featurizing sample %d" % ind, verbose)
features.append(featurizer.featurize([mol]))
valid_inds = np.array(
[1 if elt.size > 0 else 0 for elt in features], dtype=bool)
features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
features = np.squeeze(np.array(features))
return features.reshape(-1,)
def get_user_specified_features(df, featurizer, verbose=True):
time1 = time.time()
df[featurizer.feature_fields] = df[featurizer.feature_fields].apply(
pd.to_numeric)
X_shard = df.as_matrix(columns=featurizer.feature_fields)
time2 = time.time()
log("TIMING: user specified processing took %0.3f s" % (time2 - time1),
verbose)
return X_shard
def featurize_mol_df(df, featurizer, field, verbose=True, log_every_N=1000):
sample_elems = df[field].tolist()
features = []
for ind, mol in enumerate(sample_elems):
if ind % log_every_N == 0:
log("Featurizing sample %d" % ind, verbose)
features.append(featurizer.featurize([mol]))
valid_inds = np.array(
[1 if elt.size > 0 else 0 for elt in features], dtype=bool)
features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
return np.squeeze(np.array(features)), valid_inds
class DataLoader(object):
def __init__(self,
tasks,
smiles_field=None,
id_field=None,
mol_field=None,
featurizer=None,
protein_field=None,
source_field=None,
verbose=True,
prot_seq_dict=None,
log_every_n=1000,
input_protein=True):
if not isinstance(tasks, list):
raise ValueError("tasks must be a list.")
self.verbose = verbose
self.tasks = tasks
self.smiles_field = smiles_field
if id_field is None:
self.id_field = smiles_field
else:
self.id_field = id_field
self.mol_field = mol_field
self.protein_field = protein_field
self.source_field = source_field
self.prot_seq_dict = prot_seq_dict
self.user_specified_features = None
if isinstance(featurizer, UserDefinedFeaturizer):
self.user_specified_features = featurizer.feature_fields
self.featurizer = featurizer
self.log_every_n = log_every_n
self.input_protein = input_protein
def featurize(self, input_files, data_dir=None, shard_size=8192):
log("Loading raw samples now.", self.verbose)
log("shard_size: %d" % shard_size, self.verbose)
if not isinstance(input_files, list):
input_files = [input_files]
def shard_generator():
for shard_num, shard in enumerate(
self.get_shards(input_files, shard_size)):
time1 = time.time()
X, valid_inds = self.featurize_shard(shard)
ids = shard[self.id_field].values
ids = ids[valid_inds]
if len(self.tasks) > 0:
# Featurize task results iff they exist.
y, w = convert_df_to_numpy(shard, self.tasks, self.id_field)
# Filter out examples where featurization failed.
y, w = (y[valid_inds], w[valid_inds])
assert len(X) == len(ids) == len(y) == len(w)
else:
# For prospective data where results are unknown, it makes
# no sense to have y values or weights.
y, w = (None, None)
assert len(X) == len(ids)
time2 = time.time()
log("TIMING: featurizing shard %d took %0.3f s" %
(shard_num, time2 - time1), self.verbose)
yield X, y, w, ids
return DiskDataset.create_dataset(
shard_generator(), data_dir, self.tasks, verbose=self.verbose)
def get_shards(self, input_files, shard_size):
raise NotImplementedError
def featurize_shard(self, shard):
raise NotImplementedError
class CSVLoader(DataLoader):
def get_shards(self, input_files, shard_size, verbose=True):
return load_csv_files(input_files, shard_size, verbose=verbose)
def featurize_shard(self, shard):
mol_features, valid_inds = featurize_smiles_df(shard, self.featurizer, field=self.smiles_field)
if len(mol_features.shape) > 2:
mol_features = np.squeeze(mol_features)
if self.input_protein:
proteins = featurize_protein(shard, field=self.protein_field, source_field=self.source_field,
prot_seq_dict=self.prot_seq_dict)
# Note: for ECFP with 1024 entries, mol_features is a (8192, 1024) sized array.
return np.concatenate((mol_features, proteins), axis=1), valid_inds
else:
return mol_features, valid_inds
| true | true |
f7f4cbfd58c36403999b2ed0805de9888ac43f75 | 3,950 | py | Python | app/models/image_nd.py | lv10/ross_sea | a4d89f06ef15bc2f7008fc5859d85ad86a0cba36 | [
"MIT"
] | null | null | null | app/models/image_nd.py | lv10/ross_sea | a4d89f06ef15bc2f7008fc5859d85ad86a0cba36 | [
"MIT"
] | null | null | null | app/models/image_nd.py | lv10/ross_sea | a4d89f06ef15bc2f7008fc5859d85ad86a0cba36 | [
"MIT"
] | null | null | null | import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from tools import data
class ImageND(object):
    """Wrapper around an n-channel satellite image stored in a MATLAB .mat file.

    Subclasses set ``SENSOR`` to the key under which the pixel data is
    stored inside the .mat file.
    """

    SENSOR = None  # .mat variable name holding the pixel data; set by subclasses

    def __init__(self, filename, dimensions=3):
        if dimensions < 3:
            print("The image doesn't have the minimum of 3 dimensions")
            sys.exit(1)

        self.dimensions = dimensions
        self.filename = filename
        self.filepath = os.path.join(data.DATA_DIR, self.filename)
        self.title = filename[2:15]

    def __validate(self, image):
        """Return True iff `image` exists and has at least `self.dimensions` axes."""
        return image is not None and len(image.shape) >= self.dimensions

    def image(self):
        """
        Returns the raw ndarray image

        :rtype: ndarray
        """
        image = data.mat_file(self.filepath).get(self.SENSOR)
        if not self.__validate(image):
            # BUG FIX: the original formatted `self.sensor`, which does not
            # exist (the attribute is the class-level SENSOR), raising an
            # AttributeError on this error path.
            print("Invalid dimensions or sensor {0} isn't in the image".format(
                self.SENSOR))
            sys.exit(1)

        return np.dstack(image)

    def nan_percentage(self):
        """Return the percentage of NaN pixels in the image.

        BUG FIX: the original counted the *non*-NaN pixels and divided two
        ints (Python 2 floor division), so it always returned 0 unless the
        image contained no NaNs at all.  It also loaded the image twice.
        """
        img = self.image()
        nan_count = np.count_nonzero(np.isnan(img))
        return (float(nan_count) / img.size) * 100

    def date(self):
        return data.parse_date(self.filename)

    def show(self, colorbar=True):
        plt.imshow(self.image())
        plt.title(self.filename)
        if colorbar:
            plt.colorbar()
        plt.show()

    # =====================================
    # Analysis
    # =====================================
    def rgb(self):
        """
        Return 3-tuple with (r, g, b)
        """
        red = self.channel("red")
        green = self.channel("green")
        blue = self.channel("blue")
        return (red, green, blue)

    def channel(self, channel=None):
        """
        This function is to be overwritten in by subclass
        """
        return None
class IbandImage(ImageND):
    """I-band image; channels stored in (red, green, blue) order."""

    SENSOR = "ibands"

    def channel(self, channel=None):
        """
        Returns a specific channel, the options are:
            - red, green, blue

        :params:
        :params channel: string with the specified channel

        :rType: ndarray
        """
        # Consistency fix: normalize the requested name exactly like the
        # sibling MbandImage/FcImage classes already do.
        channel = channel.strip().lower()
        if channel == 'red':
            return self.image()[:, :, 0]
        elif channel == 'green':
            return self.image()[:, :, 1]
        elif channel == 'blue':
            return self.image()[:, :, 2]
        else:
            print("Channel requested wasn't red, green or blue")
class MbandImage(ImageND):
    # M-band image: the channel axis is stored in (blue, green, red) order,
    # hence the reversed indices below.
    SENSOR = "mbands"

    def channel(self, channel=None):
        """
        Returns a specific channel, the options are:
            - red
            - blue

        :params:
        :params channel: string with the specified channel (case/whitespace
            insensitive; "green" is also accepted)

        :rType: ndarray
        """
        channel = channel.strip().lower()
        if channel == 'red':
            return self.image()[:, :, 2]
        elif channel == 'green':
            return self.image()[:, :, 1]
        elif channel == 'blue':
            return self.image()[:, :, 0]
        else:
            print "Channel requested wasn't red, green or blue"
class FcImage(ImageND):
    # False-color image: channel axis stored in (red, green, blue) order.
    SENSOR = "fc"

    def channel(self, channel=None):
        """
        Returns a specific channel, the options are:
            - red
            - blue

        :params:
        :params channel: string with the specified channel (case/whitespace
            insensitive; "green" is also accepted)

        :rType: ndarray
        """
        channel = channel.strip().lower()
        if channel == 'red':
            return self.image()[:, :, 0]
        elif channel == 'green':
            return self.image()[:, :, 1]
        elif channel == 'blue':
            return self.image()[:, :, 2]
        else:
            print "Channel requested wasn't red, green or blue"
| 24.842767 | 79 | 0.508354 | import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from tools import data
class ImageND(object):
SENSOR = None
def __init__(self, filename, dimensions=3):
if dimensions < 3:
print "The image doesn't have the minimum of 3 dimensions"
sys.exit(1)
self.dimensions = dimensions
self.filename = filename
self.filepath = os.path.join(data.DATA_DIR, self.filename)
self.title = filename[2:15]
def __validate(self, image):
"""
Validate image, check that's n-'dimensions' channel image
"""
if image is not None and len(image.shape) >= self.dimensions:
return True
return False
def image(self):
"""
Returns the raw ndarray image
:rtype: ndarray
"""
image = data.mat_file(self.filepath).get(self.SENSOR)
if not self.__validate(image):
print "Invalid dimensions or sensor {0} isn't in the image".format(
self.sensor)
sys.exit(1)
return np.dstack(image)
def nan_percentage(self):
nan_count = np.count_nonzero(~np.isnan(self.image()))
return (nan_count / self.image().size) * 100
def date(self):
return data.parse_date(self.filename)
def show(self, colorbar=True):
plt.imshow(self.image())
plt.title(self.filename)
if colorbar:
plt.colorbar()
plt.show()
# =====================================
# Analysis
# =====================================
def rgb(self):
"""
Return 3-tuple with (r, g, b)
"""
red = self.channel("red")
green = self.channel("green")
blue = self.channel("blue")
return (red, green, blue)
def channel(self, channel=None):
"""
This function is to be overwritten in by subclass
"""
return None
class IbandImage(ImageND):
SENSOR = "ibands"
def channel(self, channel=None):
"""
Returns a specific channel, the options are:
- red, green, blue
:params:
:params channel: string with the specified channel
:rType: ndarray
"""
if channel == 'red':
return self.image()[:, :, 0]
elif channel == 'green':
return self.image()[:, :, 1]
elif channel == 'blue':
return self.image()[:, :, 2]
else:
print "Channel requested wasn't red, green or blue"
class MbandImage(ImageND):
SENSOR = "mbands"
def channel(self, channel=None):
"""
Returns a specific channel, the options are:
- red
- blue
:params:
:params channel: string with the specified channel
:rType: ndarray
"""
channel = channel.strip().lower()
if channel == 'red':
return self.image()[:, :, 2]
elif channel == 'green':
return self.image()[:, :, 1]
elif channel == 'blue':
return self.image()[:, :, 0]
else:
print "Channel requested wasn't red, green or blue"
class FcImage(ImageND):
SENSOR = "fc"
def channel(self, channel=None):
"""
Returns a specific channel, the options are:
- red
- blue
:params:
:params channel: string with the specified channel
:rType: ndarray
"""
channel = channel.strip().lower()
if channel == 'red':
return self.image()[:, :, 0]
elif channel == 'green':
return self.image()[:, :, 1]
elif channel == 'blue':
return self.image()[:, :, 2]
else:
print "Channel requested wasn't red, green or blue"
| false | true |
f7f4cc4ce223b241d75cb344ef252e0d6303b292 | 458 | py | Python | DS-400/Medium/274-H-Index/Counting.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
] | 2 | 2020-04-24T18:36:52.000Z | 2020-04-25T00:15:57.000Z | DS-400/Medium/274-H-Index/Counting.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
] | null | null | null | DS-400/Medium/274-H-Index/Counting.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
] | null | null | null | class Solution:
def hIndex(self, citations):
n = len(citations)
# papers[i] is the number of papers with i citations.
papers = [0] * (n + 1)
for c in citations:
# All papers with citations larger than n is count as n.
papers[min(n, c)] += 1
i = n
s = papers[n] # sum of papers with citations >= i
while i > s:
i -= 1
s += papers[i]
return i
| 30.533333 | 68 | 0.49345 | class Solution:
def hIndex(self, citations):
n = len(citations)
papers = [0] * (n + 1)
for c in citations:
papers[min(n, c)] += 1
i = n
s = papers[n]
while i > s:
i -= 1
s += papers[i]
return i
| true | true |
f7f4ce2203670430de4c5248b66597fbd728c46b | 441 | py | Python | python-bindings/b2_terraform/arg_parser.py | reef-technologies/terraform-provider-b2 | 333d6146d30ce3d56b4405851b28b99b5b628eaa | [
"MIT"
] | 27 | 2020-12-18T01:04:18.000Z | 2022-03-06T08:37:14.000Z | python-bindings/b2_terraform/arg_parser.py | reef-technologies/terraform-provider-b2 | 333d6146d30ce3d56b4405851b28b99b5b628eaa | [
"MIT"
] | 25 | 2021-01-10T19:56:16.000Z | 2022-03-30T00:02:03.000Z | python-bindings/b2_terraform/arg_parser.py | reef-technologies/terraform-provider-b2 | 333d6146d30ce3d56b4405851b28b99b5b628eaa | [
"MIT"
] | 8 | 2020-11-27T16:33:55.000Z | 2022-03-26T10:48:07.000Z | ######################################################################
#
# File: python-bindings/b2_terraform/arg_parser.py
#
# Copyright 2021 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
import argparse
class ArgumentParser(argparse.ArgumentParser):
    """``argparse.ArgumentParser`` that raises instead of exiting.

    The stock parser prints usage and calls ``sys.exit(2)`` on a parse
    error; overriding ``error`` lets callers catch parse failures as
    regular exceptions.
    """
    def error(self, message):
        """Raise ``RuntimeError`` carrying the parser's error message."""
        raise RuntimeError(message)
| 25.941176 | 70 | 0.512472 | true | true | |
f7f4cedd52a7dcb6ed1aa094918694044a23aa26 | 11,350 | py | Python | SoftLayer/managers/load_balancer.py | dvzrv/softlayer-python | 9a5f6c6981bcc370084537b4d1769383499ce90d | [
"MIT"
] | 126 | 2015-01-05T05:09:22.000Z | 2021-07-02T00:16:35.000Z | SoftLayer/managers/load_balancer.py | dvzrv/softlayer-python | 9a5f6c6981bcc370084537b4d1769383499ce90d | [
"MIT"
] | 969 | 2015-01-05T15:55:31.000Z | 2022-03-31T19:55:20.000Z | SoftLayer/managers/load_balancer.py | dvzrv/softlayer-python | 9a5f6c6981bcc370084537b4d1769383499ce90d | [
"MIT"
] | 176 | 2015-01-22T11:23:40.000Z | 2022-02-11T13:16:58.000Z | """
SoftLayer.load_balancer
~~~~~~~~~~~~~~~~~~~~~~~
Load Balancer Manager/helpers
:license: MIT, see LICENSE for more details.
"""
from SoftLayer import exceptions
from SoftLayer.managers import ordering
from SoftLayer import utils
class LoadBalancerManager(utils.IdentifierMixin, object):
    """Manages SoftLayer load balancers.

    See product information here: https://www.ibm.com/cloud/load-balancer

    :param SoftLayer.API.BaseClient client: the client instance
    """
    # Mapping of LBaaS `type` ids to human-readable traffic directions.
    TYPE = {
        1: "Public to Private",
        0: "Private to Private",
        2: "Public to Public",
    }
    def __init__(self, client):
        self.client = client
        self.account = self.client['Account']
        self.prod_pkg = self.client['Product_Package']
        # Citrix Netscalers
        self.adc = self.client['Network_Application_Delivery_Controller']
        # IBM Cloud LB
        self.lbaas = self.client['Network_LBaaS_LoadBalancer']
        self.package_keyname = 'LBAAS'
    def get_adcs(self, mask=None):
        """Returns a list of all netscalers.

        :returns: SoftLayer_Network_Application_Delivery_Controller[].
        """
        if mask is None:
            mask = 'mask[managementIpAddress,outboundPublicBandwidthUsage,primaryIpAddress,datacenter]'
        return self.account.getApplicationDeliveryControllers(mask=mask)
    def get_adc(self, identifier, mask=None):
        """Returns a netscaler object.

        :returns: SoftLayer_Network_Application_Delivery_Controller.
        """
        if mask is None:
            mask = "mask[networkVlans, password, managementIpAddress, primaryIpAddress, subnets, tagReferences, " \
                   "licenseExpirationDate, datacenter]"
        return self.adc.getObject(id=identifier, mask=mask)
    def get_lbaas(self, mask=None):
        """Returns a list of IBM Cloud Loadbalancers

        :returns: SoftLayer_Network_LBaaS_LoadBalancer[]
        """
        if mask is None:
            mask = "mask[datacenter,listenerCount,memberCount]"
        this_lb = self.lbaas.getAllObjects(mask=mask)
        return this_lb
    def get_lb(self, identifier, mask=None):
        """Returns a IBM Cloud LoadBalancer

        :returns: SoftLayer_Network_LBaaS_LoadBalancer
        """
        if mask is None:
            mask = "mask[healthMonitors, l7Pools, members, sslCiphers, " \
                   "listeners[defaultPool[healthMonitor, members, sessionAffinity],l7Policies]]"
        this_lb = self.lbaas.getObject(id=identifier, mask=mask)
        # Member health is a separate API call keyed by the LB's UUID;
        # attach it so callers get a single combined structure.
        health = self.lbaas.getLoadBalancerMemberHealth(this_lb.get('uuid'))
        this_lb['health'] = health
        return this_lb
    def update_lb_health_monitors(self, uuid, checks):
        """calls SoftLayer_Network_LBaaS_HealthMonitor::updateLoadBalancerHealthMonitors()

        - `updateLoadBalancerHealthMonitors <https://sldn.softlayer.com/reference/services/SoftLayer_Network_LBaaS_\
HealthMonitor/updateLoadBalancerHealthMonitors/>`_
        - `SoftLayer_Network_LBaaS_LoadBalancerHealthMonitorConfiguration <https://sldn.softlayer.com/reference/\
datatypes/SoftLayer_Network_LBaaS_LoadBalancerHealthMonitorConfiguration/>`_

        :param uuid: loadBalancerUuid
        :param checks list: SoftLayer_Network_LBaaS_LoadBalancerHealthMonitorConfiguration[]
        """
        # return self.lbaas.updateLoadBalancerHealthMonitors(uuid, checks)
        return self.client.call('SoftLayer_Network_LBaaS_HealthMonitor', 'updateLoadBalancerHealthMonitors',
                                uuid, checks)
    def get_lbaas_uuid_id(self, identifier):
        """Gets a LBaaS uuid, id. Since sometimes you need one or the other.

        :param identifier: either the LB Id, UUID or Name, this function will return UUI and LB Id.
        :return (uuid, id):
        """
        mask = "mask[id,uuid]"
        # Resolve in order: numeric id -> UUID-shaped string -> plain name.
        if isinstance(identifier, int) or identifier.isdigit():
            this_lb = self.lbaas.getObject(id=identifier, mask=mask)
        elif len(identifier) == 36 and utils.UUID_RE.match(identifier):
            this_lb = self.lbaas.getLoadBalancer(identifier, mask=mask)
        else:
            this_lb = self.get_lbaas_by_name(identifier, mask=mask)
        return this_lb.get('uuid'), this_lb.get('id')
    def get_lbaas_by_name(self, name, mask=None):
        """Gets a LBaaS by name.

        :param name: Name of the LBaaS instance
        :param mask:
        :returns: SoftLayer_Network_LBaaS_LoadBalancer.
        :raises SoftLayerError: when no LBaaS matches `name`.
        """
        object_filter = {'name': {'operation': name}}
        this_lbs = self.lbaas.getAllObjects(filter=object_filter, mask=mask)
        if not this_lbs:
            raise exceptions.SoftLayerError("Unable to find LBaaS with name: {}".format(name))
        return this_lbs[0]
    def delete_lb_member(self, identifier, member_id):
        """Removes a member from a LBaaS instance

        https://sldn.softlayer.com/reference/services/SoftLayer_Network_LBaaS_Member/deleteLoadBalancerMembers/

        :param identifier: UUID of the LBaaS instance
        :param member_id: Member UUID to remove.
        """
        return self.client.call('SoftLayer_Network_LBaaS_Member', 'deleteLoadBalancerMembers',
                                identifier, [member_id])
    def add_lb_member(self, identifier, service_info):
        """Adds a member to a LBaaS instance

        https://sldn.softlayer.com/reference/services/SoftLayer_Network_LBaaS_Member/deleteLoadBalancerMembers/

        :param identifier: UUID of the LBaaS instance
        :param service_info: datatypes/SoftLayer_Network_LBaaS_LoadBalancerServerInstanceInfo
        """
        return self.client.call('SoftLayer_Network_LBaaS_Member', 'addLoadBalancerMembers',
                                identifier, [service_info])
    def add_lb_listener(self, identifier, listener):
        """Adds or update a listener to a LBaaS instance

        When using this to update a listener, just include the 'listenerUuid' in the listener object
        See the following for listener configuration options
        https://sldn.softlayer.com/reference/datatypes/SoftLayer_Network_LBaaS_LoadBalancerProtocolConfiguration/

        :param identifier: UUID of the LBaaS instance
        :param listener: Object with all listener configurations
        """
        return self.client.call('SoftLayer_Network_LBaaS_Listener', 'updateLoadBalancerProtocols',
                                identifier, [listener])
    def get_l7policies(self, identifier):
        """Gets Layer7 policies from a listener

        :param identifier: id
        """
        return self.client.call('SoftLayer_Network_LBaaS_Listener', 'getL7Policies', id=identifier)
    def get_all_l7policies(self):
        """Gets all Layer7 policies

        :returns: Dictionary of (listener_id: policies list).
        """
        mask = 'mask[listeners[l7Policies]]'
        lbaas = self.get_lbaas(mask=mask)
        # Flatten all listeners across every load balancer first.
        listeners = []
        for load_bal in lbaas:
            listeners.extend(load_bal.get('listeners'))
        policies = {}
        for protocol in listeners:
            if protocol.get('l7Policies'):
                listener_id = protocol.get('id')
                l7policies = protocol.get('l7Policies')
                policies[listener_id] = l7policies
        return policies
    def add_lb_l7_pool(self, identifier, pool, members, health, session):
        """Creates a new l7 pool for a LBaaS instance

        - https://sldn.softlayer.com/reference/services/SoftLayer_Network_LBaaS_L7Pool/createL7Pool/
        - https://cloud.ibm.com/docs/infrastructure/loadbalancer-service?topic=loadbalancer-service-api-reference

        :param identifier: UUID of the LBaaS instance
        :param pool SoftLayer_Network_LBaaS_L7Pool: Description of the pool
        :param members SoftLayer_Network_LBaaS_L7Member[]: Array of servers with their address, port, weight
        :param health SoftLayer_Network_LBaaS_L7HealthMonitor: A health monitor
        :param session SoftLayer_Network_LBaaS_L7SessionAffinity: Whether to use affinity
        """
        return self.client.call('SoftLayer_Network_LBaaS_L7Pool', 'createL7Pool',
                                identifier, pool, members, health, session)
    def del_lb_l7_pool(self, identifier):
        """Deletes a l7 pool

        :param identifier: Id of the L7Pool
        """
        return self.client.call('SoftLayer_Network_LBaaS_L7Pool', 'deleteObject', id=identifier)
    def remove_lb_listener(self, identifier, listener):
        """Removes a listener to a LBaaS instance

        :param identifier: UUID of the LBaaS instance
        :param listener: UUID of the Listner to be removed.
        """
        return self.client.call('SoftLayer_Network_LBaaS_Listener', 'deleteLoadBalancerProtocols',
                                identifier, [listener])
    def order_lbaas(self, datacenter, name, desc, protocols, subnet_id, public=False, verify=False):
        """Allows to order a Load Balancer

        :param datacenter: Shortname for the SoftLayer datacenter to order in.
        :param name: Identifier for the new LB.
        :param desc: Optional description for the lb.
        :param protocols: https://sldn.softlayer.com/reference/datatypes/SoftLayer_Network_LBaaS_Listener/
        :param subnet_id: Id of the subnet for this new LB to live on.
        :param public: Use Public side for the backend.
        :param verify: Don't actually order if True.
        """
        order_mgr = ordering.OrderingManager(self.client)
        package = order_mgr.get_package_by_key(self.package_keyname, mask='mask[id,keyName,itemPrices]')
        # Keep only location-agnostic prices (no locationGroupId).
        prices = []
        for price in package.get('itemPrices'):
            if not price.get('locationGroupId', False):
                prices.append(price.get('id'))
        # Build the configuration of the order
        order_data = {
            'complexType': 'SoftLayer_Container_Product_Order_Network_LoadBalancer_AsAService',
            'name': name,
            'description': desc,
            'location': datacenter,
            'packageId': package.get('id'),
            'useHourlyPricing': True,  # Required since LBaaS is an hourly service
            'prices': [{'id': price_id} for price_id in prices],
            'protocolConfigurations': protocols,
            'subnets': [{'id': subnet_id}],
            'isPublic': public
        }
        if verify:
            response = self.client['Product_Order'].verifyOrder(order_data)
        else:
            response = self.client['Product_Order'].placeOrder(order_data)
        return response
    def lbaas_order_options(self):
        """Gets the options to order a LBaaS instance."""
        _filter = {'keyName': {'operation': self.package_keyname}}
        mask = "mask[id,keyName,name,items[prices],regions[location[location[groups]]]]"
        package = self.client.call('SoftLayer_Product_Package', 'getAllObjects', filter=_filter, mask=mask)
        return package.pop()
    def cancel_lbaas(self, uuid):
        """Cancels a LBaaS instance.

        https://sldn.softlayer.com/reference/services/SoftLayer_Network_LBaaS_LoadBalancer/cancelLoadBalancer/

        :param uuid string: UUID of the LBaaS instance to cancel
        """
        return self.lbaas.cancelLoadBalancer(uuid)
| 40.827338 | 116 | 0.664229 | from SoftLayer import exceptions
from SoftLayer.managers import ordering
from SoftLayer import utils
class LoadBalancerManager(utils.IdentifierMixin, object):
    """Manages SoftLayer load balancers (Netscaler ADCs and IBM Cloud LBaaS)."""
    # Mapping of LBaaS `type` ids to human-readable traffic directions.
    TYPE = {
        1: "Public to Private",
        0: "Private to Private",
        2: "Public to Public",
    }
    def __init__(self, client):
        # `client` is the SoftLayer API client; service handles are cached.
        self.client = client
        self.account = self.client['Account']
        self.prod_pkg = self.client['Product_Package']
        self.adc = self.client['Network_Application_Delivery_Controller']
        self.lbaas = self.client['Network_LBaaS_LoadBalancer']
        self.package_keyname = 'LBAAS'
    def get_adcs(self, mask=None):
        """Returns all netscaler (ADC) objects for the account."""
        if mask is None:
            mask = 'mask[managementIpAddress,outboundPublicBandwidthUsage,primaryIpAddress,datacenter]'
        return self.account.getApplicationDeliveryControllers(mask=mask)
    def get_adc(self, identifier, mask=None):
        """Returns a single netscaler (ADC) object by id."""
        if mask is None:
            mask = "mask[networkVlans, password, managementIpAddress, primaryIpAddress, subnets, tagReferences, " \
                   "licenseExpirationDate, datacenter]"
        return self.adc.getObject(id=identifier, mask=mask)
    def get_lbaas(self, mask=None):
        """Returns all IBM Cloud (LBaaS) load balancers."""
        if mask is None:
            mask = "mask[datacenter,listenerCount,memberCount]"
        this_lb = self.lbaas.getAllObjects(mask=mask)
        return this_lb
    def get_lb(self, identifier, mask=None):
        """Returns one LBaaS instance by id, with member health attached."""
        if mask is None:
            mask = "mask[healthMonitors, l7Pools, members, sslCiphers, " \
                   "listeners[defaultPool[healthMonitor, members, sessionAffinity],l7Policies]]"
        this_lb = self.lbaas.getObject(id=identifier, mask=mask)
        # member health is a separate, UUID-keyed API call
        health = self.lbaas.getLoadBalancerMemberHealth(this_lb.get('uuid'))
        this_lb['health'] = health
        return this_lb
    def update_lb_health_monitors(self, uuid, checks):
        """Updates health monitor configurations for the LBaaS `uuid`."""
        return self.client.call('SoftLayer_Network_LBaaS_HealthMonitor', 'updateLoadBalancerHealthMonitors',
                                uuid, checks)
    def get_lbaas_uuid_id(self, identifier):
        """Resolves `identifier` (id, UUID or name) to a (uuid, id) pair."""
        mask = "mask[id,uuid]"
        # Resolve in order: numeric id -> UUID-shaped string -> plain name.
        if isinstance(identifier, int) or identifier.isdigit():
            this_lb = self.lbaas.getObject(id=identifier, mask=mask)
        elif len(identifier) == 36 and utils.UUID_RE.match(identifier):
            this_lb = self.lbaas.getLoadBalancer(identifier, mask=mask)
        else:
            this_lb = self.get_lbaas_by_name(identifier, mask=mask)
        return this_lb.get('uuid'), this_lb.get('id')
    def get_lbaas_by_name(self, name, mask=None):
        """Returns the first LBaaS matching `name`; raises SoftLayerError if none."""
        object_filter = {'name': {'operation': name}}
        this_lbs = self.lbaas.getAllObjects(filter=object_filter, mask=mask)
        if not this_lbs:
            raise exceptions.SoftLayerError("Unable to find LBaaS with name: {}".format(name))
        return this_lbs[0]
    def delete_lb_member(self, identifier, member_id):
        """Removes member `member_id` from the LBaaS instance `identifier` (UUID)."""
        return self.client.call('SoftLayer_Network_LBaaS_Member', 'deleteLoadBalancerMembers',
                                identifier, [member_id])
    def add_lb_member(self, identifier, service_info):
        """Adds a member (server instance info) to the LBaaS instance `identifier`."""
        return self.client.call('SoftLayer_Network_LBaaS_Member', 'addLoadBalancerMembers',
                                identifier, [service_info])
    def add_lb_listener(self, identifier, listener):
        """Adds or updates a listener (protocol configuration) on the LBaaS instance."""
        return self.client.call('SoftLayer_Network_LBaaS_Listener', 'updateLoadBalancerProtocols',
                                identifier, [listener])
    def get_l7policies(self, identifier):
        """Returns Layer7 policies of listener `identifier`."""
        return self.client.call('SoftLayer_Network_LBaaS_Listener', 'getL7Policies', id=identifier)
    def get_all_l7policies(self):
        """Returns {listener_id: [l7 policies]} across all load balancers."""
        mask = 'mask[listeners[l7Policies]]'
        lbaas = self.get_lbaas(mask=mask)
        # flatten listeners across every load balancer
        listeners = []
        for load_bal in lbaas:
            listeners.extend(load_bal.get('listeners'))
        policies = {}
        for protocol in listeners:
            if protocol.get('l7Policies'):
                listener_id = protocol.get('id')
                l7policies = protocol.get('l7Policies')
                policies[listener_id] = l7policies
        return policies
    def add_lb_l7_pool(self, identifier, pool, members, health, session):
        """Creates a new L7 pool (with members, health monitor, affinity) on the LBaaS."""
        return self.client.call('SoftLayer_Network_LBaaS_L7Pool', 'createL7Pool',
                                identifier, pool, members, health, session)
    def del_lb_l7_pool(self, identifier):
        """Deletes the L7 pool with id `identifier`."""
        return self.client.call('SoftLayer_Network_LBaaS_L7Pool', 'deleteObject', id=identifier)
    def remove_lb_listener(self, identifier, listener):
        """Removes listener `listener` (UUID) from the LBaaS instance `identifier`."""
        return self.client.call('SoftLayer_Network_LBaaS_Listener', 'deleteLoadBalancerProtocols',
                                identifier, [listener])
    def order_lbaas(self, datacenter, name, desc, protocols, subnet_id, public=False, verify=False):
        """Places (or verifies, if `verify`) an order for a new LBaaS instance."""
        order_mgr = ordering.OrderingManager(self.client)
        package = order_mgr.get_package_by_key(self.package_keyname, mask='mask[id,keyName,itemPrices]')
        # keep only location-agnostic prices (no locationGroupId)
        prices = []
        for price in package.get('itemPrices'):
            if not price.get('locationGroupId', False):
                prices.append(price.get('id'))
        order_data = {
            'complexType': 'SoftLayer_Container_Product_Order_Network_LoadBalancer_AsAService',
            'name': name,
            'description': desc,
            'location': datacenter,
            'packageId': package.get('id'),
            'useHourlyPricing': True,  # LBaaS is billed hourly
            'prices': [{'id': price_id} for price_id in prices],
            'protocolConfigurations': protocols,
            'subnets': [{'id': subnet_id}],
            'isPublic': public
        }
        if verify:
            response = self.client['Product_Order'].verifyOrder(order_data)
        else:
            response = self.client['Product_Order'].placeOrder(order_data)
        return response
    def lbaas_order_options(self):
        """Returns the LBAAS product package (ordering options)."""
        _filter = {'keyName': {'operation': self.package_keyname}}
        mask = "mask[id,keyName,name,items[prices],regions[location[location[groups]]]]"
        package = self.client.call('SoftLayer_Product_Package', 'getAllObjects', filter=_filter, mask=mask)
        return package.pop()
    def cancel_lbaas(self, uuid):
        """Cancels the LBaaS instance with the given UUID."""
        return self.lbaas.cancelLoadBalancer(uuid)
| true | true |
f7f4cfec1c6450d6e7c30d5c08af076255c16635 | 16,527 | py | Python | catalyst/callbacks/metric.py | otherman16/catalyst | ccef2c7de7ff3869523a86f291b6a2390308bad5 | [
"Apache-2.0"
] | 1 | 2020-11-14T13:35:22.000Z | 2020-11-14T13:35:22.000Z | catalyst/callbacks/metric.py | otherman16/catalyst | ccef2c7de7ff3869523a86f291b6a2390308bad5 | [
"Apache-2.0"
] | null | null | null | catalyst/callbacks/metric.py | otherman16/catalyst | ccef2c7de7ff3869523a86f291b6a2390308bad5 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Callable, Dict, List, TYPE_CHECKING, Union
from abc import ABC, abstractmethod
from collections import defaultdict
import logging
import numpy as np
import torch
from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
from catalyst.tools.meters.averagevaluemeter import AverageValueMeter
from catalyst.utils.distributed import get_distributed_mean
from catalyst.utils.misc import get_dictkey_auto_fn
if TYPE_CHECKING:
from catalyst.core.runner import IRunner
logger = logging.getLogger(__name__)
class IMetricCallback(ABC, Callback):
    """Callback abstraction for metric computation."""
    def __init__(
        self,
        prefix: str,
        input_key: Union[str, List[str], Dict[str, str]] = "targets",
        output_key: Union[str, List[str], Dict[str, str]] = "logits",
        multiplier: float = 1.0,
        **metrics_kwargs,
    ):
        """
        Args:
            prefix: key prefix to store computed
                batch/loader/epoch metrics
            input_key: input key to use for metric calculation;
                specifies our `y_true`
            output_key: output key to use for metric calculation;
                specifies our `y_pred`
            multiplier: scalar for metric reweighting
            **metrics_kwargs: extra metric params
                to pass for metric computation
        """
        super().__init__(order=CallbackOrder.metric, node=CallbackNode.all)
        self.prefix = prefix
        self.input_key = input_key
        self.output_key = output_key
        self.multiplier = multiplier
        self.metrics_kwargs = metrics_kwargs
        self._get_input = get_dictkey_auto_fn(self.input_key)
        self._get_output = get_dictkey_auto_fn(self.output_key)
        # Choose how the metric function will be called based on the key
        # specification: plain string keys -> positional (value) call;
        # dict/list/tuple/None/"__all__" keys -> keyword (key-value) call.
        kv_types = (dict, tuple, list, type(None))
        is_value_input = (
            isinstance(self.input_key, str) and self.input_key != "__all__"
        )
        is_value_output = (
            isinstance(self.output_key, str) and self.output_key != "__all__"
        )
        is_kv_input = (
            isinstance(self.input_key, kv_types) or self.input_key == "__all__"
        )
        is_kv_output = (
            isinstance(self.output_key, kv_types)
            or self.output_key == "__all__"
        )
        if hasattr(self, "_compute_metric"):
            pass  # overridden in descendants
        elif is_value_input and is_value_output:
            self._compute_metric = self._compute_metric_value
        elif is_kv_input and is_kv_output:
            self._compute_metric = self._compute_metric_key_value
        else:
            raise NotImplementedError()
    @property
    @abstractmethod
    def metric_fn(self):
        """Specifies used metric function."""
        pass
    def _compute_metric_value(self, output: Dict, input: Dict):
        """
        Compute metric for value-based case.
        For example accuracy on `y_pred` and `y_true`.

        Args:
            output: dictionary with output (`y_pred`) values
                for metric computation
            input: dictionary with input (`y_true`) values
                for metric computation

        Returns:
            computed metric
        """
        output = self._get_output(output, self.output_key)
        input = self._get_input(input, self.input_key)
        metric = self.metric_fn(output, input, **self.metrics_kwargs)
        return metric
    def _compute_metric_key_value(self, output: Dict, input: Dict):
        """
        Compute metric for key-value-based case.
        For example accuracy on `y_pred` and `y_true` and `sample_weights`.

        Args:
            output: dictionary with output (`y_pred`) values
                for metric computation
            input: dictionary with input (`y_true`, `sample_weights`)
                values for metric computation

        Returns:
            computed metric
        """
        output = self._get_output(output, self.output_key)
        input = self._get_input(input, self.input_key)
        metric = self.metric_fn(**output, **input, **self.metrics_kwargs)
        return metric
    def _process_computed_metric(self, metric: Union[Dict, float]) -> Dict:
        """
        Process metric for key-value-based logging.
        Scales by `multiplier`, add appropriate naming.

        Args:
            metric: raw computed metric — a dict of sub-metrics
                or a single scalar/tensor value

        Returns:
            Dict: processed scaled metric(s) with names
        """
        if isinstance(metric, dict):
            # prefix every sub-metric name
            metric = {
                f"{self.prefix}{key}": value * self.multiplier
                for key, value in metric.items()
            }
        elif isinstance(metric, (float, int, torch.Tensor)):
            metric = {f"{self.prefix}": metric * self.multiplier}
        else:
            raise NotImplementedError()
        return metric
class IBatchMetricCallback(IMetricCallback):
    """
    Batch-based metric callback.
    Computes the metric on every batch and stores it for logging.
    """

    def on_batch_end(self, runner: "IRunner") -> None:
        """Computes the metric and adds it to ``runner.batch_metrics``."""
        raw_metric = self._compute_metric(runner.output, runner.input)
        named_metrics = self._process_computed_metric(raw_metric)
        runner.batch_metrics.update(**named_metrics)
class ILoaderMetricCallback(IMetricCallback):
    """
    Loader-based metric callback.
    Stores input/output values during loaders run
    and computes metric in the end.
    """

    def __init__(self, **kwargs):
        """Init.

        Args:
            **kwargs: `IMetricCallback` params.
        """
        super().__init__(**kwargs)
        # key -> list of per-batch numpy arrays, concatenated on loader end;
        # `defaultdict(list)` is the idiomatic form of `defaultdict(lambda: [])`
        self.input = defaultdict(list)
        self.output = defaultdict(list)

    def on_loader_start(self, runner: "IRunner"):
        """Reinitialises internal storage."""
        self.input = defaultdict(list)
        self.output = defaultdict(list)

    def on_batch_end(self, runner: "IRunner") -> None:
        """Stores new input/output for the metric computation."""
        output = self._get_output(runner.output, self.output_key)
        input = self._get_input(runner.input, self.input_key)
        for data, storage in zip((input, output), (self.input, self.output)):
            if isinstance(data, dict):
                for key, value in data.items():
                    storage[key].append(value.detach().cpu().numpy())
            else:
                # single-tensor case is stored under the reserved "_data" key
                storage["_data"].append(data.detach().cpu().numpy())

    def on_loader_end(self, runner: "IRunner"):
        """Computes loader-based metric.

        Args:
            runner: current runner
        """
        input = {
            key: torch.from_numpy(np.concatenate(self.input[key], axis=0))
            for key in self.input
        }
        output = {
            key: torch.from_numpy(np.concatenate(self.output[key], axis=0))
            for key in self.output
        }
        # The "_data" key marks the single-tensor (non key-value) case.
        # Checking for it — rather than `len(...) == 1` — avoids a KeyError
        # when a key-value input happens to contain exactly one key.
        input = {self.input_key: input["_data"]} if "_data" in input else input
        output = (
            {self.output_key: output["_data"]} if "_data" in output else output
        )
        metrics = self._compute_metric(output, input)
        metrics = self._process_computed_metric(metrics)
        runner.loader_metrics.update(**metrics)
class BatchMetricCallback(IBatchMetricCallback):
    """A callback that returns single metric on `runner.on_batch_end`."""

    def __init__(
        self,
        prefix: str,
        metric_fn: Callable,
        input_key: Union[str, List[str], Dict[str, str]] = "targets",
        output_key: Union[str, List[str], Dict[str, str]] = "logits",
        multiplier: float = 1.0,
        **metric_kwargs,
    ):
        """Init.

        Args:
            prefix: key prefix to store computed batch/loader/epoch metrics
            metric_fn: metric function to compute on each batch
            input_key: input key to use for metric calculation (`y_true`)
            output_key: output key to use for metric calculation (`y_pred`)
            multiplier: scalar for metric reweighting
            **metric_kwargs: extra params forwarded to the metric function
        """
        # stash the callable first; the base class never reads it
        self.metric = metric_fn
        super().__init__(
            prefix=prefix,
            input_key=input_key,
            output_key=output_key,
            multiplier=multiplier,
            **metric_kwargs,
        )

    @property
    def metric_fn(self):
        """Specifies used metric function."""
        return self.metric
class LoaderMetricCallback(ILoaderMetricCallback):
    """A callback that computes a single metric over the whole loader."""

    def __init__(
        self,
        prefix: str,
        metric_fn: Callable,
        input_key: Union[str, List[str], Dict[str, str]] = "targets",
        output_key: Union[str, List[str], Dict[str, str]] = "logits",
        multiplier: float = 1.0,
        **metric_kwargs,
    ):
        """Init.

        Args:
            prefix: key prefix to store computed batch/loader/epoch metrics
            metric_fn: metric function computed once per loader
            input_key: input key to use for metric calculation (`y_true`)
            output_key: output key to use for metric calculation (`y_pred`)
            multiplier: scalar for metric reweighting
            **metric_kwargs: extra params forwarded to the metric function
        """
        # stash the callable first; the base class never reads it
        self.metric = metric_fn
        super().__init__(
            prefix=prefix,
            input_key=input_key,
            output_key=output_key,
            multiplier=multiplier,
            **metric_kwargs,
        )

    @property
    def metric_fn(self):
        """Specifies used metric function."""
        return self.metric
class MetricAggregationCallback(Callback):
    """A callback to aggregate several metrics in one value."""

    def __init__(
        self,
        prefix: str,
        metrics: Union[str, List[str], Dict[str, float]] = None,
        mode: str = "mean",
        scope: str = "batch",
        multiplier: float = 1.0,
    ) -> None:
        """
        Args:
            prefix: new key for aggregated metric.
            metrics (Union[str, List[str], Dict[str, float]]): If not None,
                it aggregates only the values from the metric by these keys.
                for ``weighted_sum`` aggregation it must be a Dict[str, float].
            mode: function for aggregation.
                Must be either ``sum``, ``mean``,
                ``weighted_sum`` or ``weighted_mean``.
            scope: when to aggregate: ``batch``, ``loader`` or ``epoch``.
            multiplier: scale factor for the aggregated metric.
        """
        super().__init__(
            order=CallbackOrder.metric_aggregation, node=CallbackNode.all
        )
        if prefix is None or not isinstance(prefix, str):
            raise ValueError("prefix must be str")
        if mode in ("sum", "mean"):
            if metrics is not None and not isinstance(metrics, list):
                raise ValueError(
                    "For `sum` or `mean` mode the metrics must be "
                    "None or list or str (not dict)"
                )
        elif mode in ("weighted_sum", "weighted_mean"):
            if metrics is None or not isinstance(metrics, dict):
                raise ValueError(
                    "For `weighted_sum` or `weighted_mean` mode "
                    "the metrics must be specified "
                    "and must be a dict"
                )
        else:
            raise NotImplementedError(
                "mode must be `sum`, `mean` "
                "or `weighted_sum` or `weighted_mean`"
            )
        assert scope in ("batch", "loader", "epoch")
        if isinstance(metrics, str):
            metrics = [metrics]
        self.prefix = prefix
        self.metrics = metrics
        self.mode = mode
        self.scope = scope
        self.multiplier = multiplier
        if mode in ("sum", "weighted_sum", "weighted_mean"):
            self.aggregation_fn = (
                lambda x: torch.sum(torch.stack(x)) * multiplier
            )
            if mode == "weighted_mean":
                # BUGFIX: normalize by the sum of the weight *values*;
                # `sum(metrics.items())` summed (key, weight) tuples and
                # raised TypeError.
                weights_sum = sum(metrics.values())
                self.metrics = {
                    key: weight / weights_sum
                    for key, weight in metrics.items()
                }
        elif mode == "mean":
            self.aggregation_fn = (
                lambda x: torch.mean(torch.stack(x)) * multiplier
            )

    def _preprocess(self, metrics: Any) -> List[float]:
        """Selects (and, for weighted modes, rescales) the metric values."""
        if self.metrics is not None:
            if self.mode in ("weighted_sum", "weighted_mean"):
                # BUGFIX: `weighted_mean` must also apply its (normalized)
                # weights; previously only `weighted_sum` did.
                result = [
                    metrics[key] * value for key, value in self.metrics.items()
                ]
            else:
                result = [metrics[key] for key in self.metrics]
        else:
            result = list(metrics.values())
        return result

    def _process_metrics(self, metrics: Dict):
        """Aggregates selected values and stores them under ``self.prefix``."""
        metrics_processed = self._preprocess(metrics)
        metric_aggregated = self.aggregation_fn(metrics_processed)
        metrics[self.prefix] = metric_aggregated

    def on_batch_end(self, runner: "IRunner") -> None:
        """Computes the metric and add it to the batch metrics.

        Args:
            runner: current runner
        """
        if self.scope == "batch":
            self._process_metrics(runner.batch_metrics)

    def on_loader_end(self, runner: "IRunner"):
        """Computes the metric and add it to the loader metrics.

        Args:
            runner: current runner
        """
        if self.scope == "loader":
            self._process_metrics(runner.loader_metrics)

    def on_epoch_end(self, runner: "IRunner"):
        """Computes the metric and add it to the epoch metrics.

        Args:
            runner: current runner
        """
        if self.scope == "epoch":
            self._process_metrics(runner.epoch_metrics)
class MetricManagerCallback(Callback):
    """
    Prepares metrics for logging, transferring values from PyTorch to numpy.
    """
    def __init__(self):
        """Init."""
        super().__init__(
            order=CallbackOrder.logging - 1, node=CallbackNode.all,
        )
        # per-loader running averages, keyed by metric name
        self.meters: Dict[str, AverageValueMeter] = None
    @staticmethod
    def to_single_value(value: Any) -> float:
        """Convert any value to float.

        Args:
            value: some value

        Returns:
            result
        """
        if hasattr(value, "item"):
            # torch.Tensor / numpy scalar -> plain Python number
            value = value.item()
        value = float(value)
        return value
    @staticmethod
    def _process_metrics(metrics: Dict[str, Any]):
        # Average each value across distributed workers, then cast to float.
        output = {}
        for key, value in metrics.items():
            value = get_distributed_mean(value)
            value = MetricManagerCallback.to_single_value(value)
            output[key] = value
        return output
    def on_epoch_start(self, runner: "IRunner") -> None:
        """Epoch start hook.

        Args:
            runner: current runner
        """
        # NOTE: defaultdict(None) has no default factory and so behaves
        # like a plain dict (missing keys still raise KeyError)
        runner.epoch_metrics = defaultdict(None)
    def on_loader_start(self, runner: "IRunner") -> None:
        """Loader start hook.

        Args:
            runner: current runner
        """
        runner.loader_metrics = defaultdict(None)
        self.meters = defaultdict(AverageValueMeter)
    def on_batch_start(self, runner: "IRunner") -> None:
        """Batch start hook.

        Args:
            runner: current runner
        """
        runner.batch_metrics = defaultdict(None)
    def on_batch_end(self, runner: "IRunner") -> None:
        """Batch end hook.

        Args:
            runner: current runner
        """
        runner.batch_metrics = self._process_metrics(runner.batch_metrics)
        for key, value in runner.batch_metrics.items():
            # batch_size presumably acts as the averaging weight in
            # AverageValueMeter.add — TODO confirm against the meter impl
            self.meters[key].add(value, runner.batch_size)
    def on_loader_end(self, runner: "IRunner") -> None:
        """Loader end hook.

        Args:
            runner: current runner
        """
        for key, value in self.meters.items():
            value = value.mean
            runner.loader_metrics[key] = value
        for key, value in runner.loader_metrics.items():
            # expose loader metrics at epoch level as "<loader>_<metric>"
            runner.epoch_metrics[f"{runner.loader_key}_{key}"] = value
# backward compatibility: `MetricCallback` is the historical name for
# `BatchMetricCallback`
MetricCallback = BatchMetricCallback
__all__ = [
    "IMetricCallback",
    "IBatchMetricCallback",
    "ILoaderMetricCallback",
    "BatchMetricCallback",
    "LoaderMetricCallback",
    "MetricCallback",
    "MetricAggregationCallback",
    "MetricManagerCallback",
]
| 31.967118 | 79 | 0.582864 | from typing import Any, Callable, Dict, List, TYPE_CHECKING, Union
from abc import ABC, abstractmethod
from collections import defaultdict
import logging
import numpy as np
import torch
from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
from catalyst.tools.meters.averagevaluemeter import AverageValueMeter
from catalyst.utils.distributed import get_distributed_mean
from catalyst.utils.misc import get_dictkey_auto_fn
if TYPE_CHECKING:
from catalyst.core.runner import IRunner
logger = logging.getLogger(__name__)
class IMetricCallback(ABC, Callback):
    """Abstract metric callback: computes a metric from runner input/output."""
    def __init__(
        self,
        prefix: str,
        input_key: Union[str, List[str], Dict[str, str]] = "targets",
        output_key: Union[str, List[str], Dict[str, str]] = "logits",
        multiplier: float = 1.0,
        **metrics_kwargs,
    ):
        """
        Args:
            prefix: key prefix for stored metric values
            input_key: runner input key(s) to use (`y_true`)
            output_key: runner output key(s) to use (`y_pred`)
            multiplier: scalar for metric reweighting
            **metrics_kwargs: extra params forwarded to the metric function
        """
        super().__init__(order=CallbackOrder.metric, node=CallbackNode.all)
        self.prefix = prefix
        self.input_key = input_key
        self.output_key = output_key
        self.multiplier = multiplier
        self.metrics_kwargs = metrics_kwargs
        self._get_input = get_dictkey_auto_fn(self.input_key)
        self._get_output = get_dictkey_auto_fn(self.output_key)
        # Choose the call convention from the key specification:
        # plain string keys -> positional call; dict/list/tuple/None or
        # "__all__" keys -> keyword (key-value) call.
        kv_types = (dict, tuple, list, type(None))
        is_value_input = (
            isinstance(self.input_key, str) and self.input_key != "__all__"
        )
        is_value_output = (
            isinstance(self.output_key, str) and self.output_key != "__all__"
        )
        is_kv_input = (
            isinstance(self.input_key, kv_types) or self.input_key == "__all__"
        )
        is_kv_output = (
            isinstance(self.output_key, kv_types)
            or self.output_key == "__all__"
        )
        if hasattr(self, "_compute_metric"):
            pass  # already provided by a subclass
        elif is_value_input and is_value_output:
            self._compute_metric = self._compute_metric_value
        elif is_kv_input and is_kv_output:
            self._compute_metric = self._compute_metric_key_value
        else:
            raise NotImplementedError()
    @property
    @abstractmethod
    def metric_fn(self):
        """Metric function to apply; must be provided by subclasses."""
        pass
    def _compute_metric_value(self, output: Dict, input: Dict):
        """Computes the metric with a positional `(y_pred, y_true)` call."""
        output = self._get_output(output, self.output_key)
        input = self._get_input(input, self.input_key)
        metric = self.metric_fn(output, input, **self.metrics_kwargs)
        return metric
    def _compute_metric_key_value(self, output: Dict, input: Dict):
        """Computes the metric with a keyword-argument call."""
        output = self._get_output(output, self.output_key)
        input = self._get_input(input, self.input_key)
        metric = self.metric_fn(**output, **input, **self.metrics_kwargs)
        return metric
    def _process_computed_metric(self, metric: Union[Dict, float]) -> Dict:
        """Scales metric(s) by ``multiplier`` and prefixes their names."""
        if isinstance(metric, dict):
            metric = {
                f"{self.prefix}{key}": value * self.multiplier
                for key, value in metric.items()
            }
        elif isinstance(metric, (float, int, torch.Tensor)):
            metric = {f"{self.prefix}": metric * self.multiplier}
        else:
            raise NotImplementedError()
        return metric
class IBatchMetricCallback(IMetricCallback):
    """Metric callback evaluated once per batch."""

    def on_batch_end(self, runner: "IRunner") -> None:
        """Compute the metric on the current batch and record it."""
        raw_metric = self._compute_metric(runner.output, runner.input)
        runner.batch_metrics.update(**self._process_computed_metric(raw_metric))
class ILoaderMetricCallback(IMetricCallback):
    """Metric callback that accumulates per-batch tensors and computes the
    metric once per loader over the concatenated arrays."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # key -> list of per-batch numpy arrays, accumulated over the loader.
        self.input = defaultdict(lambda: [])
        self.output = defaultdict(lambda: [])

    def on_loader_start(self, runner: "IRunner"):
        # Reset accumulators at the start of every loader.
        self.input = defaultdict(lambda: [])
        self.output = defaultdict(lambda: [])

    def on_batch_end(self, runner: "IRunner") -> None:
        """Detach the selected tensors and stash them on CPU as numpy."""
        output = self._get_output(runner.output, self.output_key)
        input = self._get_input(runner.input, self.input_key)

        for data, storage in zip((input, output), (self.input, self.output)):
            if isinstance(data, dict):
                for key, value in data.items():
                    storage[key].append(value.detach().cpu().numpy())
            else:
                # Non-dict payloads are stored under a synthetic "_data" key.
                storage["_data"].append(data.detach().cpu().numpy())

    def on_loader_end(self, runner: "IRunner"):
        """Concatenate accumulated batches and compute the loader metric."""
        input = {
            key: torch.from_numpy(np.concatenate(self.input[key], axis=0))
            for key in self.input
        }
        output = {
            key: torch.from_numpy(np.concatenate(self.output[key], axis=0))
            for key in self.output
        }
        # NOTE(review): assumes a single-entry dict always came from the
        # "_data" branch above; a genuine one-key dict payload would raise
        # KeyError here -- confirm against callers.
        input = {self.input_key: input["_data"]} if len(input) == 1 else input
        output = (
            {self.output_key: output["_data"]} if len(output) == 1 else output
        )

        metrics = self._compute_metric(output, input)
        metrics = self._process_computed_metric(metrics)
        runner.loader_metrics.update(**metrics)
class BatchMetricCallback(IBatchMetricCallback):
    """Wraps an arbitrary callable as a per-batch metric callback."""

    def __init__(
        self,
        prefix: str,
        metric_fn: Callable,
        input_key: Union[str, List[str], Dict[str, str]] = "targets",
        output_key: Union[str, List[str], Dict[str, str]] = "logits",
        multiplier: float = 1.0,
        **metric_kwargs,
    ):
        """Store the callable and forward configuration to the base class."""
        base_kwargs = dict(
            prefix=prefix,
            input_key=input_key,
            output_key=output_key,
            multiplier=multiplier,
        )
        super().__init__(**base_kwargs, **metric_kwargs)
        self.metric = metric_fn

    @property
    def metric_fn(self):
        """The wrapped metric callable."""
        return self.metric
class LoaderMetricCallback(ILoaderMetricCallback):
    """Wraps an arbitrary callable as a per-loader metric callback."""

    def __init__(
        self,
        prefix: str,
        metric_fn: Callable,
        input_key: Union[str, List[str], Dict[str, str]] = "targets",
        output_key: Union[str, List[str], Dict[str, str]] = "logits",
        multiplier: float = 1.0,
        **metric_kwargs,
    ):
        """Store the callable and forward configuration to the base class."""
        self.metric = metric_fn
        super().__init__(
            prefix=prefix,
            input_key=input_key,
            output_key=output_key,
            multiplier=multiplier,
            **metric_kwargs,
        )

    @property
    def metric_fn(self):
        """The wrapped metric callable."""
        return self.metric
class MetricAggregationCallback(Callback):
    """Aggregates several already-computed metrics into a single value.

    Supported modes: ``"sum"``, ``"mean"``, ``"weighted_sum"`` and
    ``"weighted_mean"``. For the weighted modes, ``metrics`` must map
    metric name -> weight; for ``"weighted_mean"`` the weights are
    normalized to sum to one.
    """

    def __init__(
        self,
        prefix: str,
        metrics: Union[str, List[str], Dict[str, float]] = None,
        mode: str = "mean",
        scope: str = "batch",
        multiplier: float = 1.0,
    ) -> None:
        """
        Args:
            prefix: key under which the aggregated value is stored.
            metrics: metric name(s), or name->weight dict for weighted modes;
                None means "all available metrics" (sum/mean only).
            mode: aggregation mode, see class docstring.
            scope: when to aggregate -- "batch", "loader" or "epoch".
            multiplier: scalar applied to the aggregated value.

        Raises:
            ValueError: on invalid `prefix` or a `metrics` type that does
                not match `mode`.
            NotImplementedError: on an unknown `mode`.
        """
        super().__init__(
            order=CallbackOrder.metric_aggregation, node=CallbackNode.all
        )

        if prefix is None or not isinstance(prefix, str):
            raise ValueError("prefix must be str")

        if mode in ("sum", "mean"):
            if metrics is not None and not isinstance(metrics, list):
                raise ValueError(
                    "For `sum` or `mean` mode the metrics must be "
                    "None or list or str (not dict)"
                )
        elif mode in ("weighted_sum", "weighted_mean"):
            if metrics is None or not isinstance(metrics, dict):
                raise ValueError(
                    "For `weighted_sum` or `weighted_mean` mode "
                    "the metrics must be specified "
                    "and must be a dict"
                )
        else:
            raise NotImplementedError(
                "mode must be `sum`, `mean` "
                "or `weighted_sum` or `weighted_mean`"
            )
        assert scope in ("batch", "loader", "epoch")

        if isinstance(metrics, str):
            metrics = [metrics]

        self.prefix = prefix
        self.metrics = metrics
        self.mode = mode
        self.scope = scope
        self.multiplier = multiplier

        if mode in ("sum", "weighted_sum", "weighted_mean"):
            self.aggregation_fn = (
                lambda x: torch.sum(torch.stack(x)) * multiplier
            )
            if mode == "weighted_mean":
                # BUGFIX: sum the weight *values*, not the (key, weight)
                # tuples -- `sum(dict.items())` raises TypeError.
                weights_sum = sum(metrics.values())
                self.metrics = {
                    key: weight / weights_sum
                    for key, weight in metrics.items()
                }
        elif mode == "mean":
            self.aggregation_fn = (
                lambda x: torch.mean(torch.stack(x)) * multiplier
            )

    def _preprocess(self, metrics: Any) -> List[float]:
        """Select (and, for weighted modes, weight) the values to aggregate."""
        if self.metrics is not None:
            # BUGFIX: "weighted_mean" must apply its (normalized) weights
            # too; previously only "weighted_sum" multiplied by them.
            if self.mode in ("weighted_sum", "weighted_mean"):
                result = [
                    metrics[key] * value for key, value in self.metrics.items()
                ]
            else:
                result = [metrics[key] for key in self.metrics]
        else:
            result = list(metrics.values())
        return result

    def _process_metrics(self, metrics: Dict):
        """Aggregate the selected values and write the result back in place."""
        metrics_processed = self._preprocess(metrics)
        metric_aggregated = self.aggregation_fn(metrics_processed)
        metrics[self.prefix] = metric_aggregated

    def on_batch_end(self, runner: "IRunner") -> None:
        if self.scope == "batch":
            self._process_metrics(runner.batch_metrics)

    def on_loader_end(self, runner: "IRunner"):
        if self.scope == "loader":
            self._process_metrics(runner.loader_metrics)

    def on_epoch_end(self, runner: "IRunner"):
        if self.scope == "epoch":
            self._process_metrics(runner.epoch_metrics)
class MetricManagerCallback(Callback):
    """Prepares the runner's metric dicts and averages batch metrics into
    loader- and epoch-level metrics."""

    def __init__(self):
        super().__init__(
            order=CallbackOrder.logging - 1, node=CallbackNode.all,
        )
        # Per-metric running averages for the current loader.
        self.meters: Dict[str, AverageValueMeter] = None

    @staticmethod
    def to_single_value(value: Any) -> float:
        """Convert a 0-d tensor / numpy scalar / number to a plain float."""
        if hasattr(value, "item"):
            value = value.item()

        value = float(value)
        return value

    @staticmethod
    def _process_metrics(metrics: Dict[str, Any]):
        """Average each value across distributed workers and floatify it."""
        output = {}
        for key, value in metrics.items():
            value = get_distributed_mean(value)
            value = MetricManagerCallback.to_single_value(value)
            output[key] = value
        return output

    def on_epoch_start(self, runner: "IRunner") -> None:
        # NOTE(review): defaultdict(None) behaves exactly like a plain dict
        # (no default factory) -- presumably intentional; confirm.
        runner.epoch_metrics = defaultdict(None)

    def on_loader_start(self, runner: "IRunner") -> None:
        runner.loader_metrics = defaultdict(None)
        self.meters = defaultdict(AverageValueMeter)

    def on_batch_start(self, runner: "IRunner") -> None:
        runner.batch_metrics = defaultdict(None)

    def on_batch_end(self, runner: "IRunner") -> None:
        runner.batch_metrics = self._process_metrics(runner.batch_metrics)
        for key, value in runner.batch_metrics.items():
            # Weight each batch by its size for a correct loader average.
            self.meters[key].add(value, runner.batch_size)

    def on_loader_end(self, runner: "IRunner") -> None:
        for key, value in self.meters.items():
            value = value.mean
            runner.loader_metrics[key] = value
        for key, value in runner.loader_metrics.items():
            runner.epoch_metrics[f"{runner.loader_key}_{key}"] = value
# Backwards-compatible alias for the per-batch metric callback.
MetricCallback = BatchMetricCallback

# Public API of this module.
__all__ = [
    "IMetricCallback",
    "IBatchMetricCallback",
    "ILoaderMetricCallback",
    "BatchMetricCallback",
    "LoaderMetricCallback",
    "MetricCallback",
    "MetricAggregationCallback",
    "MetricManagerCallback",
]
| true | true |
f7f4d0076679114fa6b234a7a13712c5c29bf2c5 | 6,458 | py | Python | test/test_id_parsers.py | RalfG/pyAscore | 9467276f22d230369b24fd56cd69eccb9e82d51c | [
"MIT"
] | 6 | 2021-07-27T10:15:33.000Z | 2022-03-25T18:27:54.000Z | test/test_id_parsers.py | RalfG/pyAscore | 9467276f22d230369b24fd56cd69eccb9e82d51c | [
"MIT"
] | 7 | 2021-07-26T11:56:52.000Z | 2022-03-12T00:13:48.000Z | test/test_id_parsers.py | RalfG/pyAscore | 9467276f22d230369b24fd56cd69eccb9e82d51c | [
"MIT"
] | 1 | 2021-08-03T14:53:15.000Z | 2021-08-03T14:53:15.000Z | import unittest
import os
import pickle
from itertools import product
from pyascore import id_parsers
import numpy as np
from pyteomics import mass
STD_AA_MASS = mass.std_aa_mass
class TestMassCorrector(unittest.TestCase):
    """Tests for id_parsers.MassCorrector modification-mass resolution."""

    corrector = id_parsers.MassCorrector()

    def test_n_term(self):
        """N-terminal acetylation is resolved at every rounding precision."""
        res = "X"
        mass = 42.010565
        for i in range(6):
            c_res, c_pos, c_mass = self.corrector.correct(res, 0, round(mass, i))
            self.assertEqual(
                (c_res[0], c_pos[0], c_mass[0]),
                ('n', 0, mass)
            )

        res = "M"
        n_mod_mass = 42.010565
        mass = STD_AA_MASS[res] + n_mod_mass
        for i in range(6):
            c_res, c_pos, c_mass = self.corrector.correct(res, 1, round(mass, i))
            self.assertEqual(
                (c_res[0], c_pos[0], c_mass[0]),
                ('n', 0, n_mod_mass)
            )

    def test_n_term_combined(self):
        """Acetylation plus Met oxidation splits into two modifications."""
        res = "M"
        n_mod_mass = 42.010565
        oxi_mass = 15.9949
        mass = STD_AA_MASS[res] + n_mod_mass + oxi_mass
        for i in range(6):
            c_res, c_pos, c_mass = self.corrector.correct(res, 1, round(mass, i))
            self.assertEqual(
                (c_res[0], c_pos[0], c_mass[0]),
                ('n', 0, n_mod_mass)
            )
            self.assertEqual(
                (c_res[1], c_pos[1], c_mass[1]),
                ('M', 1, oxi_mass)
            )

    def test_res(self):
        """A phosphorylated serine resolves to the phospho delta mass."""
        res = "S"
        phospho_mass = 79.966331
        mass = STD_AA_MASS[res] + phospho_mass
        for i in range(6):
            c_res, c_pos, c_mass = self.corrector.correct(res, 5, round(mass, i))
            self.assertEqual(
                (c_res[0], c_pos[0], c_mass[0]),
                (res, 5, phospho_mass)
            )

    def test_not_found(self):
        """An impossible residue/mass combination must raise ValueError.

        BUGFIX: the old version swallowed the error with
        try/except/continue, so the test passed even when no error
        was raised at all.
        """
        res = "M"
        phospho_mass = 79.966331
        mass = STD_AA_MASS[res] + phospho_mass
        for i in range(6):
            with self.assertRaises(ValueError):
                self.corrector.correct(res, 5, round(mass, i))

    def test_multiple(self):
        """correct_multiple expands combined deltas into individual mods."""
        n_mod_mass = 42.010565
        oxi_mass = 15.9949
        phospho_mass = 79.966331

        peptide = "MRAMSLVSNEGDSEQNEIR"
        uncorrected_positions = np.array([1, 5])
        uncorrected_masses = np.array([STD_AA_MASS["M"] + n_mod_mass + oxi_mass,
                                       STD_AA_MASS["S"] + phospho_mass])

        true_positions = np.array([0, 1, 5])
        true_masses = np.array([n_mod_mass,
                                oxi_mass,
                                phospho_mass])

        corrected_positions, corrected_masses = self.corrector.correct_multiple(
            peptide, uncorrected_positions, uncorrected_masses)

        self.assertTrue(np.all(corrected_positions == true_positions),
                        "Positions are {}, not {}".format(corrected_positions,
                                                          true_positions))
        # BUGFIX: the failure message previously reported the positions
        # arrays instead of the mismatching masses.
        self.assertTrue(np.all(corrected_masses == true_masses),
                        "Masses are {}, not {}".format(corrected_masses,
                                                       true_masses))
def example_generator(file_name):
    """Yield, one at a time, the example objects pickled in `file_name`."""
    with open(file_name, "rb") as handle:
        examples = pickle.load(handle)
    yield from examples
class TestIDExtractors(unittest.TestCase):
    """Tests PepXMLExtractor output against known per-file answers."""

    program_list = ["comet", "percolator"]
    instrument_list = ["qexactive", "velos"]

    global_answers = {
        ("comet", "qexactive"): [
            dict(scan=2, charge_states=2, peptides="MRAMSLVSNEGDSEQNEIR",
                 mod_positions=np.array([1, 5, 8, 13])),
            dict(scan=3, charge_states=2, peptides="KEESEESDDDMGFGLFD",
                 mod_positions=np.array([4, 7, 11])),
            dict(scan=4, charge_states=2, peptides="KEESEESDDDMGFGLFD",
                 mod_positions=np.array([4, 7])),
        ],
        ("comet", "velos"): [
            dict(scan=2, charge_states=3,
                 peptides="QADIQSTVLQINMPRGDLPVGNYQKMAKLADAR",
                 mod_positions=np.array([13, 23])),
            dict(scan=3, charge_states=4,
                 peptides="ALSTCASHFTAVSVFYGTVIFIYLQPSSSHSMDTDK",
                 mod_positions=np.array([5, 10, 28, 32])),
            dict(scan=4, charge_states=2, peptides="LLVKKIVSLVR",
                 mod_positions=np.array([])),
        ],
        ("percolator", "qexactive"): [
            dict(scan=26840, charge_states=3,
                 peptides="ATVPVAAATAAEGEGSPPAVAAVAGPPAAAEVGGGVGGSSR",
                 mod_positions=np.array([16])),
            dict(scan=27795, charge_states=2,
                 peptides="GEADLFDSGDIFSTGTGSQSVER",
                 mod_positions=np.array([16])),
            dict(scan=22462, charge_states=3,
                 peptides="LAEAPSPAPTPSPTPVEDLGPQTSTSPGR",
                 mod_positions=np.array([])),
        ],
        ("percolator", "velos"): [
            dict(scan=28126, charge_states=3,
                 peptides="KGDVVHCWYTGTLQDGTVFDTNIQTSAK",
                 mod_positions=np.array([7])),
            dict(scan=33362, charge_states=3,
                 peptides="HQILEQAVEDYAETVHQLSK",
                 mod_positions=np.array([])),
            dict(scan=28509, charge_states=3,
                 peptides="RMATEVAADALGEEWKGYVVR",
                 mod_positions=np.array([])),
        ],
    }

    def test_pepxml_extractor(self):
        """Each example file yields the expected scan/charge/peptide/mods."""
        extractor = id_parsers.PepXMLExtractor()

        for prog, instr in product(self.program_list, self.instrument_list):
            file_name = "_".join([prog, instr, "pepxml", "examples"]) + ".pkl"
            for ind, examp in enumerate(example_generator(
                os.path.join("test", "pyteomics_examples", "pepxml", file_name)
            )):
                extracted_data = extractor.extract(examp)
                answers = self.global_answers[(prog, instr)]
                self.assertEqual(extracted_data["scans"][0],
                                 answers[ind]["scan"])
                self.assertEqual(extracted_data["charge_states"][0],
                                 answers[ind]["charge_states"])
                self.assertEqual(extracted_data["peptides"][0],
                                 answers[ind]["peptides"])
                # BUGFIX: the final check previously re-compared "peptides";
                # the `# Comparing arrays` intent was to compare the
                # modification position arrays. (Assumes the extractor
                # exposes them under "mod_positions" -- confirm against
                # pyascore.id_parsers.)
                self.assertTrue(np.all(
                    extracted_data["mod_positions"][0]
                    == answers[ind]["mod_positions"]))  # Comparing arrays
| 41.935065 | 148 | 0.539796 | import unittest
import os
import pickle
from itertools import product
from pyascore import id_parsers
import numpy as np
from pyteomics import mass
STD_AA_MASS = mass.std_aa_mass
class TestMassCorrector(unittest.TestCase):
corrector = id_parsers.MassCorrector()
def test_n_term(self):
res = "X"
mass = 42.010565
for i in range(6):
c_res, c_pos, c_mass = self.corrector.correct(res, 0, round(mass, i))
self.assertEqual(
(c_res[0], c_pos[0], c_mass[0]),
('n', 0, mass)
)
res = "M"
n_mod_mass = 42.010565
mass = STD_AA_MASS[res] + n_mod_mass
for i in range(6):
c_res, c_pos, c_mass = self.corrector.correct(res, 1, round(mass, i))
self.assertEqual(
(c_res[0], c_pos[0], c_mass[0]),
('n', 0, n_mod_mass)
)
def test_n_term_combined(self):
res = "M"
n_mod_mass = 42.010565
oxi_mass = 15.9949
mass = STD_AA_MASS[res] + n_mod_mass + oxi_mass
for i in range(6):
c_res, c_pos, c_mass = self.corrector.correct(res, 1, round(mass, i))
self.assertEqual(
(c_res[0], c_pos[0], c_mass[0]),
('n', 0, n_mod_mass)
)
self.assertEqual(
(c_res[1], c_pos[1], c_mass[1]),
('M', 1, oxi_mass)
)
def test_res(self):
res = "S"
phospho_mass = 79.966331
mass = STD_AA_MASS[res] + phospho_mass
for i in range(6):
c_res, c_pos, c_mass = self.corrector.correct(res, 5, round(mass, i))
self.assertEqual(
(c_res[0], c_pos[0], c_mass[0]),
(res, 5, phospho_mass)
)
def test_not_found(self):
res = "M"
phospho_mass = 79.966331
mass = STD_AA_MASS[res] + phospho_mass
for i in range(6):
try:
c_res, c_pos, c_mass = self.corrector.correct(res, 5, round(mass, i))
except ValueError:
continue
def test_multiple(self):
n_mod_mass = 42.010565
oxi_mass = 15.9949
phospho_mass = 79.966331
peptide = "MRAMSLVSNEGDSEQNEIR"
uncorrected_positions = np.array([1, 5])
uncorrected_masses = np.array([STD_AA_MASS["M"] + n_mod_mass + oxi_mass,
STD_AA_MASS["S"] + phospho_mass])
true_positions = np.array([0, 1, 5])
true_masses = np.array([n_mod_mass,
oxi_mass,
phospho_mass])
corrected_positions, corrected_masses = self.corrector.correct_multiple(peptide,
uncorrected_positions,
uncorrected_masses)
self.assertTrue(np.all(corrected_positions == true_positions),
"Positions are {}, not {}".format(corrected_positions, true_positions))
self.assertTrue(np.all(corrected_masses == true_masses),
"Masses are {}, not {}".format(corrected_positions, true_positions))
def example_generator(file_name):
with open(file_name, "rb") as source:
examples = pickle.load(source)
for e in examples:
yield e
class TestIDExtractors(unittest.TestCase):
program_list = ["comet", "percolator"]
instrument_list = ["qexactive", "velos"]
global_answers = {("comet", "qexactive") : [
dict(scan=2, charge_states=2, peptides="MRAMSLVSNEGDSEQNEIR", mod_positions=np.array([ 1, 5, 8, 13])),
dict(scan=3, charge_states=2, peptides="KEESEESDDDMGFGLFD", mod_positions=np.array([ 4, 7, 11 ])),
dict(scan=4, charge_states=2, peptides="KEESEESDDDMGFGLFD", mod_positions=np.array([ 4, 7 ]))
],
("comet", "velos") : [
dict(scan=2, charge_states=3, peptides="QADIQSTVLQINMPRGDLPVGNYQKMAKLADAR", mod_positions=np.array([ 13, 23 ])),
dict(scan=3, charge_states=4, peptides="ALSTCASHFTAVSVFYGTVIFIYLQPSSSHSMDTDK", mod_positions=np.array([ 5, 10, 28, 32 ])),
dict(scan=4, charge_states=2, peptides="LLVKKIVSLVR", mod_positions=np.array([]))
],
("percolator", "qexactive") : [
dict(scan=26840, charge_states=3, peptides="ATVPVAAATAAEGEGSPPAVAAVAGPPAAAEVGGGVGGSSR", mod_positions=np.array([ 16 ])),
dict(scan=27795, charge_states=2, peptides="GEADLFDSGDIFSTGTGSQSVER", mod_positions=np.array([ 16 ])),
dict(scan=22462, charge_states=3, peptides="LAEAPSPAPTPSPTPVEDLGPQTSTSPGR", mod_positions=np.array([]))
],
("percolator", "velos") : [
dict(scan=28126, charge_states=3, peptides="KGDVVHCWYTGTLQDGTVFDTNIQTSAK", mod_positions=np.array([ 7 ])),
dict(scan=33362, charge_states=3, peptides="HQILEQAVEDYAETVHQLSK", mod_positions=np.array([])),
dict(scan=28509, charge_states=3, peptides="RMATEVAADALGEEWKGYVVR", mod_positions=np.array([]))
],
}
def test_pepxml_extractor(self):
extractor = id_parsers.PepXMLExtractor()
for prog, instr in product(self.program_list, self.instrument_list):
file_name = "_".join([prog, instr, "pepxml", "examples"]) + ".pkl"
for ind, examp in enumerate(example_generator(
os.path.join("test", "pyteomics_examples", "pepxml", file_name)
)):
extracted_data = extractor.extract(examp)
answers = self.global_answers[(prog, instr)]
self.assertEqual(extracted_data["scans"][0], answers[ind]["scan"])
self.assertEqual(extracted_data["charge_states"][0], answers[ind]["charge_states"])
self.assertEqual(extracted_data["peptides"][0], answers[ind]["peptides"])
self.assertTrue(np.all(extracted_data["peptides"][0] == answers[ind]["peptides"]))
| true | true |
f7f4d0287cb7142bf8ea3a0a8c8bdc7398d46a9a | 1,051 | py | Python | test/test_del_contact_from_group.py | DmitriyNeurov/python_training | 64de4dc4dd392ae341933ea8721bdd694cfc03db | [
"Apache-2.0"
] | null | null | null | test/test_del_contact_from_group.py | DmitriyNeurov/python_training | 64de4dc4dd392ae341933ea8721bdd694cfc03db | [
"Apache-2.0"
] | null | null | null | test/test_del_contact_from_group.py | DmitriyNeurov/python_training | 64de4dc4dd392ae341933ea8721bdd694cfc03db | [
"Apache-2.0"
] | null | null | null | from model.group import Group
from model.contact import Contact
def test_del_contact_from_group(app, db, orm):
    """Removing a contact from group id=2 shrinks its membership by one."""
    # Ensure at least one group exists.
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="test"))
    old_groups = db.get_group_list()
    new_groups = db.get_group_list()
    assert len(old_groups) == len(new_groups)
    # BUGFIX: the old check compared the list itself to 0
    # (`db.get_contact_list() == 0`), which is always False, so the
    # contact fixture was never created on an empty database.
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(firstname="Dmitriy"))
    old_contacts = db.get_contact_list()
    new_contacts = db.get_contact_list()
    assert len(old_contacts) == len(new_contacts)
    assert old_contacts == new_contacts
    # BUGFIX: the old check compared the *bound method* to 0
    # (`orm.get_contacts_in_group == 0`) instead of calling it.
    if len(orm.get_contacts_in_group(Group(id="2"))) == 0:
        app.contact.add_contact_in_group()
    old_contacts_in_group = orm.get_contacts_in_group(Group(id="2"))
    new_contacts_in_group = orm.get_contacts_in_group(Group(id="2"))
    assert len(old_contacts_in_group) == len(new_contacts_in_group)
    app.contact.del_contact_from_group()
    new_contacts_in_group = orm.get_contacts_in_group(Group(id="2"))
    assert len(old_contacts_in_group) - 1 == len(new_contacts_in_group)
| 40.423077 | 71 | 0.730733 | from model.group import Group
from model.contact import Contact
def test_del_contact_from_group(app, db, orm):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
old_groups = db.get_group_list()
new_groups = db.get_group_list()
assert len(old_groups) == len(new_groups)
if db.get_contact_list() == 0:
app.contact.create(Contact(firstname="Dmitriy"))
old_contacts = db.get_contact_list()
new_contacts = db.get_contact_list()
assert len(old_contacts) == len(new_contacts)
assert old_contacts == new_contacts
if orm.get_contacts_in_group == 0:
app.contact.add_contact_in_group()
old_contacts_in_group = orm.get_contacts_in_group(Group(id="2"))
new_contacts_in_group = orm.get_contacts_in_group(Group(id="2"))
assert len(old_contacts_in_group) == len(new_contacts_in_group)
app.contact.del_contact_from_group()
new_contacts_in_group = orm.get_contacts_in_group(Group(id="2"))
assert len(old_contacts_in_group) - 1 == len(new_contacts_in_group)
| true | true |
f7f4d0a0630ca5f26f7f623613c839e1f70d6aeb | 227 | py | Python | b0mb3r/services/tabris.py | Superior0/b0mb3r_r | 216b1851303f5101b09457ba31749b63e4d0d5e8 | [
"Apache-2.0"
] | null | null | null | b0mb3r/services/tabris.py | Superior0/b0mb3r_r | 216b1851303f5101b09457ba31749b63e4d0d5e8 | [
"Apache-2.0"
] | null | null | null | b0mb3r/services/tabris.py | Superior0/b0mb3r_r | 216b1851303f5101b09457ba31749b63e4d0d5e8 | [
"Apache-2.0"
] | null | null | null | from b0mb3r.services.service import Service
class Tabris(Service):
async def run(self):
await self.post(
"https://lk.tabris.ru/reg/", data={"action": "phone", "phone": self.formatted_phone},
)
| 25.222222 | 97 | 0.621145 | from b0mb3r.services.service import Service
class Tabris(Service):
async def run(self):
await self.post(
"https://lk.tabris.ru/reg/", data={"action": "phone", "phone": self.formatted_phone},
)
| true | true |
f7f4d10469d7c901d03b5db5f6072168f0caf564 | 2,280 | py | Python | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/ListSourceReplicaRequest.py | silent-beaters/aliyun-openapi-python-sdk | 7a025eabdad622af07affc3a7beeae1c5def469d | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/ListSourceReplicaRequest.py | silent-beaters/aliyun-openapi-python-sdk | 7a025eabdad622af07affc3a7beeae1c5def469d | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/ListSourceReplicaRequest.py | silent-beaters/aliyun-openapi-python-sdk | 7a025eabdad622af07affc3a7beeae1c5def469d | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class ListSourceReplicaRequest(RpcRequest):
    """RPC request wrapper for the Iot ``ListSourceReplica`` API (2018-01-20)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Iot', '2018-01-20', 'ListSourceReplica', 'iot')
        self.set_method('POST')
        # Endpoint resolution data is attached only when the core SDK
        # version exposes these attributes.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    # --- body parameters ---------------------------------------------------

    def get_IotInstanceId(self):
        return self.get_body_params().get('IotInstanceId')

    def set_IotInstanceId(self, IotInstanceId):
        self.add_body_params('IotInstanceId', IotInstanceId)

    def get_Context(self):
        return self.get_body_params().get('Context')

    def set_Context(self, Context):
        self.add_body_params('Context', Context)

    # --- query parameters --------------------------------------------------

    def get_PageSize(self):
        return self.get_query_params().get('PageSize')

    def set_PageSize(self, PageSize):
        self.add_query_param('PageSize', PageSize)

    def get_SourceType(self):
        return self.get_query_params().get('SourceType')

    def set_SourceType(self, SourceType):
        self.add_query_param('SourceType', SourceType)

    def get_PageNo(self):
        return self.get_query_params().get('PageNo')

    def set_PageNo(self, PageNo):
        self.add_query_param('PageNo', PageNo)

    def get_LpInstanceId(self):
        return self.get_query_params().get('LpInstanceId')

    def set_LpInstanceId(self, LpInstanceId):
        self.add_query_param('LpInstanceId', LpInstanceId)
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class ListSourceReplicaRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'ListSourceReplica','iot')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_IotInstanceId(self):
return self.get_body_params().get('IotInstanceId')
def set_IotInstanceId(self,IotInstanceId):
self.add_body_params('IotInstanceId', IotInstanceId)
def get_Context(self):
return self.get_body_params().get('Context')
def set_Context(self,Context):
self.add_body_params('Context', Context)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_SourceType(self):
return self.get_query_params().get('SourceType')
def set_SourceType(self,SourceType):
self.add_query_param('SourceType',SourceType)
def get_PageNo(self):
return self.get_query_params().get('PageNo')
def set_PageNo(self,PageNo):
self.add_query_param('PageNo',PageNo)
def get_LpInstanceId(self):
return self.get_query_params().get('LpInstanceId')
def set_LpInstanceId(self,LpInstanceId):
self.add_query_param('LpInstanceId',LpInstanceId) | true | true |
f7f4d1610bfdea2050a0041cbdd29c08242ac386 | 16,255 | py | Python | tensorflow_probability/python/distributions/multivariate_student_t_test.py | ValentinMouret/probability | 7ea6cc55e5b3fed04372cd188cd0764e92fd3cf4 | [
"Apache-2.0"
] | 1 | 2020-04-29T11:29:25.000Z | 2020-04-29T11:29:25.000Z | tensorflow_probability/python/distributions/multivariate_student_t_test.py | ValentinMouret/probability | 7ea6cc55e5b3fed04372cd188cd0764e92fd3cf4 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/multivariate_student_t_test.py | ValentinMouret/probability | 7ea6cc55e5b3fed04372cd188cd0764e92fd3cf4 | [
"Apache-2.0"
] | 1 | 2020-07-04T21:37:20.000Z | 2020-07-04T21:37:20.000Z | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the MultivariateStudentTLinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.distributions import multivariate_student_t as mvt
from tensorflow_probability.python.distributions import student_t
from tensorflow_probability.python.internal import test_case
from tensorflow_probability.python.internal import test_util as tfp_test_util
tfe = tf.contrib.eager
@tfe.run_all_tests_in_graph_and_eager_modes
class MultivariateStudentTTestFloat32StaticShape(
    test_case.TestCase, parameterized.TestCase,
    tfp_test_util.VectorDistributionTestHelpers):
  # Class-level knobs; sibling test classes presumably override these to
  # sweep dtype and static-vs-dynamic shape handling -- confirm below.
  dtype = tf.float32
  use_static_shape = True
def _input(self, value):
"""Helper to create inputs with varied dtypes an static shapes."""
value = tf.cast(value, self.dtype)
return tf.placeholder_with_default(
value, shape=value.shape if self.use_static_shape else None)
  # pyformat: disable
  # pylint: disable=bad-whitespace
  @parameterized.parameters(
      # loc           df      diag          batch_shape
      ([0., 0.],      1.,     [1., 1.],     []),
      (0.,            1.,     [1., 1.],     []),
      ([[[0., 0.]]],  1.,     [1., 1.],     [1, 1]),
      ([0., 0.],      [[1.]], [1., 1.],     [1, 1]),
      ([0., 0.],      1.,     [[[1., 1.]]], [1, 1]),
      ([[[0., 0.]]],  [[1.]], [[[1., 1.]]], [1, 1]),
  )
  # pylint: enable=bad-whitespace
  # pyformat: enable
  def testBroadcasting(self, loc, df, diag, batch_shape):
    """All three parameters must broadcast to the expected batch shape."""
    # Test that broadcasting works across all 3 parameters.
    loc = self._input(loc)
    df = self._input(df)
    diag = self._input(diag)

    scale = tf.linalg.LinearOperatorDiag(diag, is_positive_definite=True)
    dist = mvt.MultivariateStudentTLinearOperator(
        loc=loc, df=df, scale=scale, validate_args=True)

    sample = dist.sample(3)
    log_prob = dist.log_prob(sample)
    mean = dist.mean()
    mode = dist.mode()
    cov = dist.covariance()
    std = dist.stddev()
    var = dist.variance()
    entropy = dist.entropy()

    # Static shapes are only checkable when inputs carried static shapes.
    if self.use_static_shape:
      self.assertAllEqual([3] + batch_shape + [2], sample.shape)
      self.assertAllEqual([3] + batch_shape, log_prob.shape)
      self.assertAllEqual(batch_shape + [2], mean.shape)
      self.assertAllEqual(batch_shape + [2], mode.shape)
      self.assertAllEqual(batch_shape + [2, 2], cov.shape)
      self.assertAllEqual(batch_shape + [2], std.shape)
      self.assertAllEqual(batch_shape + [2], var.shape)
      self.assertAllEqual(batch_shape, entropy.shape)
      self.assertAllEqual([2], dist.event_shape)
      self.assertAllEqual(batch_shape, dist.batch_shape)

    sample = self.evaluate(sample)
    log_prob = self.evaluate(log_prob)
    mean = self.evaluate(mean)
    mode = self.evaluate(mode)
    cov = self.evaluate(cov)
    std = self.evaluate(std)
    var = self.evaluate(var)
    entropy = self.evaluate(entropy)

    # Runtime (evaluated) shapes must match regardless of static info.
    self.assertAllEqual([3] + batch_shape + [2], sample.shape)
    self.assertAllEqual([3] + batch_shape, log_prob.shape)
    self.assertAllEqual(batch_shape + [2], mean.shape)
    self.assertAllEqual(batch_shape + [2], mode.shape)
    self.assertAllEqual(batch_shape + [2, 2], cov.shape)
    self.assertAllEqual(batch_shape + [2], std.shape)
    self.assertAllEqual(batch_shape + [2], var.shape)
    self.assertAllEqual(batch_shape, entropy.shape)
    self.assertAllEqual([2], self.evaluate(dist.event_shape_tensor()))
    self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor()))
  def testNonPositiveDf(self):
    """df=0 must be rejected when validate_args=True."""
    with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                 "`df` must be positive"):
      self.evaluate(
          mvt.MultivariateStudentTLinearOperator(
              loc=self._input([0.]),
              df=self._input(0.),
              scale=tf.linalg.LinearOperatorDiag(
                  self._input([1.]), is_positive_definite=True),
              validate_args=True).df)
  def testBadScaleDType(self):
    """An integer-dtype scale operator is rejected at construction time."""
    with self.assertRaisesRegexp(TypeError,
                                 "`scale` must have floating-point dtype."):
      mvt.MultivariateStudentTLinearOperator(
          loc=[0.],
          df=1.,
          scale=tf.linalg.LinearOperatorIdentity(
              num_rows=1, dtype=tf.int32, is_positive_definite=True))
  def testNotPositiveDefinite(self):
    """A scale operator not flagged positive definite is rejected."""
    with self.assertRaisesRegexp(ValueError,
                                 "`scale` must be positive definite."):
      mvt.MultivariateStudentTLinearOperator(
          loc=self._input([0.]),
          df=self._input(1.),
          scale=tf.linalg.LinearOperatorDiag(self._input([1.])),
          validate_args=True)
def testMeanAllDefined(self):
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([0., 0.]),
df=self._input(2.),
scale=tf.linalg.LinearOperatorDiag(self._input([1., 1.])))
mean = self.evaluate(dist.mean())
self.assertAllClose([0., 0.], mean)
  def testMeanSomeUndefinedNaNAllowed(self):
    """With allow_nan_stats, batch members with df <= 1 yield NaN means."""
    dist = mvt.MultivariateStudentTLinearOperator(
        loc=self._input([[0., 0.], [1., 1.]]),
        df=self._input([1., 2.]),
        scale=tf.linalg.LinearOperatorDiag(self._input([[1., 1.], [1., 1.]])),
        allow_nan_stats=True)
    mean = self.evaluate(dist.mean())
    self.assertAllClose([[np.nan, np.nan], [1., 1.]], mean)
  def testMeanSomeUndefinedNaNNotAllowed(self):
    """Without allow_nan_stats, df <= 1 in the batch raises at runtime."""
    dist = mvt.MultivariateStudentTLinearOperator(
        loc=self._input([[0., 0.], [1., 1.]]),
        df=self._input([1., 2.]),
        scale=tf.linalg.LinearOperatorDiag(self._input([[1., 1.], [1., 1.]])),
        allow_nan_stats=False)
    with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                 "mean not defined for components of df <= 1"):
      self.evaluate(dist.mean())
def testMode(self):
dist = mvt.MultivariateStudentTLinearOperator(
loc=[0., 0.], df=2., scale=tf.linalg.LinearOperatorDiag([[1., 1.]]))
mode = self.evaluate(dist.mode())
self.assertAllClose([[0., 0.]], mode)
  # pyformat: disable
  # pylint: disable=bad-whitespace
  @parameterized.parameters(
      # diag      full                  expected_mvn_cov
      ([2., 2.],  None,                 [[4., 0.], [0., 4.]]),
      (None,      [[2., 1.], [1., 2.]], [[5., 4.], [4., 5.]]),
  )
  # pyformat: enable
  # pylint: enable=bad-whitespace
  def testCovarianceAllDefined(self,
                               diag=None,
                               full=None,
                               expected_mvn_cov=None):
    """Covariance equals df / (df - 2) times the Gaussian covariance."""
    if diag is not None:
      scale = tf.linalg.LinearOperatorDiag(self._input(diag))
    else:
      scale = tf.linalg.LinearOperatorFullMatrix(self._input(full))
    dist = mvt.MultivariateStudentTLinearOperator(
        loc=self._input([0., 0.]), df=self._input(3.), scale=scale)
    cov = self.evaluate(dist.covariance())
    # df = 3 inflates the MVN covariance by a factor of 3 / (3 - 2) = 3.
    self.assertAllClose(np.array(expected_mvn_cov) * 3. / (3. - 2.), cov)
def testCovarianceSomeUndefinedNaNAllowed(self):
  """Covariance is inf for df in (1, 2] and NaN for df <= 1 when allowed."""
  distribution = mvt.MultivariateStudentTLinearOperator(
      loc=self._input([0., 0.]),
      df=self._input([2., 1.]),
      scale=tf.linalg.LinearOperatorDiag(self._input([2., 2.])),
      allow_nan_stats=True)
  covariance = self.evaluate(distribution.covariance())
  self.assertAllClose(np.full([2, 2], np.inf), covariance[0])
  self.assertAllClose(np.full([2, 2], np.nan), covariance[1])
def testCovarianceSomeUndefinedNaNNotAllowed(self):
  """With allow_nan_stats=False, covariance() raises for df <= 1."""
  distribution = mvt.MultivariateStudentTLinearOperator(
      loc=self._input([0., 0.]),
      df=self._input(1.),
      scale=tf.linalg.LinearOperatorDiag(self._input([2., 2.])),
      allow_nan_stats=False)
  expected_message = "covariance not defined for components of df <= 1"
  with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                               expected_message):
    self.evaluate(distribution.covariance())
# pyformat: disable
# pylint: disable=bad-whitespace
@parameterized.parameters(
    # diag       full                  update        expected_mvn_var
    ([2., 2.],   None,                 None,         [4., 4.]),
    (None,       [[2., 1.], [1., 2.]], None,         [5., 5.]),
    ([2., 2.],   None,                 [[1.],[1.]],  [10., 10.]),
)
# pylint: enable=bad-whitespace
# pyformat: enable
def testVarianceStdAllDefined(self,
                              diag=None,
                              full=None,
                              update=None,
                              expected_mvn_var=None):
  """Variance/stddev match the MVN values scaled by df / (df - 2).

  The scale operator is built from `diag` or `full`, optionally wrapped in
  a low-rank update when `update` is provided.
  """
  if diag is not None:
    scale = tf.linalg.LinearOperatorDiag(self._input(diag))
  elif full is not None:
    scale = tf.linalg.LinearOperatorFullMatrix(self._input(full))
  if update is not None:
    # Optionally augment the base operator with a low-rank update term.
    scale = tf.linalg.LinearOperatorLowRankUpdate(scale, self._input(update))
  dist = mvt.MultivariateStudentTLinearOperator(
      loc=self._input([0., 0.]), df=self._input(3.), scale=scale)
  var = self.evaluate(dist.variance())
  std = self.evaluate(dist.stddev())
  # df = 3, so we expect the variance of the MVT to exceed MVN by a factor of
  # 3 / (3 - 2) = 3.
  self.assertAllClose(np.array(expected_mvn_var) * 3., var)
  self.assertAllClose(np.sqrt(np.array(expected_mvn_var) * 3.), std)
def testVarianceStdSomeUndefinedNaNAllowed(self):
  """Variance/stddev are inf for df in (1, 2] and NaN for df <= 1."""
  distribution = mvt.MultivariateStudentTLinearOperator(
      loc=self._input([0., 0.]),
      df=self._input([2., 1.]),
      scale=tf.linalg.LinearOperatorDiag(self._input([2., 2.])),
      allow_nan_stats=True)
  variance = self.evaluate(distribution.variance())
  stddev = self.evaluate(distribution.stddev())
  self.assertAllClose([np.inf, np.inf], variance[0])
  self.assertAllClose([np.nan, np.nan], variance[1])
  self.assertAllClose([np.inf, np.inf], stddev[0])
  self.assertAllClose([np.nan, np.nan], stddev[1])
def testVarianceStdSomeUndefinedNaNNotAllowed(self):
  """With allow_nan_stats=False, variance() and stddev() raise for df <= 1."""
  distribution = mvt.MultivariateStudentTLinearOperator(
      loc=self._input([0., 0.]),
      df=self._input(1.),
      scale=tf.linalg.LinearOperatorDiag(self._input([2., 2.])),
      allow_nan_stats=False)
  with self.assertRaisesRegexp(
      tf.errors.InvalidArgumentError,
      "variance not defined for components of df <= 1"):
    self.evaluate(distribution.variance())
  with self.assertRaisesRegexp(
      tf.errors.InvalidArgumentError,
      "standard deviation not defined for components of df <= 1"):
    self.evaluate(distribution.stddev())
def testEntropy(self):
  """Entropy matches the closed form in Kotz & Nadarajah (2004), p. 22,
  "Multivariate t Distributions and Their Applications", CUP."""
  distribution = mvt.MultivariateStudentTLinearOperator(
      loc=self._input([0., 0.]),
      df=self._input([2., 3.]),
      scale=tf.linalg.LinearOperatorDiag(self._input([2., 2.])))
  expected = [0.5 * np.log(16.) + 3.83788, 0.5 * np.log(16.) + 3.50454]
  self.assertAllClose(expected, distribution.entropy())
def testSamplingConsistency(self):
# pyformat: disable
scale = tf.linalg.LinearOperatorFullMatrix(self._input(
[[2., -1.],
[-1., 2.]]))
# pyformat: enable
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([1., 2.]), df=self._input(5.), scale=scale)
self.run_test_sample_consistent_mean_covariance(
sess_run_fn=self.evaluate, dist=dist)
def testSamplingDeterministic(self):
# pyformat: disable
scale = tf.linalg.LinearOperatorFullMatrix(self._input(
[[2., -1.],
[-1., 2.]]))
# pyformat: enable
tf.set_random_seed(2)
dist1 = mvt.MultivariateStudentTLinearOperator(
loc=[1., 2.], df=5., scale=scale)
samples1 = self.evaluate(dist1.sample(100, seed=1))
tf.set_random_seed(2)
dist2 = mvt.MultivariateStudentTLinearOperator(
loc=[1., 2.], df=5., scale=scale)
samples2 = self.evaluate(dist2.sample(100, seed=1))
self.assertAllClose(samples1, samples2)
def testSamplingFullyReparameterized(self):
  """Sampling is reparameterized: samples carry gradients to all parameters.

  Every distribution parameter watched by the tape (df, loc, scale diag)
  must receive a non-None gradient through `sample`.
  """
  df = self._input(2.)
  loc = self._input([1., 2.])
  diag = self._input([3., 4.])
  with tf.GradientTape() as tape:
    # Watch the raw inputs; the scale operator and the distribution must be
    # constructed *inside* the tape so sampling stays on the tape.
    tape.watch(df)
    tape.watch(loc)
    tape.watch(diag)
    scale = tf.linalg.LinearOperatorDiag(diag)
    dist = mvt.MultivariateStudentTLinearOperator(loc=loc, df=df, scale=scale)
    samples = dist.sample(100)
  grad_df, grad_loc, grad_diag = tape.gradient(samples, [df, loc, diag])
  # A None gradient would mean the sampler is not fully reparameterized.
  self.assertIsNotNone(grad_df)
  self.assertIsNotNone(grad_loc)
  self.assertIsNotNone(grad_diag)
def testSamplingSmallDfNoNaN(self):
  """Sampling and log_prob stay finite even for extremely small df."""
  distribution = mvt.MultivariateStudentTLinearOperator(
      loc=self._input([0., 0.]),
      df=self._input([1e-1, 1e-5, 1e-10, 1e-20]),
      scale=tf.linalg.LinearOperatorDiag(self._input([1., 1.])))
  draws = distribution.sample(int(2e5), seed=1)
  draw_log_probs = distribution.log_prob(draws)
  draws, draw_log_probs = self.evaluate([draws, draw_log_probs])
  self.assertTrue(np.all(np.isfinite(draws)))
  self.assertTrue(np.all(np.isfinite(draw_log_probs)))
def testLogProb(self):
  """log_prob integrates to ~1 over a grid and is maximized at the mode."""
  # Test that numerically integrating over some portion of the domain yields a
  # normalization constant of close to 1.
  # pyformat: disable
  scale = tf.linalg.LinearOperatorFullMatrix(
      self._input([[1., -0.5],
                   [-0.5, 1.]]))
  # pyformat: enable
  dist = mvt.MultivariateStudentTLinearOperator(
      loc=self._input([1., 1.]), df=self._input(5.), scale=scale)
  # Build a 100x100 grid of 2-D evaluation points on [-20, 20]^2.
  spacings = tf.cast(tf.linspace(-20., 20., 100), self.dtype)
  x, y = tf.meshgrid(spacings, spacings)
  points = tf.concat([x[..., tf.newaxis], y[..., tf.newaxis]], -1)
  log_probs = dist.log_prob(points)
  # Riemann sum: sum(exp(log_prob)) * cell_area should approximate 1.
  normalization = tf.exp(
      tf.reduce_logsumexp(log_probs)) * (spacings[1] - spacings[0])**2
  self.assertAllClose(1., self.evaluate(normalization), atol=1e-3)
  # The density must peak at the mode: no grid point may exceed it.
  mode_log_prob = dist.log_prob(dist.mode())
  self.assertTrue(np.all(self.evaluate(mode_log_prob >= log_probs)))
@parameterized.parameters(1., 3., 10.)
def testHypersphereVolume(self, radius):
  """Empirical mass within a ball of `radius` is consistent with log_prob."""
  # pyformat: disable
  scale = tf.linalg.LinearOperatorFullMatrix(
      self._input([[1., -0.5],
                   [-0.5, 1.]]))
  # pyformat: enable
  dist = mvt.MultivariateStudentTLinearOperator(
      loc=self._input([1., 1.]), df=self._input(4.), scale=scale)
  # Monte-Carlo check from VectorDistributionTestHelpers: the fraction of
  # samples landing inside the ball must match the integrated density.
  self.run_test_sample_consistent_log_prob(
      sess_run_fn=self.evaluate,
      dist=dist,
      radius=radius,
      num_samples=int(5e6),
      rtol=0.05)
def testLogProbSameFor1D(self):
  """A 1-D MVT must reproduce the scalar Student's t log-prob exactly."""
  reference = student_t.StudentT(
      df=self._input(5.), loc=self._input(2.), scale=self._input(3.))
  vector_dist = mvt.MultivariateStudentTLinearOperator(
      loc=[self._input(2.)],
      df=self._input(5.),
      scale=tf.linalg.LinearOperatorDiag([self._input(3.)]))
  grid = tf.cast(tf.linspace(-10.0, 10.0, 100), self.dtype)
  reference_log_probs = self.evaluate(reference.log_prob(grid))
  vector_log_probs = self.evaluate(
      vector_dist.log_prob(grid[..., tf.newaxis]))
  self.assertAllClose(reference_log_probs, vector_log_probs)
class MultivariateStudentTTestFloat64StaticShape(
    MultivariateStudentTTestFloat32StaticShape):
  """Re-runs the full suite with float64 inputs and static shapes."""
  dtype = tf.float64
  use_static_shape = True
class MultivariateStudentTTestFloat32DynamicShape(
    MultivariateStudentTTestFloat32StaticShape):
  """Re-runs the full suite with float32 inputs and dynamic shapes."""
  dtype = tf.float32
  use_static_shape = False
if __name__ == "__main__":
  # Run every test case in this module under the TensorFlow test runner.
  tf.test.main()
| 39.549878 | 85 | 0.640787 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.distributions import multivariate_student_t as mvt
from tensorflow_probability.python.distributions import student_t
from tensorflow_probability.python.internal import test_case
from tensorflow_probability.python.internal import test_util as tfp_test_util
tfe = tf.contrib.eager
@tfe.run_all_tests_in_graph_and_eager_modes
class MultivariateStudentTTestFloat32StaticShape(
test_case.TestCase, parameterized.TestCase,
tfp_test_util.VectorDistributionTestHelpers):
dtype = tf.float32
use_static_shape = True
def _input(self, value):
value = tf.cast(value, self.dtype)
return tf.placeholder_with_default(
value, shape=value.shape if self.use_static_shape else None)
@parameterized.parameters(
([0., 0.], 1., [1., 1.], []),
(0., 1., [1., 1.], []),
([[[0., 0.]]], 1., [1., 1.], [1, 1]),
([0., 0.], [[1.]], [1., 1.], [1, 1]),
([0., 0.], 1., [[[1., 1.]]], [1, 1]),
([[[0., 0.]]], [[1.]], [[[1., 1.]]], [1, 1]),
)
def testBroadcasting(self, loc, df, diag, batch_shape):
loc = self._input(loc)
df = self._input(df)
diag = self._input(diag)
scale = tf.linalg.LinearOperatorDiag(diag, is_positive_definite=True)
dist = mvt.MultivariateStudentTLinearOperator(
loc=loc, df=df, scale=scale, validate_args=True)
sample = dist.sample(3)
log_prob = dist.log_prob(sample)
mean = dist.mean()
mode = dist.mode()
cov = dist.covariance()
std = dist.stddev()
var = dist.variance()
entropy = dist.entropy()
if self.use_static_shape:
self.assertAllEqual([3] + batch_shape + [2], sample.shape)
self.assertAllEqual([3] + batch_shape, log_prob.shape)
self.assertAllEqual(batch_shape + [2], mean.shape)
self.assertAllEqual(batch_shape + [2], mode.shape)
self.assertAllEqual(batch_shape + [2, 2], cov.shape)
self.assertAllEqual(batch_shape + [2], std.shape)
self.assertAllEqual(batch_shape + [2], var.shape)
self.assertAllEqual(batch_shape, entropy.shape)
self.assertAllEqual([2], dist.event_shape)
self.assertAllEqual(batch_shape, dist.batch_shape)
sample = self.evaluate(sample)
log_prob = self.evaluate(log_prob)
mean = self.evaluate(mean)
mode = self.evaluate(mode)
cov = self.evaluate(cov)
std = self.evaluate(std)
var = self.evaluate(var)
entropy = self.evaluate(entropy)
self.assertAllEqual([3] + batch_shape + [2], sample.shape)
self.assertAllEqual([3] + batch_shape, log_prob.shape)
self.assertAllEqual(batch_shape + [2], mean.shape)
self.assertAllEqual(batch_shape + [2], mode.shape)
self.assertAllEqual(batch_shape + [2, 2], cov.shape)
self.assertAllEqual(batch_shape + [2], std.shape)
self.assertAllEqual(batch_shape + [2], var.shape)
self.assertAllEqual(batch_shape, entropy.shape)
self.assertAllEqual([2], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor()))
def testNonPositiveDf(self):
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"`df` must be positive"):
self.evaluate(
mvt.MultivariateStudentTLinearOperator(
loc=self._input([0.]),
df=self._input(0.),
scale=tf.linalg.LinearOperatorDiag(
self._input([1.]), is_positive_definite=True),
validate_args=True).df)
def testBadScaleDType(self):
with self.assertRaisesRegexp(TypeError,
"`scale` must have floating-point dtype."):
mvt.MultivariateStudentTLinearOperator(
loc=[0.],
df=1.,
scale=tf.linalg.LinearOperatorIdentity(
num_rows=1, dtype=tf.int32, is_positive_definite=True))
def testNotPositiveDefinite(self):
with self.assertRaisesRegexp(ValueError,
"`scale` must be positive definite."):
mvt.MultivariateStudentTLinearOperator(
loc=self._input([0.]),
df=self._input(1.),
scale=tf.linalg.LinearOperatorDiag(self._input([1.])),
validate_args=True)
def testMeanAllDefined(self):
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([0., 0.]),
df=self._input(2.),
scale=tf.linalg.LinearOperatorDiag(self._input([1., 1.])))
mean = self.evaluate(dist.mean())
self.assertAllClose([0., 0.], mean)
def testMeanSomeUndefinedNaNAllowed(self):
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([[0., 0.], [1., 1.]]),
df=self._input([1., 2.]),
scale=tf.linalg.LinearOperatorDiag(self._input([[1., 1.], [1., 1.]])),
allow_nan_stats=True)
mean = self.evaluate(dist.mean())
self.assertAllClose([[np.nan, np.nan], [1., 1.]], mean)
def testMeanSomeUndefinedNaNNotAllowed(self):
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([[0., 0.], [1., 1.]]),
df=self._input([1., 2.]),
scale=tf.linalg.LinearOperatorDiag(self._input([[1., 1.], [1., 1.]])),
allow_nan_stats=False)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"mean not defined for components of df <= 1"):
self.evaluate(dist.mean())
def testMode(self):
dist = mvt.MultivariateStudentTLinearOperator(
loc=[0., 0.], df=2., scale=tf.linalg.LinearOperatorDiag([[1., 1.]]))
mode = self.evaluate(dist.mode())
self.assertAllClose([[0., 0.]], mode)
@parameterized.parameters(
([2., 2.], None, [[4., 0.], [0., 4.]]),
(None, [[2., 1.], [1., 2.]], [[5., 4.], [4., 5.]]),
)
def testCovarianceAllDefined(self,
diag=None,
full=None,
expected_mvn_cov=None):
if diag is not None:
scale = tf.linalg.LinearOperatorDiag(self._input(diag))
else:
scale = tf.linalg.LinearOperatorFullMatrix(self._input(full))
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([0., 0.]), df=self._input(3.), scale=scale)
cov = self.evaluate(dist.covariance())
self.assertAllClose(np.array(expected_mvn_cov) * 3. / (3. - 2.), cov)
def testCovarianceSomeUndefinedNaNAllowed(self):
scale = tf.linalg.LinearOperatorDiag(self._input([2., 2.]))
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([0., 0.]),
df=self._input([2., 1.]),
scale=scale,
allow_nan_stats=True)
cov = self.evaluate(dist.covariance())
self.assertAllClose(np.full([2, 2], np.inf), cov[0])
self.assertAllClose(np.full([2, 2], np.nan), cov[1])
def testCovarianceSomeUndefinedNaNNotAllowed(self):
scale = tf.linalg.LinearOperatorDiag(self._input([2., 2.]))
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([0., 0.]),
df=self._input(1.),
scale=scale,
allow_nan_stats=False)
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
"covariance not defined for components of df <= 1"):
self.evaluate(dist.covariance())
@parameterized.parameters(
([2., 2.], None, None, [4., 4.]),
(None, [[2., 1.], [1., 2.]], None, [5., 5.]),
([2., 2.], None, [[1.],[1.]], [10., 10.]),
)
def testVarianceStdAllDefined(self,
diag=None,
full=None,
update=None,
expected_mvn_var=None):
if diag is not None:
scale = tf.linalg.LinearOperatorDiag(self._input(diag))
elif full is not None:
scale = tf.linalg.LinearOperatorFullMatrix(self._input(full))
if update is not None:
scale = tf.linalg.LinearOperatorLowRankUpdate(scale, self._input(update))
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([0., 0.]), df=self._input(3.), scale=scale)
var = self.evaluate(dist.variance())
std = self.evaluate(dist.stddev())
self.assertAllClose(np.array(expected_mvn_var) * 3., var)
self.assertAllClose(np.sqrt(np.array(expected_mvn_var) * 3.), std)
def testVarianceStdSomeUndefinedNaNAllowed(self):
scale = tf.linalg.LinearOperatorDiag(self._input([2., 2.]))
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([0., 0.]),
df=self._input([2., 1.]),
scale=scale,
allow_nan_stats=True)
var = self.evaluate(dist.variance())
std = self.evaluate(dist.stddev())
self.assertAllClose([np.inf, np.inf], var[0])
self.assertAllClose([np.nan, np.nan], var[1])
self.assertAllClose([np.inf, np.inf], std[0])
self.assertAllClose([np.nan, np.nan], std[1])
def testVarianceStdSomeUndefinedNaNNotAllowed(self):
scale = tf.linalg.LinearOperatorDiag(self._input([2., 2.]))
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([0., 0.]),
df=self._input(1.),
scale=scale,
allow_nan_stats=False)
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
"variance not defined for components of df <= 1"):
self.evaluate(dist.variance())
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
"standard deviation not defined for components of df <= 1"):
self.evaluate(dist.stddev())
def testEntropy(self):
scale = tf.linalg.LinearOperatorDiag(self._input([2., 2.]))
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([0., 0.]), df=self._input([2., 3.]), scale=scale)
self.assertAllClose(
[0.5 * np.log(16.) + 3.83788, 0.5 * np.log(16.) + 3.50454],
dist.entropy())
def testSamplingConsistency(self):
scale = tf.linalg.LinearOperatorFullMatrix(self._input(
[[2., -1.],
[-1., 2.]]))
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([1., 2.]), df=self._input(5.), scale=scale)
self.run_test_sample_consistent_mean_covariance(
sess_run_fn=self.evaluate, dist=dist)
def testSamplingDeterministic(self):
scale = tf.linalg.LinearOperatorFullMatrix(self._input(
[[2., -1.],
[-1., 2.]]))
tf.set_random_seed(2)
dist1 = mvt.MultivariateStudentTLinearOperator(
loc=[1., 2.], df=5., scale=scale)
samples1 = self.evaluate(dist1.sample(100, seed=1))
tf.set_random_seed(2)
dist2 = mvt.MultivariateStudentTLinearOperator(
loc=[1., 2.], df=5., scale=scale)
samples2 = self.evaluate(dist2.sample(100, seed=1))
self.assertAllClose(samples1, samples2)
def testSamplingFullyReparameterized(self):
df = self._input(2.)
loc = self._input([1., 2.])
diag = self._input([3., 4.])
with tf.GradientTape() as tape:
tape.watch(df)
tape.watch(loc)
tape.watch(diag)
scale = tf.linalg.LinearOperatorDiag(diag)
dist = mvt.MultivariateStudentTLinearOperator(loc=loc, df=df, scale=scale)
samples = dist.sample(100)
grad_df, grad_loc, grad_diag = tape.gradient(samples, [df, loc, diag])
self.assertIsNotNone(grad_df)
self.assertIsNotNone(grad_loc)
self.assertIsNotNone(grad_diag)
def testSamplingSmallDfNoNaN(self):
scale = tf.linalg.LinearOperatorDiag(self._input([1., 1.]))
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([0., 0.]),
df=self._input([1e-1, 1e-5, 1e-10, 1e-20]),
scale=scale)
samples = dist.sample(int(2e5), seed=1)
log_probs = dist.log_prob(samples)
samples, log_probs = self.evaluate([samples, log_probs])
self.assertTrue(np.all(np.isfinite(samples)))
self.assertTrue(np.all(np.isfinite(log_probs)))
def testLogProb(self):
scale = tf.linalg.LinearOperatorFullMatrix(
self._input([[1., -0.5],
[-0.5, 1.]]))
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([1., 1.]), df=self._input(5.), scale=scale)
spacings = tf.cast(tf.linspace(-20., 20., 100), self.dtype)
x, y = tf.meshgrid(spacings, spacings)
points = tf.concat([x[..., tf.newaxis], y[..., tf.newaxis]], -1)
log_probs = dist.log_prob(points)
normalization = tf.exp(
tf.reduce_logsumexp(log_probs)) * (spacings[1] - spacings[0])**2
self.assertAllClose(1., self.evaluate(normalization), atol=1e-3)
mode_log_prob = dist.log_prob(dist.mode())
self.assertTrue(np.all(self.evaluate(mode_log_prob >= log_probs)))
@parameterized.parameters(1., 3., 10.)
def testHypersphereVolume(self, radius):
scale = tf.linalg.LinearOperatorFullMatrix(
self._input([[1., -0.5],
[-0.5, 1.]]))
dist = mvt.MultivariateStudentTLinearOperator(
loc=self._input([1., 1.]), df=self._input(4.), scale=scale)
self.run_test_sample_consistent_log_prob(
sess_run_fn=self.evaluate,
dist=dist,
radius=radius,
num_samples=int(5e6),
rtol=0.05)
def testLogProbSameFor1D(self):
t_dist = student_t.StudentT(
df=self._input(5.), loc=self._input(2.), scale=self._input(3.))
scale = tf.linalg.LinearOperatorDiag([self._input(3.)])
mvt_dist = mvt.MultivariateStudentTLinearOperator(
loc=[self._input(2.)], df=self._input(5.), scale=scale)
test_points = tf.cast(tf.linspace(-10.0, 10.0, 100), self.dtype)
t_log_probs = self.evaluate(t_dist.log_prob(test_points))
mvt_log_probs = self.evaluate(
mvt_dist.log_prob(test_points[..., tf.newaxis]))
self.assertAllClose(t_log_probs, mvt_log_probs)
class MultivariateStudentTTestFloat64StaticShape(
MultivariateStudentTTestFloat32StaticShape):
dtype = tf.float64
use_static_shape = True
class MultivariateStudentTTestFloat32DynamicShape(
MultivariateStudentTTestFloat32StaticShape):
dtype = tf.float32
use_static_shape = False
if __name__ == "__main__":
tf.test.main()
| true | true |
f7f4d1e5a55e48b9b376d603ce872c4de3ef6754 | 1,187 | py | Python | unittests/test_IO.py | tyburesh/Forest | 59b213e4d8a0587d8bb24b9f7a2d05b9734b4bd7 | [
"BSD-3-Clause"
] | null | null | null | unittests/test_IO.py | tyburesh/Forest | 59b213e4d8a0587d8bb24b9f7a2d05b9734b4bd7 | [
"BSD-3-Clause"
] | null | null | null | unittests/test_IO.py | tyburesh/Forest | 59b213e4d8a0587d8bb24b9f7a2d05b9734b4bd7 | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2017 Eric Shook. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
@author: eshook (Eric Shook, eshook@gmail.edu)
@contributors: <Contribute and add your name here!>
"""
from forest import *
import unittest
# Test forest/bobs/Bob.py
def maketiles(nfiles, nrows, ncols, prefix="unittests/tmp_raster"):
    """Write `nfiles` ESRI ASCII grid rasters to use as test fixtures.

    Each file ``<prefix><i>.asc`` contains the standard 6-line ASCII-grid
    header followed by an `nrows` x `ncols` grid whose cell at (row, col)
    holds ``row + col + file_index``.

    Args:
        nfiles: number of raster files to create.
        nrows:  number of rows per raster.
        ncols:  number of columns per raster.
        prefix: path prefix for the output files; defaults to the original
                hard-coded location, so existing callers are unaffected.
    """
    for f_i in range(nfiles):
        # `with` guarantees the handle is closed even if a write fails
        # (the original open()/close() pair leaked on exceptions).
        with open(prefix + str(f_i) + ".asc", "w") as f:
            f.write("ncols " + str(ncols) + "\n")
            f.write("nrows " + str(nrows) + "\n")
            f.write("xllcorner 0.0\n")
            f.write("yllcorner 0.0\n")
            f.write("cellsize 1.0\n")
            f.write("NODATA_value -999\n")
            for i in range(nrows):
                for j in range(ncols):
                    f.write(str(i + j + f_i) + " ")
                f.write("\n")
#maketiles(nfiles,nrows,ncols)
class TestIO(unittest.TestCase):
    """Exercises forest raster I/O against the temporary .asc fixtures."""

    def setUp(self):
        # Regenerate the three 13x13 fixture rasters before every test.
        nfiles = 3
        nrows = 13
        ncols = 13
        maketiles(nfiles,nrows,ncols)

    def test_io(self):
        # A freshly constructed Raster should be positioned at y == 0.
        b1 = Raster()
        self.assertEqual(b1.y,0)
# Collect the TestIO cases into a suite for the project-wide test runner.
test_IO_suite = unittest.TestLoader().loadTestsFromTestCase(TestIO)
| 25.255319 | 97 | 0.593934 |
from forest import *
import unittest
def maketiles(nfiles,nrows,ncols):
for f_i in range(nfiles):
f = open("unittests/tmp_raster"+str(f_i)+".asc","w")
f.write("ncols "+str(ncols)+"\n")
f.write("nrows "+str(nrows)+"\n")
f.write("xllcorner 0.0\n")
f.write("yllcorner 0.0\n")
f.write("cellsize 1.0\n")
f.write("NODATA_value -999\n")
for i in range(nrows):
for j in range(ncols):
f.write(str(i+j+f_i)+" ")
f.write("\n")
f.close()
class TestIO(unittest.TestCase):
def setUp(self):
nfiles = 3
nrows = 13
ncols = 13
maketiles(nfiles,nrows,ncols)
def test_io(self):
b1 = Raster()
self.assertEqual(b1.y,0)
test_IO_suite = unittest.TestLoader().loadTestsFromTestCase(TestIO)
| true | true |
f7f4d32b0d4c21102074690da7e79ca0ebfccf27 | 38 | py | Python | workflowV2/__init__.py | neal-p/workflow2.0 | 65a71754310051ddb3fd4338ff579c499608a483 | [
"MIT"
] | null | null | null | workflowV2/__init__.py | neal-p/workflow2.0 | 65a71754310051ddb3fd4338ff579c499608a483 | [
"MIT"
] | 1 | 2021-07-22T15:19:58.000Z | 2021-07-22T15:19:58.000Z | workflowV2/__init__.py | neal-p/workflowV2 | 65a71754310051ddb3fd4338ff579c499608a483 | [
"MIT"
] | null | null | null | __logfile__ = None
__logging__ = True
| 12.666667 | 18 | 0.789474 | __logfile__ = None
__logging__ = True
| true | true |
f7f4d3b9535ce4d4fb1b20268ca4d5034dd6d28a | 119 | py | Python | src/__init__.py | superserver/MinecraftServerAutoDeploy | d2af2b4528572924c83e80b05ceee69166803083 | [
"Apache-2.0"
] | 1 | 2019-01-31T14:08:24.000Z | 2019-01-31T14:08:24.000Z | src/__init__.py | superserver/MinecraftServerAutoDeploy | d2af2b4528572924c83e80b05ceee69166803083 | [
"Apache-2.0"
] | null | null | null | src/__init__.py | superserver/MinecraftServerAutoDeploy | d2af2b4528572924c83e80b05ceee69166803083 | [
"Apache-2.0"
] | null | null | null | from src import init
from src import start
from src import pull
from src import push
print("You have imported package") | 23.8 | 34 | 0.806723 | from src import init
from src import start
from src import pull
from src import push
print("You have imported package") | true | true |
f7f4d3f2f63044301a2b8de2037559d9879ff3de | 1,815 | py | Python | django_mini_fastapi/fastapi/concurrency.py | hanyichiu/django-mini-fastapi | 911340319d4be28634ed49b90b862adf18b4e79a | [
"MIT"
] | null | null | null | django_mini_fastapi/fastapi/concurrency.py | hanyichiu/django-mini-fastapi | 911340319d4be28634ed49b90b862adf18b4e79a | [
"MIT"
] | null | null | null | django_mini_fastapi/fastapi/concurrency.py | hanyichiu/django-mini-fastapi | 911340319d4be28634ed49b90b862adf18b4e79a | [
"MIT"
] | null | null | null | from typing import Any, Callable
# from starlette.concurrency import iterate_in_threadpool as iterate_in_threadpool # noqa
# from starlette.concurrency import run_in_threadpool as run_in_threadpool # noqa
# from starlette.concurrency import ( # noqa
# run_until_first_complete as run_until_first_complete,
# )
from .mock import iterate_in_threadpool, run_in_threadpool, run_until_first_complete
asynccontextmanager_error_message = """
FastAPI's contextmanager_in_threadpool require Python 3.7 or above,
or the backport for Python 3.6, installed with:
pip install async-generator
"""
def _fake_asynccontextmanager(func: Callable[..., Any]) -> Callable[..., Any]:
def raiser(*args: Any, **kwargs: Any) -> Any:
raise RuntimeError(asynccontextmanager_error_message)
return raiser
# Prefer the stdlib asynccontextmanager (Python 3.7+); fall back to the
# `async_generator` backport on 3.6; otherwise install the stub that raises
# a helpful error only when the decorator is actually used.
try:
    from contextlib import asynccontextmanager as asynccontextmanager  # type: ignore
except ImportError:
    try:
        from async_generator import (  # type: ignore # isort: skip
            asynccontextmanager as asynccontextmanager,
        )
    except ImportError:  # pragma: no cover
        asynccontextmanager = _fake_asynccontextmanager

# AsyncExitStack: stdlib on 3.7+, `async_exit_stack` backport on 3.6;
# None signals to callers that the feature is unavailable.
try:
    from contextlib import AsyncExitStack as AsyncExitStack  # type: ignore
except ImportError:
    try:
        from async_exit_stack import AsyncExitStack as AsyncExitStack  # type: ignore
    except ImportError:  # pragma: no cover
        AsyncExitStack = None  # type: ignore
@asynccontextmanager  # type: ignore
async def contextmanager_in_threadpool(cm: Any) -> Any:
    """Adapt a synchronous context manager for use in async code.

    ``cm.__enter__`` and ``cm.__exit__`` run in the threadpool so that a
    blocking context manager never stalls the event loop.
    """
    try:
        yield await run_in_threadpool(cm.__enter__)
    except Exception as e:
        # Mirror `with` semantics: a truthy __exit__ suppresses the
        # exception; otherwise re-raise it.
        ok = await run_in_threadpool(cm.__exit__, type(e), e, None)
        if not ok:
            raise e
    else:
        await run_in_threadpool(cm.__exit__, None, None, None)
| 34.245283 | 90 | 0.730579 | from typing import Any, Callable
port iterate_in_threadpool, run_in_threadpool, run_until_first_complete
asynccontextmanager_error_message = """
FastAPI's contextmanager_in_threadpool require Python 3.7 or above,
or the backport for Python 3.6, installed with:
pip install async-generator
"""
def _fake_asynccontextmanager(func: Callable[..., Any]) -> Callable[..., Any]:
def raiser(*args: Any, **kwargs: Any) -> Any:
raise RuntimeError(asynccontextmanager_error_message)
return raiser
try:
from contextlib import asynccontextmanager as asynccontextmanager # type: ignore
except ImportError:
try:
from async_generator import ( # type: ignore # isort: skip
asynccontextmanager as asynccontextmanager,
)
except ImportError: # pragma: no cover
asynccontextmanager = _fake_asynccontextmanager
try:
from contextlib import AsyncExitStack as AsyncExitStack # type: ignore
except ImportError:
try:
from async_exit_stack import AsyncExitStack as AsyncExitStack # type: ignore
except ImportError: # pragma: no cover
AsyncExitStack = None # type: ignore
@asynccontextmanager # type: ignore
async def contextmanager_in_threadpool(cm: Any) -> Any:
try:
yield await run_in_threadpool(cm.__enter__)
except Exception as e:
ok = await run_in_threadpool(cm.__exit__, type(e), e, None)
if not ok:
raise e
else:
await run_in_threadpool(cm.__exit__, None, None, None)
| true | true |
f7f4d41a60fd28a6ed65ec2c50399477fba862f2 | 4,184 | py | Python | trac/tests/functional/svntestenv.py | tiagoeckhardt/trac | b18c226195bfed8cd19cba97c6f03bd54dbbc044 | [
"BSD-3-Clause"
] | null | null | null | trac/tests/functional/svntestenv.py | tiagoeckhardt/trac | b18c226195bfed8cd19cba97c6f03bd54dbbc044 | [
"BSD-3-Clause"
] | null | null | null | trac/tests/functional/svntestenv.py | tiagoeckhardt/trac | b18c226195bfed8cd19cba97c6f03bd54dbbc044 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import os
import re
from subprocess import call
from testenv import FunctionalTestEnvironment
from trac.util.compat import close_fds
class SvnFunctionalTestEnvironment(FunctionalTestEnvironment):
    """Functional-test environment backed by a local Subversion repository.

    Creates an svn repository plus a checked-out working copy inside the
    test directory and provides helpers (`svn_mkdir`, `svn_add`) for
    populating the repository from tests.
    """

    def work_dir(self):
        """Return the path of the Subversion working copy."""
        return os.path.join(self.dirname, 'workdir')

    def repo_path(self, filename):
        """Return the absolute path of `filename` inside the test dir."""
        return os.path.join(self.dirname, filename)

    def repo_path_for_initenv(self):
        """Return the path of the main repository ('repo')."""
        return self.repo_path('repo')

    def create_repo(self):
        """
        Initialize a repo of the type :attr:`self.repotype`.
        """
        self.svnadmin_create()
        # Check out a working copy right away so svn_add/svn_mkdir can
        # operate on it; a non-zero exit status from `svn co` is fatal.
        if call(['svn', 'co', self.repo_url(), self.work_dir()],
                stdout=self.logfile, stderr=self.logfile,
                close_fds=close_fds):
            raise Exception('Checkout from %s failed.' % self.repo_url())

    def destroy_repo(self):
        """The deletion of the test environment will remove the
        repo as well."""
        pass

    def post_create(self, env):
        """Hook for modifying the environment after creation."""
        # Sync the repository on every request so freshly committed
        # revisions are visible to Trac without an explicit resync.
        self._tracadmin('config', 'set', 'repositories',
                        '.sync_per_request', '1')

    def repo_url(self):
        """Returns the url of the Subversion repository for this test
        environment.
        """
        repodir = self.repo_path_for_initenv()
        if os.name == 'nt':
            # Windows: file URLs need forward slashes and a third slash
            # before the drive letter (file:///C:/...).
            return 'file:///' + repodir.replace("\\", "/")
        else:
            return 'file://' + repodir

    def svnadmin_create(self, filename=None):
        """Subversion helper to create a new repository."""
        if filename is None:
            path = self.repo_path_for_initenv()
        else:
            path = self.repo_path(filename)
        # `svnadmin create` returns non-zero on failure.
        if call(["svnadmin", "create", path],
                stdout=self.logfile, stderr=self.logfile, close_fds=close_fds):
            raise Exception('unable to create subversion repository: %r' %
                            path)
        return path

    def svn_mkdir(self, paths, msg, username='admin'):
        """Subversion helper to create a new directory within the main
        repository.  Operates directly on the repository url, so a working
        copy need not exist.

        Example::

            self._testenv.svn_mkdir(["abc", "def"], "Add dirs")
        """
        self.call_in_workdir(['svn', '--username=%s' % username,
                              'mkdir', '-m', msg]
                             + [self.repo_url() + '/' + d for d in paths])
        # Bring the working copy up to date with the new directories.
        self.call_in_workdir(['svn', 'update'])

    def svn_add(self, filename, data, msg=None, username='admin'):
        """Subversion helper to add a file to the given path within the main
        repository.

        Example::

            self._testenv.svn_add("root.txt", "Hello World")
        """
        with open(os.path.join(self.work_dir(), filename), 'w') as f:
            f.write(data)
        self.call_in_workdir(['svn', 'add', filename])
        environ = os.environ.copy()
        environ['LC_ALL'] = 'C'  # Force English messages in svn
        msg = 'Add %s' % filename if msg is None else msg
        output = self.call_in_workdir(['svn', '--username=%s' % username,
                                       'commit', '-m', msg, filename],
                                      environ=environ)
        try:
            # Parse the committed revision number out of svn's output.
            revision = re.search(r'Committed revision ([0-9]+)\.',
                                 output).group(1)
        except Exception as e:
            # Attach the raw svn output to the error to ease debugging.
            args = e.args + (output, )
            raise Exception(*args)
        return int(revision)

    def call_in_workdir(self, args, environ=None):
        """Run `args` as a subprocess with the working copy as cwd."""
        return self.call_in_dir(self.work_dir(), args, environ)
| 35.760684 | 79 | 0.588671 |
import os
import re
from subprocess import call
from testenv import FunctionalTestEnvironment
from trac.util.compat import close_fds
class SvnFunctionalTestEnvironment(FunctionalTestEnvironment):
    """Functional-test environment backed by a local Subversion repository.

    All svn/svnadmin interaction goes through subprocess calls; command
    output is redirected to ``self.logfile``.
    """
    def work_dir(self):
        """Return the path of the svn working copy."""
        return os.path.join(self.dirname, 'workdir')
    def repo_path(self, filename):
        """Return an absolute path inside the environment directory."""
        return os.path.join(self.dirname, filename)
    def repo_path_for_initenv(self):
        """Return the repository path handed to ``initenv``."""
        return self.repo_path('repo')
    def create_repo(self):
        """Create the repository and check out a working copy."""
        self.svnadmin_create()
        # `call` returns a non-zero exit status on failure.
        if call(['svn', 'co', self.repo_url(), self.work_dir()],
                stdout=self.logfile, stderr=self.logfile,
                close_fds=close_fds):
            raise Exception('Checkout from %s failed.' % self.repo_url())
    def destroy_repo(self):
        """No-op: the repo lives under dirname and is removed with it."""
        pass
    def post_create(self, env):
        """Enable per-request repository syncing after env creation."""
        self._tracadmin('config', 'set', 'repositories',
                        '.sync_per_request', '1')
    def repo_url(self):
        """Return the file:// URL of the repository (Windows-safe)."""
        repodir = self.repo_path_for_initenv()
        if os.name == 'nt':
            return 'file:///' + repodir.replace("\\", "/")
        else:
            return 'file://' + repodir
    def svnadmin_create(self, filename=None):
        """Create a new svn repository and return its path."""
        if filename is None:
            path = self.repo_path_for_initenv()
        else:
            path = self.repo_path(filename)
        if call(["svnadmin", "create", path],
           stdout=self.logfile, stderr=self.logfile, close_fds=close_fds):
            raise Exception('unable to create subversion repository: %r' %
                            path)
        return path
    def svn_mkdir(self, paths, msg, username='admin'):
        """Create directories directly in the repository, then update."""
        self.call_in_workdir(['svn', '--username=%s' % username,
                              'mkdir', '-m', msg]
                             + [self.repo_url() + '/' + d for d in paths])
        self.call_in_workdir(['svn', 'update'])
    def svn_add(self, filename, data, msg=None, username='admin'):
        """Write *data* to *filename*, add and commit it; return the new revision."""
        with open(os.path.join(self.work_dir(), filename), 'w') as f:
            f.write(data)
        self.call_in_workdir(['svn', 'add', filename])
        environ = os.environ.copy()
        environ['LC_ALL'] = 'C'  # Force English messages in svn
        msg = 'Add %s' % filename if msg is None else msg
        output = self.call_in_workdir(['svn', '--username=%s' % username,
                                       'commit', '-m', msg, filename],
                                      environ=environ)
        try:
            # Parse the revision number out of svn's (English) output.
            revision = re.search(r'Committed revision ([0-9]+)\.',
                                 output).group(1)
        except Exception as e:
            args = e.args + (output, )
            raise Exception(*args)
        return int(revision)
    def call_in_workdir(self, args, environ=None):
        """Run *args* with the svn working copy as the current directory."""
        return self.call_in_dir(self.work_dir(), args, environ)
| true | true |
f7f4d41d2465d5a2cf784854e9dd0ea79988b37c | 1,596 | py | Python | cogs/ping.py | Ashwinshankar98/ClassMateBot | 99441c02107b649aedd4b57f34be12823d01ea74 | [
"MIT"
] | null | null | null | cogs/ping.py | Ashwinshankar98/ClassMateBot | 99441c02107b649aedd4b57f34be12823d01ea74 | [
"MIT"
] | 64 | 2021-11-25T22:13:19.000Z | 2021-12-05T00:25:05.000Z | cogs/ping.py | chandur626/ClassMateBot | 6946767a5f1aec6d3e4386615d9b1eefb27c07ab | [
"MIT"
] | 4 | 2021-10-31T19:42:00.000Z | 2021-11-28T09:55:32.000Z | # Copyright (c) 2021 War-Keeper
import discord
from discord.ext import commands
# ----------------------------------------------------------------------------------------------
# Returns the ping of the bot, useful for testing bot lag and as a simple functionality command
# ----------------------------------------------------------------------------------------------
class Helpful(commands.Cog):
    """Cog exposing a simple latency-check ("ping") command."""

    def __init__(self, bot):
        self.bot = bot

    # -------------------------------------------------------------------------------------------------------
    # Function: ping(self, ctx)
    # Description: replies with the bot's current latency; handy as a liveness/lag test.
    # Inputs:
    # - self: used to access the bot instance stored by the constructor
    # - ctx: the invocation context the reply is sent to
    # Outputs: sends the current ping of the bot, capped at 999999999 to avoid float errors
    # -------------------------------------------------------------------------------------------------------
    @commands.command()
    async def ping(self, ctx):
        # Cap the value so a float-infinity latency cannot crash testing.
        latency_ms = round(min(999999999, self.bot.latency * 1000))
        await ctx.send(f"Pong! My ping currently is {latency_ms}ms")

    # Leftover class-level attributes; kept in case external code reads them.
    graphType = "bar"
    title = "Midterm grade distribution"
# -------------------------------------
# add the file to the bot's cog system
# -------------------------------------
def setup(bot):
    """discord.py extension entry point: register the Helpful cog."""
    cog = Helpful(bot)
    bot.add_cog(cog)
| 40.923077 | 111 | 0.47619 |
import discord
from discord.ext import commands
class Helpful(commands.Cog):
    """Cog with a simple latency ("ping") command."""
    def __init__(self, bot):
        self.bot = bot
    @commands.command()
    async def ping(self, ctx):
        # Latency is capped at 999999999 so float-infinity values cannot crash tests.
        await ctx.send(f"Pong! My ping currently is {round(min(999999999, self.bot.latency * 1000))}ms")
    # Leftover class-level attributes; kept in case external code reads them.
    graphType = "bar"
    title = "Midterm grade distribution"
# -------------------------------------
def setup(bot):
    # discord.py extension entry point: register the cog with the bot.
    bot.add_cog(Helpful(bot))
| true | true |
f7f4d45e374d076f89128a81065c94720fb85fbb | 2,713 | py | Python | MLWorker/dataset_service.py | harymitchell/mscs-ml | 4e284c79c9c30926c7ca24ac8bf082b4cefadddc | [
"MIT"
] | 1 | 2018-05-31T14:58:49.000Z | 2018-05-31T14:58:49.000Z | MLWorker/dataset_service.py | harymitchell/mscs-ml | 4e284c79c9c30926c7ca24ac8bf082b4cefadddc | [
"MIT"
] | 7 | 2018-03-20T15:05:55.000Z | 2018-03-21T19:47:25.000Z | MLWorker/dataset_service.py | harymitchell/mscs-ml | 4e284c79c9c30926c7ca24ac8bf082b4cefadddc | [
"MIT"
] | null | null | null | import numpy as np
from numpy import ma
import pandas
from bson.objectid import ObjectId
from pymongo import MongoClient
from settings import TEST_MONGO_HOST, TEST_MONGO_PORT, TEST_MONGO_USERNAME, TEST_MONGO_PASSWORD
import gridfs
import pprint
import StringIO
class dataset_service (object):
    """Service which connects to Datasets via MongoDB.

    Documents live in the ``datasets`` collection; large CSV payloads are
    stored through GridFS and materialized as pandas DataFrames on read.
    """
    def __init__(self, mongo_uri=None, db=None, worker_id=None, client=None):
        # An injected client takes precedence (handy for tests/mocking).
        if client:
            self.client = client
        else:
            self.client = MongoClient(mongo_uri)
        self.db = self.client[db]
        self.fs = gridfs.GridFS(self.db)

    def retrieveAllDatasets(self):
        """Returns all datasets for worker"""
        result = []
        for dataset in self.db.datasets.find():
            result.append(dataset)
        return result

    def getDatasetByID(self, identifier):
        """Return the dataset document for *identifier* (an ObjectId string).

        If the document stores its payload in GridFS, the ``data`` field is
        replaced with a DataFrame loaded from the grid file.
        """
        result = self.db.datasets.find_one({'_id': ObjectId(identifier)})
        if result.get('useGridFile') and result.get('gridFile_id'):
            result['data'] = self.fileToDF(result)
        return result

    def removeDataset(self, filter):
        """Removes all datasets for filter"""
        self.db.datasets.remove(filter)

    def updateDataset(self, dataset, set_obj):
        """Updates the given dataset"""
        return self.db.datasets.update_one(
            {'_id': dataset["_id"]}, set_obj,
            upsert=False)

    def insertDataset(self, dataset):
        """Inserts the given dataset"""
        return self.db.datasets.insert(dataset)

    def fileToDF(self, dataset):
        """Returns a pandas dataframe containing the data from gridFile_id.

        Falls back to the raw ``data`` field when no grid file exists.
        """
        exists = self.fs.exists(dataset.get('gridFile_id'))
        if exists:
            file = self.fs.get(dataset.get('gridFile_id'))
            df = pandas.read_csv(file)
            return df
        return dataset.get('data')

    def dataToNumpy(self, data):
        """Takes array of dict and returns a float numpy matrix.

        Fix: ``DataFrame.as_matrix()`` was removed in pandas 1.0 and
        ``numpy.float`` was removed in numpy 1.24; ``.values`` plus the
        builtin ``float`` give the same result on every supported version.
        """
        df = pandas.DataFrame(data)
        numpyMatrix = df.values.astype(float)
        return numpyMatrix

    @staticmethod
    def floatFromString(s):
        """Parse *s* as a float; return None when it is not a number."""
        try:
            return float(s)
        except ValueError:
            return None
| 35.697368 | 177 | 0.598968 | import numpy as np
from numpy import ma
import pandas
from bson.objectid import ObjectId
from pymongo import MongoClient
from settings import TEST_MONGO_HOST, TEST_MONGO_PORT, TEST_MONGO_USERNAME, TEST_MONGO_PASSWORD
import gridfs
import pprint
import StringIO
class dataset_service (object):
    """Service which connects to Datasets via MongoDB (GridFS for payloads)."""
    def __init__(self, mongo_uri=None, db=None, worker_id=None, client=None):
        # An injected client takes precedence (handy for tests/mocking).
        if client:
            self.client = client
        else:
            self.client = MongoClient(mongo_uri)
        self.db = self.client[db]
        self.fs = gridfs.GridFS(self.db)
    def retrieveAllDatasets(self):
        """Return every document in the datasets collection."""
        result = []
        for dataset in self.db.datasets.find():
            result.append(dataset)
        return result
    def getDatasetByID(self, identifier):
        """Return the dataset document for *identifier*; load GridFS data if used."""
        result = self.db.datasets.find_one({'_id': ObjectId(identifier)})
        if result.get('useGridFile') and result.get('gridFile_id'):
            result['data'] = self.fileToDF(result)
        return result
    def removeDataset(self, filter):
        """Remove all datasets matching *filter*."""
        self.db.datasets.remove(filter)
    def updateDataset(self, dataset, set_obj):
        """Apply *set_obj* to the given dataset document (no upsert)."""
        return self.db.datasets.update_one(
            {'_id': dataset["_id"]}, set_obj,
            upsert=False)
    def insertDataset(self, dataset):
        """Insert the given dataset document."""
        return self.db.datasets.insert(dataset)
    def fileToDF(self, dataset):
        """Return a DataFrame read from gridFile_id, or the raw data field."""
        exists = self.fs.exists(dataset.get('gridFile_id'))
        if exists:
            file = self.fs.get(dataset.get('gridFile_id'))
            df = pandas.read_csv(file)
            return df
        return dataset.get('data')
    def dataToNumpy(self, data):
        """Convert a list of dicts into a float numpy matrix.

        Fix: as_matrix() was removed in pandas 1.0 and numpy.float in
        numpy 1.24; use .values and the builtin float instead.
        """
        df = pandas.DataFrame(data)
        numpyMatrix = df.values.astype(float)
        return numpyMatrix
    @staticmethod
    def floatFromString(s):
        """Parse *s* as a float; return None when it is not a number."""
        try:
            return float(s)
        except ValueError:
            return None
| true | true |
f7f4d4c1ca4024665ad09d816148fd725f5740fa | 7,742 | py | Python | svm.py | AliMakiGmail/SFD-CNN-TL | 96890a086cb170334f761a825a5fdcdc51444696 | [
"MIT"
] | 27 | 2018-09-12T12:00:44.000Z | 2022-03-20T07:33:01.000Z | svm.py | AliMakiGmail/SFD-CNN-TL | 96890a086cb170334f761a825a5fdcdc51444696 | [
"MIT"
] | 2 | 2020-01-13T16:35:50.000Z | 2020-09-07T07:10:12.000Z | svm.py | AliMakiGmail/SFD-CNN-TL | 96890a086cb170334f761a825a5fdcdc51444696 | [
"MIT"
] | 16 | 2018-08-11T14:41:09.000Z | 2021-10-31T13:24:32.000Z | #!/usr/bin/env python
# Copyright 2019 Augusto Cunha and Axelle Pochet
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this code and
# associated documentation files, to deal in the code without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the code, and to permit persons to whom the code is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the code.
#
# THE CODE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE CODE OR THE USE OR OTHER DEALINGS IN THE CODE.
__license__ = "MIT"
__author__ = "Augusto Cunha, Axelle Pochet"
__email__ = "acunha@tecgraf.puc-rio.br, axelle@tecgraf.puc-rio.br"
__credits__ = ["Augusto Cunha", "Axelle Pochet", "Helio Lopes", "Marcelo Gattass"]
################# all imports #################
from __future__ import print_function
import numpy, os, time
import pandas as pd
from tensorflow import set_random_seed
numpy.random.seed(1337)
set_random_seed(1337)
from keras.models import model_from_json
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn import svm
import metrics
from sklearn.externals import joblib
def load_model(modelJsonPath, modelWeightsPath):
    """Load the pre-trained base CNN, drop its last 7 layers and freeze the rest.

    Args:
        modelJsonPath: path to the model architecture (Keras JSON).
        modelWeightsPath: path to the HDF5 weights file.

    Returns:
        The truncated Keras model, used as a frozen feature extractor.
    """
    # 'with' guarantees the handle is closed even if reading fails
    # (the original open/read/close leaked the handle on error).
    with open(modelJsonPath, 'r') as jsonFile:
        loadedModelJson = jsonFile.read()
    base_model = model_from_json(loadedModelJson)
    base_model.load_weights(modelWeightsPath)
    # Remove the last 7 layers (the classifier head).
    # NOTE(review): popping model.layers relies on old-Keras behavior and does
    # not rebuild the graph on newer Keras -- confirm against the pinned
    # Keras version before upgrading.
    for i in range(7):
        base_model.layers.pop()
    base_model.outputs = [base_model.layers[-1].output]
    # Freeze the remaining layers so they are not updated downstream.
    for layer in base_model.layers[:7]:
        layer.trainable = False
    return base_model
def data(X_train, Y_train, numberOfClasses = 2):
    """Split features/labels into a deterministic, shuffled 80/20 split.

    ``numberOfClasses`` is unused; kept for signature compatibility.
    Returns (x_train, y_train, x_test, y_test).
    """
    split = train_test_split(X_train, Y_train, test_size=0.2,
                             shuffle=True, random_state=1337)
    train_x, test_x, train_y, test_y = split
    return train_x, train_y, test_x, test_y
def dataCV(trainFaultDirectory='dataset/fault/',trainNonFaultDirectory='dataset/nonfault/', modelJsonPath = 'base_model/model.json', modelWeightsPath = 'base_model/model.h5'):
    """Load fault/non-fault patches from disk and extract CNN features.

    Every file in *trainFaultDirectory* is labeled 1, every file in
    *trainNonFaultDirectory* 0.  Each file is a whitespace-separated CSV
    holding one 45x45 single-channel patch.  The frozen base CNN is used as
    a feature extractor, and its output becomes the new input features.

    Returns (x_train, y_train, x_test, y_test) where the test lists are
    empty -- cross-validation splits the training data later.
    """
    trainFaultURLList = os.listdir(trainFaultDirectory)
    trainNonFaultURLList = os.listdir(trainNonFaultDirectory)
    # read and save
    trainImageDataList = []
    trainClassesList = []
    for imageURL in trainFaultURLList:
        csv_file = trainFaultDirectory + imageURL
        df = pd.read_csv(csv_file, delimiter=' ', header = None)
        trainImageDataList.append(df.values)
        trainClassesList.append(1)
    for imageURL in trainNonFaultURLList:
        csv_file = trainNonFaultDirectory + imageURL
        df = pd.read_csv(csv_file, delimiter=' ', header = None)
        trainImageDataList.append(df.values)
        trainClassesList.append(0)
    # sparsify labels
    Y = trainClassesList
    # pass input as numpy arrays; patches are 45x45, single channel
    imageRows = 45
    imageCollumns = 45
    imageChannels = 1
    trainSamplesList = numpy.array( trainImageDataList)
    trainSamplesList = trainSamplesList.reshape( trainSamplesList.shape[0], imageRows, imageCollumns, imageChannels )
    trainSamplesList = trainSamplesList.astype( 'float32' )
    X = trainSamplesList
    ## extract features as new input (frozen CNN forward pass)
    X = load_model(modelJsonPath, modelWeightsPath).predict(X)
    x_train = X
    y_train = Y
    x_test = []
    y_test = []
    return x_train, y_train, x_test, y_test
def create_model(x_train, y_train, x_test, y_test, numFolds= 5, c=1, k='linear', save = True, baseName='femlpModel'):
    """
    Train an SVM on CNN-extracted features, evaluate it on the hold-out set,
    print the metrics and optionally persist the fitted classifier.

    NOTE(review): despite the original wording, no Keras model is built here;
    ``numFolds`` is unused (cross-validation lives in create_modelCV) and is
    kept only for signature compatibility.
    """
    ################# define SVM #################
    clf = svm.SVC(kernel = k, C = c, probability=True, random_state=1337)
    clf.fit(x_train, y_train)
    # Classify the hold-out set; labels are one-hot encoded for the metrics.
    y = np_utils.to_categorical(y_test, 2)
    classesPredictionList = clf.predict(x_test) # 0 or 1
    classesProbaPredictionList = clf.predict_proba(x_test) # probability
    sensitivity, specificity, accuracy, precision, recall, F1_score, auc = metrics.generate_metrics(classesPredictionList,classesProbaPredictionList,y,verbose=False)
    if(save):
        # Persist the fitted classifier for later reuse.
        joblib.dump(clf, "output/"+baseName+".pkl")
    print("Accuracy: {:.4f}".format(accuracy))
    print("Sensitivity: {:.4f}".format(sensitivity))
    print("Specificity: {:.4f}".format(specificity))
    print("F1 Score: {:.4f}".format(F1_score))
    print("AUC: {:.4f}".format(auc))
def create_modelCV(x_train, y_train, x_test, y_test, numFolds= 5, c=1, k='linear'):
    """
    Evaluate the SVM with stratified k-fold cross-validation and print the
    mean and standard deviation of each metric.

    NOTE(review): x_test/y_test are unused here -- all folds come from the
    training data; they are kept only for signature symmetry.
    """
    ### Cross-validation (deterministic shuffling via fixed random_state)
    skf = StratifiedKFold(n_splits=numFolds, shuffle=True, random_state=1337)
    X = x_train
    Y = y_train
    sensitivitys, specificitys, accuracys, precisions, recalls, F1_scores, aucs = [[],[],[],[],[],[],[]]
    #kpbar = tqdm(total=numFolds, desc="Kfold", leave=False)
    y = np_utils.to_categorical(Y, 2)
    Y = numpy.array(Y)
    for train_index, test_index in skf.split(X, Y):
        ################# define a fresh SVM per fold #################
        clf = svm.SVC(kernel = k, C = c, probability=True, random_state=1337)
        clf.fit(X[train_index], Y[train_index])
        # Classify the held-out fold
        classesPredictionList = clf.predict(X[test_index]) # 0 or 1
        classesProbaPredictionList = clf.predict_proba(X[test_index]) # probability
        sensitivity, specificity, accuracy, precision, recall, F1_score, auc = metrics.generate_metrics(classesPredictionList,classesProbaPredictionList,y[test_index],verbose=False)
        sensitivitys.append(sensitivity)
        specificitys.append(specificity)
        accuracys.append(accuracy)
        precisions.append(precision)
        recalls.append(recall)
        F1_scores.append(F1_score)
        aucs.append(auc)
    # Aggregate per-fold metrics so mean()/std() are available.
    sensitivitys = numpy.array(sensitivitys)
    specificitys = numpy.array(specificitys)
    accuracys = numpy.array(accuracys)
    precisions = numpy.array(precisions)
    recalls = numpy.array(recalls)
    F1_scores = numpy.array(F1_scores)
    aucs = numpy.array(aucs)
    print("Mean Accuracy: {:.4f} (+/- {:.4f})".format(accuracys.mean(), accuracys.std()))
    print("Mean Sensitivity: {:.4f} (+/- {:.4f})".format(sensitivitys.mean(), sensitivitys.std()))
    print("Mean Specificity: {:.4f} (+/- {:.4f})".format(specificitys.mean(), specificitys.std()))
    print("Mean F1 Score: {:.4f} (+/- {:.4f})".format(F1_scores.mean(), F1_scores.std()))
    print("Mean AUC: {:.4f} (+/- {:.4f})".format(aucs.mean(), aucs.std()))
if __name__ == '__main__':
    start_time = time.time()
    print("Loading dataset...")
    # dataCV reads every patch and runs the frozen CNN feature extractor once.
    X_train, Y_train, X_test, Y_test = dataCV()
    # Deterministic 80/20 hold-out split of the extracted features.
    x_train, y_train, x_test, y_test = data(X_train, Y_train)
    print("Training...")
    create_model(x_train, y_train, x_test, y_test, numFolds=5, c=10, k='rbf')
    print("Training with cross validation...")
    create_modelCV(X_train, Y_train, X_test, Y_test, numFolds=5, c=10, k='rbf')
    print("--- {:.1f} seconds ---".format(time.time() - start_time))
| 42.306011 | 181 | 0.691036 |
__license__ = "MIT"
__author__ = "Augusto Cunha, Axelle Pochet"
__email__ = "acunha@tecgraf.puc-rio.br, axelle@tecgraf.puc-rio.br"
__credits__ = ["Augusto Cunha", "Axelle Pochet", "Helio Lopes", "Marcelo Gattass"]
miter=' ', header = None)
trainImageDataList.append(df.values)
trainClassesList.append(1)
for imageURL in trainNonFaultURLList:
csv_file = trainNonFaultDirectory + imageURL
df = pd.read_csv(csv_file, delimiter=' ', header = None)
trainImageDataList.append(df.values)
trainClassesList.append(0)
Y = trainClassesList
imageRows = 45
imageCollumns = 45
imageChannels = 1
trainSamplesList = numpy.array( trainImageDataList)
trainSamplesList = trainSamplesList.reshape( trainSamplesList.shape[0], imageRows, imageCollumns, imageChannels )
trainSamplesList = trainSamplesList.astype( 'float32' )
X = trainSamplesList
th, modelWeightsPath).predict(X)
x_train = X
y_train = Y
x_test = []
y_test = []
return x_train, y_train, x_test, y_test
def create_model(x_train, y_train, x_test, y_test, numFolds= 5, c=1, k='linear', save = True, baseName='femlpModel'):
eate_modelCV(x_train, y_train, x_test, y_test, numFolds= 5, c=1, k='linear'):
umFolds, shuffle=True, random_state=1337)
X = x_train
Y = y_train
sensitivitys, specificitys, accuracys, precisions, recalls, F1_scores, aucs = [[],[],[],[],[],[],[]]
y = np_utils.to_categorical(Y, 2)
Y = numpy.array(Y)
for train_index, test_index in skf.split(X, Y):
ficitys = numpy.array(specificitys)
accuracys = numpy.array(accuracys)
precisions = numpy.array(precisions)
recalls = numpy.array(recalls)
F1_scores = numpy.array(F1_scores)
aucs = numpy.array(aucs)
print("Mean Accuracy: {:.4f} (+/- {:.4f})".format(accuracys.mean(), accuracys.std()))
print("Mean Sensitivity: {:.4f} (+/- {:.4f})".format(sensitivitys.mean(), sensitivitys.std()))
print("Mean Specificity: {:.4f} (+/- {:.4f})".format(specificitys.mean(), specificitys.std()))
print("Mean F1 Score: {:.4f} (+/- {:.4f})".format(F1_scores.mean(), F1_scores.std()))
print("Mean AUC: {:.4f} (+/- {:.4f})".format(aucs.mean(), aucs.std()))
if __name__ == '__main__':
start_time = time.time()
print("Loading dataset...")
X_train, Y_train, X_test, Y_test = dataCV()
x_train, y_train, x_test, y_test = data(X_train, Y_train)
print("Training...")
create_model(x_train, y_train, x_test, y_test, numFolds=5, c=10, k='rbf')
print("Training with cross validation...")
create_modelCV(X_train, Y_train, X_test, Y_test, numFolds=5, c=10, k='rbf')
print("--- {:.1f} seconds ---".format(time.time() - start_time))
| true | true |
f7f4d4f97d862b6d4a11c151ec3f9909b5d29c35 | 1,849 | py | Python | main/src/mecab.py | seven320/metamon_code | 48b3acde55f5ef2d062586a3c8fc792d8b4a4025 | [
"MIT"
] | 13 | 2020-01-22T12:09:10.000Z | 2021-05-26T16:03:36.000Z | main/src/mecab.py | seven320/metamon_code | 48b3acde55f5ef2d062586a3c8fc792d8b4a4025 | [
"MIT"
] | 15 | 2020-01-20T18:46:31.000Z | 2021-12-12T11:27:35.000Z | main/src/mecab.py | seven320/metamon_code | 48b3acde55f5ef2d062586a3c8fc792d8b4a4025 | [
"MIT"
] | 2 | 2020-01-17T14:59:14.000Z | 2020-04-12T12:13:25.000Z | #encoding:utf-8
import sys
import MeCab
import tweepy
#親ディレクトリにあるアカウント情報へのパス
import sys,os
pardir=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(pardir)
#account情報をaccount.pyからロード
from account import account #load account
text="月が綺麗ですね。メロンパン。メロン。パン"
test = "豚汁垢、徹底的に中の人要素を排除して一貫性のあるツイートこころがけて話題性提供してめちゃくちゃ自発してふぁぼ爆して5日くらいでフォロー500フォロワー200?くらいにしたのなかなかじゃないか? 自業自得とはいえつらいからみんか褒めて"
# nm=MeCab
def nlp(text):
    """Return MeCab's raw morphological parse of *text* (neologd dictionary)."""
    tagger = MeCab.Tagger("-d /usr/local/lib/mecab/dic/mecab-ipadic-neologd")
    parsed = tagger.parse(text)
    return parsed
def text_to_noun(text):
    """Count noun and verb occurrences in *text* using MeCab (ChaSen output).

    Prints each verb line as it is found and, at the end, the noun and verb
    frequency tables.  Returns None.
    """
    frequency_dic = {}
    frequency_verb = {}
    # BUG FIX: the original line ended with a stray 's' after the closing
    # parenthesis, which made the whole module fail to import (SyntaxError).
    m = MeCab.Tagger("-Ochasen -d /usr/local/lib/mecab/dic/mecab-ipadic-neologd")
    # Morphological analysis: one token per line, tab-separated fields.
    node = m.parse(text)
    pos = node.split("\n")
    for line in pos:
        if "名詞" in line:  # extract nouns only
            noun = line.split("\t")[0]
            frequency_dic[noun] = frequency_dic.get(noun, 0) + 1
        if "動詞" in line:  # extract verbs (also matches auxiliary verbs, as before)
            print(line)
            verb = line.split("\t")[0]
            frequency_verb[verb] = frequency_verb.get(verb, 0) + 1
    print("名詞一覧:",frequency_dic)
    print("動詞一覧:",frequency_verb)
def main():
    """Print each recent home-timeline tweet with its noun/verb frequency tables."""
    auth = account.Initialize()
    api = tweepy.API(auth)
    twitter_id=account.id()
    public_tweets = api.home_timeline(count=10)
    for tweet in public_tweets:
        print("\n"+tweet.user.name)
        # print(tweet.user.screen_name)  # the screen name (the ID after '@')
        print(tweet.text)
        # text_to_noun prints its tables and returns None.
        print(text_to_noun(tweet.text))
if __name__=="__main__":
    # sprit_text_to_noun(text)
    # main()
    # Ad-hoc smoke test: parse the sample sentence defined above.
    print(nlp(test))
| 27.191176 | 125 | 0.633856 |
import sys
import MeCab
import tweepy
import sys,os
pardir=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(pardir)
from account import account
text="月が綺麗ですね。メロンパン。メロン。パン"
test = "豚汁垢、徹底的に中の人要素を排除して一貫性のあるツイートこころがけて話題性提供してめちゃくちゃ自発してふぁぼ爆して5日くらいでフォロー500フォロワー200?くらいにしたのなかなかじゃないか? 自業自得とはいえつらいからみんか褒めて"
def nlp(text):
    """Return MeCab's raw morphological parse of *text* (neologd dictionary)."""
    m = MeCab.Tagger("-d /usr/local/lib/mecab/dic/mecab-ipadic-neologd")
    return m.parse(text)
def text_to_noun(text):
    """Count noun and verb occurrences in *text* with MeCab (ChaSen format).

    Prints each verb line as it is found and, finally, both frequency tables.
    """
    frequency_dic = {}
    frequency_verb = {}
    # BUG FIX: the original line had a stray 's' after the closing
    # parenthesis (SyntaxError on import).
    m = MeCab.Tagger("-Ochasen -d /usr/local/lib/mecab/dic/mecab-ipadic-neologd")
    node = m.parse(text)
    pos = node.split("\n")
    for line in pos:
        if "名詞" in line:
            noun = line.split("\t")[0]
            frequency_dic[noun] = frequency_dic.get(noun, 0) + 1
        if "動詞" in line:
            print(line)
            verb = line.split("\t")[0]
            frequency_verb[verb] = frequency_verb.get(verb, 0) + 1
    print("名詞一覧:",frequency_dic)
    print("動詞一覧:",frequency_verb)
def main():
    """Fetch the latest home-timeline tweets and print noun/verb counts for each."""
    auth = account.Initialize()
    api = tweepy.API(auth)
    twitter_id=account.id()
    public_tweets = api.home_timeline(count=10)
    for tweet in public_tweets:
        print("\n"+tweet.user.name)
        print(tweet.text)
        # text_to_noun prints its tables and returns None.
        print(text_to_noun(tweet.text))
if __name__=="__main__":
    # Ad-hoc smoke test: parse the sample sentence defined above.
    print(nlp(test))
| false | true |
f7f4d523226aba579fff89dd465bb742f4a28d04 | 435 | py | Python | setup.py | rodrigocam/whaler | 1b1fe275eab690c410cf46de449f431e642da907 | [
"MIT"
] | null | null | null | setup.py | rodrigocam/whaler | 1b1fe275eab690c410cf46de449f431e642da907 | [
"MIT"
] | null | null | null | setup.py | rodrigocam/whaler | 1b1fe275eab690c410cf46de449f431e642da907 | [
"MIT"
] | null | null | null | import os
import re
from setuptools import setup, find_packages

# Read the package version straight from the source tree so it is defined in
# exactly one place.  A context manager closes the file handle (the original
# bare open(...).read() leaked it).
with open(os.path.join('src', 'whaler', '__init__.py')) as init_file:
    init = init_file.read()
# The second dot was unescaped in the original pattern (r"\d+\.\d+.\d+"),
# letting strings like "1.2x3" match; escape it.
version = re.search(r"__version__ = '(\d+\.\d+\.\d+)'", init).group(1)

setup(
    name='whaler',
    version=version,
    package_dir={'': 'src'},
    packages=find_packages('src'),
    setup_requires='setuptools >= 30.3',
    entry_points={
        'console_scripts': ['whaler=whaler.cli:main'],
    }
)
| 24.166667 | 69 | 0.632184 | import os
import re
from setuptools import setup, find_packages

# Use a context manager so the file handle is closed (the original bare
# open(...).read() leaked it); escape the previously-unescaped dot in the
# version pattern.
with open(os.path.join('src', 'whaler', '__init__.py')) as init_file:
    init = init_file.read()
version = re.search(r"__version__ = '(\d+\.\d+\.\d+)'", init).group(1)

setup(
    name='whaler',
    version=version,
    package_dir={'': 'src'},
    packages=find_packages('src'),
    setup_requires='setuptools >= 30.3',
    entry_points={
        'console_scripts': ['whaler=whaler.cli:main'],
    }
)
| true | true |
f7f4d5384abe2d0d7a8dd2981587302f1f05fb55 | 18,514 | py | Python | clevrer_dev/text_baseline/train_net.py | gabrielsluz/SlowFast | bd06eac47fa236b070fd9a3b39518eea08d02947 | [
"Apache-2.0"
] | null | null | null | clevrer_dev/text_baseline/train_net.py | gabrielsluz/SlowFast | bd06eac47fa236b070fd9a3b39518eea08d02947 | [
"Apache-2.0"
] | null | null | null | clevrer_dev/text_baseline/train_net.py | gabrielsluz/SlowFast | bd06eac47fa236b070fd9a3b39518eea08d02947 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import pprint
import torch
import copy
from torch.utils.data import DataLoader
import slowfast.models.losses as losses
import slowfast.models.optimizer as optim
import slowfast.utils.checkpoint as cu
import slowfast.utils.logging as logging
import slowfast.utils.metrics as metrics
import slowfast.utils.misc as misc
from slowfast.utils.meters import ClevrerTrainMeter, ClevrerValMeter
#Clevrer specific
from slowfast.datasets.clevrer_text import Clevrertext, Clevrertext_join, Clevrertext_des, Clevrertext_mc
from slowfast.models.build import MODEL_REGISTRY
logger = logging.get_logger(__name__)
def train_epoch(
    train_loader, model, optimizer, train_meter, cur_epoch, cfg, test_imp=False
):
    """
    Perform the video training for one epoch.

    Each batch carries a descriptive-question sub-batch and, when available,
    a multiple-choice sub-batch; the two are trained with separate forward /
    backward passes (two optimizer steps per iteration when both exist).
    Args:
        train_loader (loader): video training loader.
        model (model): the video model to train.
        optimizer (optim): the optimizer to perform optimization on the model's
            parameters.
        train_meter (ClevrerTrainMeter): training meters to log the training performance.
        cur_epoch (int): current epoch of training.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        test_imp (bool): when True, print debug info and stop after 4 batches.
    """
    test_counter = 0
    # Enable train mode.
    model.train()
    train_meter.iter_tic()
    data_size = len(train_loader)
    for cur_iter, sampled_batch in enumerate(train_loader):
        #Samples 2 batches. One for des and one for mc
        #There are much more des, then some batches are only des
        des_batch = sampled_batch['des']
        des_q = des_batch['question_dict']['question']
        des_ans = des_batch['question_dict']['ans']
        des_len = des_batch['question_dict']['len']
        # Transfer the data to the current GPU device.
        if cfg.NUM_GPUS:
            des_q = des_q.cuda(non_blocking=True)
            des_ans = des_ans.cuda()
            des_len = des_len.cuda(non_blocking=True)
        has_mc = sampled_batch['has_mc'][0]
        if has_mc:
            mc_batch = sampled_batch['mc']
            mc_q = mc_batch['question_dict']['question']
            mc_ans = mc_batch['question_dict']['ans']
            mc_len = mc_batch['question_dict']['len']
            if cfg.NUM_GPUS:
                mc_q = mc_q.cuda(non_blocking=True)
                mc_ans = mc_ans.cuda()
                mc_len = mc_len.cuda(non_blocking=True)
        # Update the learning rate.
        lr = optim.get_epoch_lr(cur_epoch + float(cur_iter) / data_size, cfg)
        optim.set_lr(optimizer, lr)
        train_meter.data_toc()
        #Separated batches: each question type gets its own optimizer step.
        #Des (descriptive questions: single-label cross-entropy)
        pred_des_ans = model(des_q, True)
        des_loss_fun = losses.get_loss_func('cross_entropy')(reduction="mean")
        loss = des_loss_fun(pred_des_ans, des_ans)
        # check Nan Loss.
        misc.check_nan_losses(loss)
        #Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        #Save for stats
        loss_des_val = loss
        #MC (multiple-choice: 4 independent binary options, BCE-with-logits)
        loss_mc_val = None
        if has_mc:
            pred_mc_ans = model(mc_q, False)
            mc_loss_fun = losses.get_loss_func('bce_logit')(reduction="mean")
            loss = mc_loss_fun(pred_mc_ans, mc_ans) #Multiply by 4
            # check Nan Loss.
            misc.check_nan_losses(loss)
            #Backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            #Save for stats
            loss_mc_val = loss
        # #Non separated Not updated for same batch 2 questions:
        # pred_des_ans = model(des_q, True)
        # pred_mc_ans = model(mc_q, False)
        # # Explicitly declare reduction to mean.
        # des_loss_fun = losses.get_loss_func('cross_entropy')(reduction="mean")
        # mc_loss_fun = losses.get_loss_func('bce_logit')(reduction="mean")
        # # Compute the loss.
        # loss_des_val = des_loss_fun(pred_des_ans, des_ans)
        # loss_mc_val = mc_loss_fun(pred_mc_ans, mc_ans)
        # loss = loss_mc_val + loss_des_val
        # # check Nan Loss.
        # misc.check_nan_losses(loss)
        # # Perform the backward pass.
        # optimizer.zero_grad()
        # loss.backward()
        # # Update the parameters.
        # optimizer.step()
        top1_err, top5_err = None, None
        # Compute the errors.
        num_topks_correct = metrics.topks_correct(pred_des_ans, des_ans, (1, 5))
        top1_err, top5_err = [
            (1.0 - x / pred_des_ans.size(0)) * 100.0 for x in num_topks_correct
        ]
        if has_mc:
            # An option counts as an error when its thresholded sigmoid
            # disagrees with the 0/1 ground truth.
            diff_mc_ans = torch.abs(mc_ans - (torch.sigmoid(pred_mc_ans) >= 0.5).float()) #Errors
            mc_opt_err = 100 * torch.true_divide(diff_mc_ans.sum(), (4*mc_q.size()[0]))
            # A question is wrong if any of its 4 options is wrong.
            mc_q_err = 100 * torch.true_divide((diff_mc_ans.sum(dim=1, keepdim=True) != 0).float().sum(), mc_q.size()[0])
            # Copy the stats from GPU to CPU (sync point).
            loss_des_val, loss_mc_val, top1_err, top5_err, mc_opt_err, mc_q_err = (
                loss_des_val.item(),
                loss_mc_val.item(),
                top1_err.item(),
                top5_err.item(),
                mc_opt_err.item(),
                mc_q_err.item()
            )
            mb_size_mc = mc_q.size()[0]
        else:
            # No MC sub-batch: the meter receives None for the MC stats.
            mc_opt_err, mc_q_err = None, None
            mb_size_mc = None
            loss_des_val, top1_err, top5_err = (
                loss_des_val.item(),
                top1_err.item(),
                top5_err.item()
            )
        #top1_err, top5_err, mc_opt_err, mc_q_err, loss_des, loss_mc, lr, mb_size
        # Update and log stats.
        train_meter.update_stats(
            top1_err,
            top5_err,
            mc_opt_err,
            mc_q_err,
            loss_des_val,
            loss_mc_val,
            lr,
            des_q.size()[0],
            mb_size_mc
        )
        train_meter.iter_toc()  # measure allreduce for this meter
        train_meter.log_iter_stats(cur_epoch, cur_iter)
        train_meter.iter_tic()
        #For testing implementation: dump a few batches then stop early.
        if test_imp:
            print(" --- Descriptive questions results --- ")
            # print("Des_q")
            # print(des_q)
            print("Des_ans")
            print(des_ans)
            #print("Des_ans_pred")
            #print(pred_des_ans)
            print("Argmax => prediction")
            print(torch.argmax(pred_des_ans, dim=1, keepdim=False))
            print("Top1_err and Top5err")
            print(top1_err, top5_err)
            print("Loss_des_val = {}".format(loss_des_val))
            if has_mc:
                print(" --- Multiple Choice questions results --- ")
                # print("Mc_q")
                # print(mc_q)
                # print("Mc errors pred x ans")
                # print(torch.abs(mc_ans - (torch.sigmoid(pred_mc_ans) >= 0.5).float()))
                print("mc_opt_err = {} \nmc_q_err = {}".format(mc_opt_err, mc_q_err))
                print("Loss_mc_val = {}".format(loss_mc_val))
            test_counter += 1
            if test_counter == 4:
                break
    # Log epoch stats.
    train_meter.log_epoch_stats(cur_epoch)
    train_meter.reset()
@torch.no_grad()
def eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, test_imp=False):
    """
    Evaluate the model on the val set.

    Mirrors train_epoch's metric computation (descriptive top-1/top-5 error
    plus multiple-choice per-option and per-question error) without any
    backward pass; gradients are disabled by the decorator.
    Args:
        val_loader (loader): data loader to provide validation data.
        model (model): model to evaluate the performance.
        val_meter (ClevrerValMeter): meter instance to record and calculate the metrics.
        cur_epoch (int): number of the current epoch of training.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        test_imp (bool): when True, print debug info and stop after 4 batches.
    """
    test_counter = 0
    # Evaluation mode enabled. The running stats would not be updated.
    model.eval()
    val_meter.iter_tic()
    for cur_iter, sampled_batch in enumerate(val_loader):
        #Samples 2 batches. One for des and one for mc
        #There are much more des, then some batches are only des
        des_batch = sampled_batch['des']
        des_q = des_batch['question_dict']['question']
        des_ans = des_batch['question_dict']['ans']
        des_len = des_batch['question_dict']['len']
        # Transfer the data to the current GPU device.
        if cfg.NUM_GPUS:
            des_q = des_q.cuda(non_blocking=True)
            des_ans = des_ans.cuda()
            des_len = des_len.cuda(non_blocking=True)
        has_mc = sampled_batch['has_mc'][0]
        if has_mc:
            mc_batch = sampled_batch['mc']
            mc_q = mc_batch['question_dict']['question']
            mc_ans = mc_batch['question_dict']['ans']
            mc_len = mc_batch['question_dict']['len']
            if cfg.NUM_GPUS:
                mc_q = mc_q.cuda(non_blocking=True)
                mc_ans = mc_ans.cuda()
                mc_len = mc_len.cuda(non_blocking=True)
        val_meter.data_toc()
        # Explicitly declare reduction to mean.
        des_loss_fun = losses.get_loss_func('cross_entropy')(reduction="mean")
        mc_loss_fun = losses.get_loss_func('bce_logit')(reduction="mean")
        pred_des_ans = model(des_q, True)
        loss_des_val = des_loss_fun(pred_des_ans, des_ans)
        loss_mc_val = None
        if has_mc:
            pred_mc_ans = model(mc_q, False)
            loss_mc_val = mc_loss_fun(pred_mc_ans, mc_ans)
        # Compute the errors.
        num_topks_correct = metrics.topks_correct(pred_des_ans, des_ans, (1, 5))
        # Combine the errors across the GPUs.
        top1_err, top5_err = [
            (1.0 - x / pred_des_ans.size(0)) * 100.0 for x in num_topks_correct
        ]
        if has_mc:
            # An option counts as an error when its thresholded sigmoid
            # disagrees with the 0/1 ground truth.
            diff_mc_ans = torch.abs(mc_ans - (torch.sigmoid(pred_mc_ans) >= 0.5).float()) #Errors
            mc_opt_err = 100 * torch.true_divide(diff_mc_ans.sum(), (4*mc_q.size()[0]))
            # A question is wrong if any of its 4 options is wrong.
            mc_q_err = 100 * torch.true_divide((diff_mc_ans.sum(dim=1, keepdim=True) != 0).float().sum(), mc_q.size()[0])
            # Copy the stats from GPU to CPU (sync point).
            loss_des_val, loss_mc_val, top1_err, top5_err, mc_opt_err, mc_q_err = (
                loss_des_val.item(),
                loss_mc_val.item(),
                top1_err.item(),
                top5_err.item(),
                mc_opt_err.item(),
                mc_q_err.item()
            )
            mb_size_mc = mc_q.size()[0]
        else:
            # No MC sub-batch: the meter receives None for the MC stats.
            mc_opt_err, mc_q_err = None, None
            mb_size_mc = None
            loss_des_val, top1_err, top5_err = (
                loss_des_val.item(),
                top1_err.item(),
                top5_err.item()
            )
        val_meter.iter_toc()
        #top1_err, top5_err, mc_opt_err, mc_q_err, loss_des, loss_mc, mb_size_des, mb_size_mc
        # Update and log stats.
        val_meter.update_stats(
            top1_err,
            top5_err,
            mc_opt_err,
            mc_q_err,
            loss_des_val,
            loss_mc_val,
            des_q.size()[0],
            mb_size_mc
        )
        val_meter.log_iter_stats(cur_epoch, cur_iter)
        val_meter.iter_tic()
        #For testing implementation: dump a few batches then stop early.
        if test_imp:
            print(" --- Descriptive questions results --- ")
            # print("Des_q")
            # print(des_q)
            print("Des_ans")
            print(des_ans)
            #print("Des_ans_pred")
            #print(pred_des_ans)
            print("Argmax => prediction")
            print(torch.argmax(pred_des_ans, dim=1, keepdim=False))
            print("Top1_err and Top5err")
            print(top1_err, top5_err)
            print("Loss_des_val = {}".format(loss_des_val))
            if has_mc:
                print(" --- Multiple Choice questions results --- ")
                # print("Mc_q")
                # print(mc_q)
                # print("Mc errors pred x ans")
                # print(torch.abs(mc_ans - (torch.sigmoid(pred_mc_ans) >= 0.5).float()))
                print("mc_opt_err = {} \nmc_q_err = {}".format(mc_opt_err, mc_q_err))
                print("Loss_mc_val = {}".format(loss_mc_val))
            test_counter += 1
            if test_counter == 4:
                break
    # Log epoch stats.
    val_meter.log_epoch_stats(cur_epoch)
    val_meter.reset()
def build_clevrer_model(cfg, gpu_id=None):
    """Build a CLEVRER text model configured from the training vocabulary.

    Lives here (rather than in the generic builder) because the model needs
    dataset-specific vocabulary sizes at construction time.

    Args:
        cfg: Experiment configuration node.
        gpu_id: Optional explicit GPU index; defaults to the current device.

    Returns:
        The constructed model, moved to GPU and wrapped in DDP when requested.
    """
    # Vocabulary sizes are dataset specific, so peek at the train split first.
    train_split = Clevrertext(cfg, 'train')
    n_words = train_split.get_vocab_len()
    n_answers = train_split.get_ans_vocab_len()
    word_table = train_split.get_vocab()
    # Sanity-check the GPU request against the machine we are running on.
    if torch.cuda.is_available():
        assert (
            cfg.NUM_GPUS <= torch.cuda.device_count()
        ), "Cannot use more GPU devices than available"
    else:
        assert (
            cfg.NUM_GPUS == 0
        ), "Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs."
    model = MODEL_REGISTRY.get(cfg.MODEL.MODEL_NAME)(
        cfg, n_words, n_answers, word_table
    )
    if cfg.NUM_GPUS:
        cur_device = torch.cuda.current_device() if gpu_id is None else gpu_id
        model = model.cuda(device=cur_device)
        if cfg.NUM_GPUS > 1:
            # One process per GPU: replicate on this process's device only.
            model = torch.nn.parallel.DistributedDataParallel(
                module=model, device_ids=[cur_device], output_device=cur_device
            )
    return model
def build_dataloader(cfg, mode):
    """Return a DataLoader over the joined descriptive + multiple-choice sets.

    Args:
        cfg: Experiment configuration node.
        mode: Dataset split name; 'train' enables shuffling.
    """
    joined = Clevrertext_join(
        Clevrertext_des(cfg, mode),
        Clevrertext_mc(cfg, mode),
    )
    return DataLoader(
        joined,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        shuffle=(mode == 'train'),
        num_workers=cfg.DATA_LOADER.NUM_WORKERS,
        pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
    )
def train(cfg):
    """
    Train a video model for many epochs on train set and evaluate it on val set.
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    # Set random seeds from configs for reproducibility.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    # Setup logging format.
    logging.setup_logging(cfg.OUTPUT_DIR)
    # Print config.
    logger.info("Train with config:")
    logger.info(pprint.pformat(cfg))
    # Build the CLEVRER text model (vocab sizes come from the dataset).
    model = build_clevrer_model(cfg)
    # Construct the optimizer.
    optimizer = optim.construct_optimizer(model, cfg)
    # Load a checkpoint to resume training if applicable.
    start_epoch = cu.load_train_checkpoint(cfg, model, optimizer)
    # This script only supports the joined descriptive + MC text dataset.
    if cfg.TRAIN.DATASET != 'Clevrertext_join':
        print("This train script does not support your dataset: -{}-. Only Clevrertext_join".format(cfg.TRAIN.DATASET))
        exit()
    # Create the train and val loaders.
    train_loader = build_dataloader(cfg, "train")
    val_loader = build_dataloader(cfg, "val")
    # Create meters that accumulate and log per-iteration statistics.
    train_meter = ClevrerTrainMeter(len(train_loader), cfg)
    val_meter = ClevrerValMeter(len(val_loader), cfg)
    # Perform the training loop.
    logger.info("Start epoch: {}".format(start_epoch + 1))
    for cur_epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCH):
        # Shuffling is handled by the DataLoader itself (shuffle=True for train).
        # Train for one epoch.
        train_epoch(
            train_loader, model, optimizer, train_meter, cur_epoch, cfg
        )
        is_checkp_epoch = cu.is_checkpoint_epoch(
            cfg,
            cur_epoch,
            None,
        )
        is_eval_epoch = misc.is_eval_epoch(
            cfg, cur_epoch, None
        )
        # Save a checkpoint.
        if is_checkp_epoch:
            cu.save_checkpoint(cfg.OUTPUT_DIR, model, optimizer, cur_epoch, cfg)
        # Evaluate the model on the validation set.
        if is_eval_epoch:
            eval_epoch(val_loader, model, val_meter, cur_epoch, cfg)
def test_implementation(cfg):
    """
    Simulates a train and val epoch to check if the gradients are being updated,
    metrics are being calculated correctly
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    # Set random seeds from configs so the dry run is reproducible.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    # Setup logging format.
    logging.setup_logging(cfg.OUTPUT_DIR)
    logger.info("Test implementation")
    # Build the model, optimizer and (optional) checkpoint exactly as `train` does.
    model = build_clevrer_model(cfg)
    optimizer = optim.construct_optimizer(model, cfg)
    start_epoch = cu.load_train_checkpoint(cfg, model, optimizer)
    # Abort on unsupported datasets.
    # Fix: previously this only printed the error and carried on with the
    # wrong dataset, unlike `train`, which exits on the same condition.
    if cfg.TRAIN.DATASET != 'Clevrertext_join':
        print("This train script does not support your dataset: -{}-. Only Clevrertext_join".format(cfg.TRAIN.DATASET))
        exit()
    # Create the train and val loaders and their meters.
    train_loader = build_dataloader(cfg, "train")
    val_loader = build_dataloader(cfg, "val")
    train_meter = ClevrerTrainMeter(len(train_loader), cfg)
    val_meter = ClevrerValMeter(len(val_loader), cfg)
    logger.info("Start epoch: {}".format(start_epoch + 1))
    # Keep a deep copy so we can verify that training changed the weights.
    model_before = copy.deepcopy(model)
    cur_epoch = start_epoch
    # Run a short training epoch (test_imp limits it to a few batches).
    train_epoch(
        train_loader, model, optimizer, train_meter, cur_epoch, cfg, test_imp=True
    )
    print("Check if parameters changed")
    for (p_b_name, p_b), (p_name, p) in zip(model_before.named_parameters(), model.named_parameters()):
        if p.requires_grad:
            print("Parameter requires grad:")
            print(p_name, p_b_name)
            # At least one element must differ after the optimizer steps.
            assert (p_b != p).any()
            print("--Check--")
        else:
            print("Parameter does not require grad:")
            print(p_name)
            print(p)
    print("Val epoch")
    eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, test_imp=True)
import numpy as np
import pprint
import torch
import copy
from torch.utils.data import DataLoader
import slowfast.models.losses as losses
import slowfast.models.optimizer as optim
import slowfast.utils.checkpoint as cu
import slowfast.utils.logging as logging
import slowfast.utils.metrics as metrics
import slowfast.utils.misc as misc
from slowfast.utils.meters import ClevrerTrainMeter, ClevrerValMeter
from slowfast.datasets.clevrer_text import Clevrertext, Clevrertext_join, Clevrertext_des, Clevrertext_mc
from slowfast.models.build import MODEL_REGISTRY
logger = logging.get_logger(__name__)
def train_epoch(
    train_loader, model, optimizer, train_meter, cur_epoch, cfg, test_imp=False
):
    """
    Run one training epoch over the joined descriptive + multiple-choice data.

    Each batch always carries descriptive questions; multiple-choice (MC)
    questions are optional (flagged by 'has_mc'). The descriptive and MC
    losses are optimized with two separate backward/step calls per batch.

    Args:
        train_loader: loader yielding joined des/mc batches.
        model: CLEVRER text model; second positional arg selects des (True)
            or mc (False) head.
        optimizer: the optimizer to step.
        train_meter: meter accumulating and logging training statistics.
        cur_epoch (int): index of the current epoch.
        cfg (CfgNode): experiment configuration.
        test_imp (bool): when True, print detailed per-batch results and stop
            after four batches (implementation sanity check).
    """
    test_counter = 0
    model.train()
    train_meter.iter_tic()
    data_size = len(train_loader)
    for cur_iter, sampled_batch in enumerate(train_loader):
        # Unpack the descriptive-question part of the batch.
        des_batch = sampled_batch['des']
        des_q = des_batch['question_dict']['question']
        des_ans = des_batch['question_dict']['ans']
        des_len = des_batch['question_dict']['len']
        if cfg.NUM_GPUS:
            des_q = des_q.cuda(non_blocking=True)
            des_ans = des_ans.cuda()
            des_len = des_len.cuda(non_blocking=True)
        # MC questions are only present in some batches.
        has_mc = sampled_batch['has_mc'][0]
        if has_mc:
            mc_batch = sampled_batch['mc']
            mc_q = mc_batch['question_dict']['question']
            mc_ans = mc_batch['question_dict']['ans']
            mc_len = mc_batch['question_dict']['len']
            if cfg.NUM_GPUS:
                mc_q = mc_q.cuda(non_blocking=True)
                mc_ans = mc_ans.cuda()
                mc_len = mc_len.cuda(non_blocking=True)
        # Update the learning rate for this (fractional) epoch position.
        lr = optim.get_epoch_lr(cur_epoch + float(cur_iter) / data_size, cfg)
        optim.set_lr(optimizer, lr)
        train_meter.data_toc()
        # Descriptive head: cross-entropy over answer vocabulary.
        pred_des_ans = model(des_q, True)
        des_loss_fun = losses.get_loss_func('cross_entropy')(reduction="mean")
        loss = des_loss_fun(pred_des_ans, des_ans)
        misc.check_nan_losses(loss)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_des_val = loss
        loss_mc_val = None
        if has_mc:
            # MC head: per-option binary classification with BCE-with-logits.
            pred_mc_ans = model(mc_q, False)
            mc_loss_fun = losses.get_loss_func('bce_logit')(reduction="mean")
            loss = mc_loss_fun(pred_mc_ans, mc_ans)
            misc.check_nan_losses(loss)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_mc_val = loss
        None  # no-op statement (dead code); kept byte-identical on purpose
        # Descriptive top-1/top-5 error rates.
        num_topks_correct = metrics.topks_correct(pred_des_ans, des_ans, (1, 5))
        top1_err, top5_err = [
            (1.0 - x / pred_des_ans.size(0)) * 100.0 for x in num_topks_correct
        ]
        if has_mc:
            # Per-option and per-question MC error rates (threshold at 0.5).
            diff_mc_ans = torch.abs(mc_ans - (torch.sigmoid(pred_mc_ans) >= 0.5).float())
            mc_opt_err = 100 * torch.true_divide(diff_mc_ans.sum(), (4*mc_q.size()[0]))
            mc_q_err = 100 * torch.true_divide((diff_mc_ans.sum(dim=1, keepdim=True) != 0).float().sum(), mc_q.size()[0])
            # Copy the stats from GPU to CPU (sync point).
            loss_des_val, loss_mc_val, top1_err, top5_err, mc_opt_err, mc_q_err = (
                loss_des_val.item(),
                loss_mc_val.item(),
                top1_err.item(),
                top5_err.item(),
                mc_opt_err.item(),
                mc_q_err.item()
            )
            mb_size_mc = mc_q.size()[0]
        else:
            mc_opt_err, mc_q_err = None, None
            mb_size_mc = None
            loss_des_val, top1_err, top5_err = (
                loss_des_val.item(),
                top1_err.item(),
                top5_err.item()
            )
        # Update and log stats.
        train_meter.update_stats(
            top1_err,
            top5_err,
            mc_opt_err,
            mc_q_err,
            loss_des_val,
            loss_mc_val,
            lr,
            des_q.size()[0],
            mb_size_mc
        )
        train_meter.iter_toc()
        train_meter.log_iter_stats(cur_epoch, cur_iter)
        train_meter.iter_tic()
        # Verbose output for implementation testing.
        if test_imp:
            print(" --- Descriptive questions results --- ")
            print("Des_ans")
            print(des_ans)
            print("Argmax => prediction")
            print(torch.argmax(pred_des_ans, dim=1, keepdim=False))
            print("Top1_err and Top5err")
            print(top1_err, top5_err)
            print("Loss_des_val = {}".format(loss_des_val))
            if has_mc:
                print(" --- Multiple Choice questions results --- ")
                print("mc_opt_err = {} \nmc_q_err = {}".format(mc_opt_err, mc_q_err))
                print("Loss_mc_val = {}".format(loss_mc_val))
            test_counter += 1
            if test_counter == 4:
                break
    # Log epoch stats.
    train_meter.log_epoch_stats(cur_epoch)
    train_meter.reset()
@torch.no_grad()
def eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, test_imp=False):
    """
    Evaluate the model on the validation set with gradients disabled.

    Mirrors train_epoch's metric computation (descriptive top-1/top-5 errors
    plus optional multiple-choice option/question errors) but performs no
    optimization.

    Args:
        val_loader: loader yielding joined des/mc validation batches.
        model: CLEVRER text model; second positional arg selects the head.
        val_meter: meter accumulating and logging validation statistics.
        cur_epoch (int): index of the current epoch.
        cfg (CfgNode): experiment configuration.
        test_imp (bool): when True, print detailed per-batch results and stop
            after four batches (implementation sanity check).
    """
    test_counter = 0
    model.eval()
    val_meter.iter_tic()
    for cur_iter, sampled_batch in enumerate(val_loader):
        # Unpack the descriptive-question part of the batch.
        des_batch = sampled_batch['des']
        des_q = des_batch['question_dict']['question']
        des_ans = des_batch['question_dict']['ans']
        des_len = des_batch['question_dict']['len']
        if cfg.NUM_GPUS:
            des_q = des_q.cuda(non_blocking=True)
            des_ans = des_ans.cuda()
            des_len = des_len.cuda(non_blocking=True)
        # MC questions are only present in some batches.
        has_mc = sampled_batch['has_mc'][0]
        if has_mc:
            mc_batch = sampled_batch['mc']
            mc_q = mc_batch['question_dict']['question']
            mc_ans = mc_batch['question_dict']['ans']
            mc_len = mc_batch['question_dict']['len']
            if cfg.NUM_GPUS:
                mc_q = mc_q.cuda(non_blocking=True)
                mc_ans = mc_ans.cuda()
                mc_len = mc_len.cuda(non_blocking=True)
        val_meter.data_toc()
        # Losses are computed for logging only (no backward pass here).
        des_loss_fun = losses.get_loss_func('cross_entropy')(reduction="mean")
        mc_loss_fun = losses.get_loss_func('bce_logit')(reduction="mean")
        pred_des_ans = model(des_q, True)
        loss_des_val = des_loss_fun(pred_des_ans, des_ans)
        loss_mc_val = None
        if has_mc:
            pred_mc_ans = model(mc_q, False)
            loss_mc_val = mc_loss_fun(pred_mc_ans, mc_ans)
        # Descriptive top-1/top-5 error rates.
        num_topks_correct = metrics.topks_correct(pred_des_ans, des_ans, (1, 5))
        top1_err, top5_err = [
            (1.0 - x / pred_des_ans.size(0)) * 100.0 for x in num_topks_correct
        ]
        if has_mc:
            # Per-option and per-question MC error rates (threshold at 0.5).
            diff_mc_ans = torch.abs(mc_ans - (torch.sigmoid(pred_mc_ans) >= 0.5).float())
            mc_opt_err = 100 * torch.true_divide(diff_mc_ans.sum(), (4*mc_q.size()[0]))
            mc_q_err = 100 * torch.true_divide((diff_mc_ans.sum(dim=1, keepdim=True) != 0).float().sum(), mc_q.size()[0])
            # Copy the stats from GPU to CPU (sync point).
            loss_des_val, loss_mc_val, top1_err, top5_err, mc_opt_err, mc_q_err = (
                loss_des_val.item(),
                loss_mc_val.item(),
                top1_err.item(),
                top5_err.item(),
                mc_opt_err.item(),
                mc_q_err.item()
            )
            mb_size_mc = mc_q.size()[0]
        else:
            mc_opt_err, mc_q_err = None, None
            mb_size_mc = None
            loss_des_val, top1_err, top5_err = (
                loss_des_val.item(),
                top1_err.item(),
                top5_err.item()
            )
        val_meter.iter_toc()
        # Update and log stats.
        val_meter.update_stats(
            top1_err,
            top5_err,
            mc_opt_err,
            mc_q_err,
            loss_des_val,
            loss_mc_val,
            des_q.size()[0],
            mb_size_mc
        )
        val_meter.log_iter_stats(cur_epoch, cur_iter)
        val_meter.iter_tic()
        # Verbose output for implementation testing.
        if test_imp:
            print(" --- Descriptive questions results --- ")
            print("Des_ans")
            print(des_ans)
            print("Argmax => prediction")
            print(torch.argmax(pred_des_ans, dim=1, keepdim=False))
            print("Top1_err and Top5err")
            print(top1_err, top5_err)
            print("Loss_des_val = {}".format(loss_des_val))
            if has_mc:
                print(" --- Multiple Choice questions results --- ")
                print("mc_opt_err = {} \nmc_q_err = {}".format(mc_opt_err, mc_q_err))
                print("Loss_mc_val = {}".format(loss_mc_val))
            test_counter += 1
            if test_counter == 4:
                break
    # Log epoch stats.
    val_meter.log_epoch_stats(cur_epoch)
    val_meter.reset()
def build_clevrer_model(cfg, gpu_id=None):
    """Build a CLEVRER text model configured from the training vocabulary.

    Args:
        cfg: Experiment configuration node.
        gpu_id: Optional explicit GPU index; defaults to the current device.

    Returns:
        The constructed model, moved to GPU and wrapped in DDP when requested.
    """
    # Vocabulary sizes are dataset specific, so peek at the train split first.
    train_split = Clevrertext(cfg, 'train')
    n_words = train_split.get_vocab_len()
    n_answers = train_split.get_ans_vocab_len()
    word_table = train_split.get_vocab()
    # Sanity-check the GPU request against the machine we are running on.
    if torch.cuda.is_available():
        assert (
            cfg.NUM_GPUS <= torch.cuda.device_count()
        ), "Cannot use more GPU devices than available"
    else:
        assert (
            cfg.NUM_GPUS == 0
        ), "Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs."
    model = MODEL_REGISTRY.get(cfg.MODEL.MODEL_NAME)(
        cfg, n_words, n_answers, word_table
    )
    if cfg.NUM_GPUS:
        cur_device = torch.cuda.current_device() if gpu_id is None else gpu_id
        model = model.cuda(device=cur_device)
        if cfg.NUM_GPUS > 1:
            # One process per GPU: replicate on this process's device only.
            model = torch.nn.parallel.DistributedDataParallel(
                module=model, device_ids=[cur_device], output_device=cur_device
            )
    return model
def build_dataloader(cfg, mode):
    """Return a DataLoader over the joined descriptive + multiple-choice sets.

    Args:
        cfg: Experiment configuration node.
        mode: Dataset split name; 'train' enables shuffling.
    """
    joined = Clevrertext_join(
        Clevrertext_des(cfg, mode),
        Clevrertext_mc(cfg, mode),
    )
    return DataLoader(
        joined,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        shuffle=(mode == 'train'),
        num_workers=cfg.DATA_LOADER.NUM_WORKERS,
        pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
    )
def train(cfg):
    """
    Train a video model for many epochs on train set and evaluate it on val set.
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    # Set random seeds from configs for reproducibility.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    # Setup logging and dump the full config.
    logging.setup_logging(cfg.OUTPUT_DIR)
    logger.info("Train with config:")
    logger.info(pprint.pformat(cfg))
    # Build the model, optimizer, and optionally resume from a checkpoint.
    model = build_clevrer_model(cfg)
    optimizer = optim.construct_optimizer(model, cfg)
    start_epoch = cu.load_train_checkpoint(cfg, model, optimizer)
    # This script only supports the joined descriptive + MC text dataset.
    if cfg.TRAIN.DATASET != 'Clevrertext_join':
        print("This train script does not support your dataset: -{}-. Only Clevrertext_join".format(cfg.TRAIN.DATASET))
        exit()
    # Create the train and val loaders and their meters.
    train_loader = build_dataloader(cfg, "train")
    val_loader = build_dataloader(cfg, "val")
    train_meter = ClevrerTrainMeter(len(train_loader), cfg)
    val_meter = ClevrerValMeter(len(val_loader), cfg)
    # Perform the training loop.
    logger.info("Start epoch: {}".format(start_epoch + 1))
    for cur_epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCH):
        # Train for one epoch.
        train_epoch(
            train_loader, model, optimizer, train_meter, cur_epoch, cfg
        )
        is_checkp_epoch = cu.is_checkpoint_epoch(
            cfg,
            cur_epoch,
            None,
        )
        is_eval_epoch = misc.is_eval_epoch(
            cfg, cur_epoch, None
        )
        # Save a checkpoint.
        if is_checkp_epoch:
            cu.save_checkpoint(cfg.OUTPUT_DIR, model, optimizer, cur_epoch, cfg)
        # Evaluate the model on the validation set.
        if is_eval_epoch:
            eval_epoch(val_loader, model, val_meter, cur_epoch, cfg)
def test_implementation(cfg):
    """
    Simulates a train and val epoch to check if the gradients are being updated,
    metrics are being calculated correctly
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    # Set random seeds from configs so the dry run is reproducible.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    logging.setup_logging(cfg.OUTPUT_DIR)
    logger.info("Test implementation")
    # Build the model, optimizer and (optional) checkpoint exactly as `train` does.
    model = build_clevrer_model(cfg)
    optimizer = optim.construct_optimizer(model, cfg)
    start_epoch = cu.load_train_checkpoint(cfg, model, optimizer)
    # Abort on unsupported datasets.
    # Fix: previously this only printed the error and carried on with the
    # wrong dataset, unlike `train`, which exits on the same condition.
    if cfg.TRAIN.DATASET != 'Clevrertext_join':
        print("This train script does not support your dataset: -{}-. Only Clevrertext_join".format(cfg.TRAIN.DATASET))
        exit()
    # Create the train and val loaders and their meters.
    train_loader = build_dataloader(cfg, "train")
    val_loader = build_dataloader(cfg, "val")
    train_meter = ClevrerTrainMeter(len(train_loader), cfg)
    val_meter = ClevrerValMeter(len(val_loader), cfg)
    logger.info("Start epoch: {}".format(start_epoch + 1))
    # Keep a deep copy so we can verify that training changed the weights.
    model_before = copy.deepcopy(model)
    cur_epoch = start_epoch
    # Run a short training epoch (test_imp limits it to a few batches).
    train_epoch(
        train_loader, model, optimizer, train_meter, cur_epoch, cfg, test_imp=True
    )
    print("Check if parameters changed")
    for (p_b_name, p_b), (p_name, p) in zip(model_before.named_parameters(), model.named_parameters()):
        if p.requires_grad:
            print("Parameter requires grad:")
            print(p_name, p_b_name)
            # At least one element must differ after the optimizer steps.
            assert (p_b != p).any()
            print("--Check--")
        else:
            print("Parameter does not require grad:")
            print(p_name)
            print(p)
    print("Val epoch")
    eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, test_imp=True)
f7f4d8f12db74bf89cfd3ca8261da108c94fb6e3 | 426 | py | Python | runoob/basic_tutorial/parent_child_2.py | zeroonegit/python | 919f8bb14ae91e37e42ff08192df24b60135596f | [
"MIT"
] | 1 | 2017-03-30T00:43:40.000Z | 2017-03-30T00:43:40.000Z | runoob/basic_tutorial/parent_child_2.py | QuinceySun/Python | 919f8bb14ae91e37e42ff08192df24b60135596f | [
"MIT"
] | null | null | null | runoob/basic_tutorial/parent_child_2.py | QuinceySun/Python | 919f8bb14ae91e37e42ff08192df24b60135596f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
############################
# File Name: parent_child_2.py
# Author: One Zero
# Mail: zeroonegit@gmail.com
# Created Time: 2015-12-28 23:23:13
############################
class Parent: # define the parent class
    """Demo base class; Child (below) overrides myMethod."""
    def myMethod(self):
        """Print a marker identifying the parent implementation."""
        print("调用父类方法")  # message reads "calling the parent class method"
class Child(Parent): # define the subclass
    """Demo subclass overriding Parent.myMethod."""
    def myMethod(self):
        """Print a marker identifying the child override."""
        print("调用子类方法")  # message reads "calling the subclass method"
c = Child() # instantiate the subclass
c.myMethod() # dynamic dispatch selects the child's override
| 19.363636 | 35 | 0.528169 | true | true | |
f7f4dc9cf5e2229aa7650cc2913a282aef1762e9 | 3,051 | py | Python | gcloud/commons/template/migrations/0001_initial.py | gangh/bk-sops | 29f4b4915be42650c2eeee637e0cf798e4066f09 | [
"Apache-2.0"
] | 1 | 2019-12-23T07:23:35.000Z | 2019-12-23T07:23:35.000Z | gcloud/commons/template/migrations/0001_initial.py | bk-sops/bk-sops | 9f5950b13473bf7b5032528b20016b7a571bb3cd | [
"Apache-2.0"
] | 9 | 2020-02-12T03:15:49.000Z | 2021-06-10T22:04:51.000Z | gcloud/commons/template/migrations/0001_initial.py | tanghaiyong1989/bk-sops-ce | 7388914acc4004469982d6b5bf9cd7641bdf82f7 | [
"Apache-2.0"
] | 1 | 2022-01-17T11:32:05.000Z | 2022-01-17T11:32:05.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: creates the CommonTemplate model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('pipeline', '0013_old_template_process'),
    ]
    operations = [
        migrations.CreateModel(
            name='CommonTemplate',
            fields=[
                # Auto-increment primary key.
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Template category; escaped-unicode labels are Chinese display names.
                ('category', models.CharField(default=b'Other', max_length=255, verbose_name='\u6a21\u677f\u7c7b\u578b', choices=[(b'OpsTools', '\u8fd0\u7ef4\u5de5\u5177'), (b'MonitorAlarm', '\u76d1\u63a7\u544a\u8b66'), (b'ConfManage', '\u914d\u7f6e\u7ba1\u7406'), (b'DevTools', '\u5f00\u53d1\u5de5\u5177'), (b'EnterpriseIT', '\u4f01\u4e1aIT'), (b'OfficeApp', '\u529e\u516c\u5e94\u7528'), (b'Other', '\u5176\u5b83')])),
                # Defaults '[]'/'{}' suggest JSON-encoded values — confirm with app code.
                ('notify_type', models.CharField(default=b'[]', max_length=128, verbose_name='\u6d41\u7a0b\u4e8b\u4ef6\u901a\u77e5\u65b9\u5f0f')),
                ('notify_receivers', models.TextField(default=b'{}', verbose_name='\u6d41\u7a0b\u4e8b\u4ef6\u901a\u77e5\u4eba')),
                # Flow timeout in minutes (verbose_name says "minutes").
                ('time_out', models.IntegerField(default=20, verbose_name='\u6d41\u7a0b\u8d85\u65f6\u65f6\u95f4(\u5206\u949f)')),
                # Soft-delete flag.
                ('is_deleted', models.BooleanField(default=False, verbose_name='\u662f\u5426\u5220\u9664')),
                # Users who favourited this template.
                ('collector', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='\u6536\u85cf\u6a21\u677f\u7684\u4eba', blank=True)),
                # Underlying pipeline template; kept NULL if that row is removed.
                ('pipeline_template', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to_field=b'template_id', blank=True, to='pipeline.PipelineTemplate', null=True)),
            ],
            options={
                'ordering': ['-id'],
                'abstract': False,
                'verbose_name': '\u516c\u5171\u6d41\u7a0b\u6a21\u677f CommonTemplate',
                'verbose_name_plural': '\u516c\u5171\u6d41\u7a0b\u6a21\u677f CommonTemplate',
                'permissions': [('create_task', '\u65b0\u5efa\u4efb\u52a1'), ('fill_params', '\u586b\u5199\u53c2\u6570'), ('execute_task', '\u6267\u884c\u4efb\u52a1')],
            },
        ),
    ]
| 61.02 | 419 | 0.683382 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: creates the CommonTemplate model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('pipeline', '0013_old_template_process'),
    ]
    operations = [
        migrations.CreateModel(
            name='CommonTemplate',
            fields=[
                # Auto-increment primary key.
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Template category; escaped-unicode labels are Chinese display names.
                ('category', models.CharField(default=b'Other', max_length=255, verbose_name='\u6a21\u677f\u7c7b\u578b', choices=[(b'OpsTools', '\u8fd0\u7ef4\u5de5\u5177'), (b'MonitorAlarm', '\u76d1\u63a7\u544a\u8b66'), (b'ConfManage', '\u914d\u7f6e\u7ba1\u7406'), (b'DevTools', '\u5f00\u53d1\u5de5\u5177'), (b'EnterpriseIT', '\u4f01\u4e1aIT'), (b'OfficeApp', '\u529e\u516c\u5e94\u7528'), (b'Other', '\u5176\u5b83')])),
                # Defaults '[]'/'{}' suggest JSON-encoded values — confirm with app code.
                ('notify_type', models.CharField(default=b'[]', max_length=128, verbose_name='\u6d41\u7a0b\u4e8b\u4ef6\u901a\u77e5\u65b9\u5f0f')),
                ('notify_receivers', models.TextField(default=b'{}', verbose_name='\u6d41\u7a0b\u4e8b\u4ef6\u901a\u77e5\u4eba')),
                # Flow timeout in minutes (verbose_name says "minutes").
                ('time_out', models.IntegerField(default=20, verbose_name='\u6d41\u7a0b\u8d85\u65f6\u65f6\u95f4(\u5206\u949f)')),
                # Soft-delete flag.
                ('is_deleted', models.BooleanField(default=False, verbose_name='\u662f\u5426\u5220\u9664')),
                # Users who favourited this template.
                ('collector', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='\u6536\u85cf\u6a21\u677f\u7684\u4eba', blank=True)),
                # Underlying pipeline template; kept NULL if that row is removed.
                ('pipeline_template', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to_field=b'template_id', blank=True, to='pipeline.PipelineTemplate', null=True)),
            ],
            options={
                'ordering': ['-id'],
                'abstract': False,
                'verbose_name': '\u516c\u5171\u6d41\u7a0b\u6a21\u677f CommonTemplate',
                'verbose_name_plural': '\u516c\u5171\u6d41\u7a0b\u6a21\u677f CommonTemplate',
                'permissions': [('create_task', '\u65b0\u5efa\u4efb\u52a1'), ('fill_params', '\u586b\u5199\u53c2\u6570'), ('execute_task', '\u6267\u884c\u4efb\u52a1')],
            },
        ),
    ]
| true | true |
f7f4dcf61ec7e2d85bf3dc6a2bab6106bdb18d18 | 48 | py | Python | implementation_files/cosim_pandapipes_pandapower/simulators/time_series_player/__init__.py | ERIGrid2/benchmark-model-multi-energy-networks | 4172480a5fcdf99d086b98ea24e00342f8e42a91 | [
"BSD-3-Clause"
] | null | null | null | implementation_files/cosim_pandapipes_pandapower/simulators/time_series_player/__init__.py | ERIGrid2/benchmark-model-multi-energy-networks | 4172480a5fcdf99d086b98ea24e00342f8e42a91 | [
"BSD-3-Clause"
] | null | null | null | implementation_files/cosim_pandapipes_pandapower/simulators/time_series_player/__init__.py | ERIGrid2/benchmark-model-multi-energy-networks | 4172480a5fcdf99d086b98ea24e00342f8e42a91 | [
"BSD-3-Clause"
] | null | null | null | from .mosaik_wrapper import TimeSeriesPlayerSim
| 24 | 47 | 0.895833 | from .mosaik_wrapper import TimeSeriesPlayerSim
| true | true |
f7f4dd01e125702612abe21ad1f91687ae550f99 | 429 | py | Python | source/interprocedural_analyses/taint/test/integration/source_sink_flow.py | joehendrix/pyre-check | 23693455b1e0b4a7287efba9337be6bbfe23ada4 | [
"MIT"
] | 1 | 2022-02-10T10:51:32.000Z | 2022-02-10T10:51:32.000Z | source/interprocedural_analyses/taint/test/integration/source_sink_flow.py | joehendrix/pyre-check | 23693455b1e0b4a7287efba9337be6bbfe23ada4 | [
"MIT"
] | null | null | null | source/interprocedural_analyses/taint/test/integration/source_sink_flow.py | joehendrix/pyre-check | 23693455b1e0b4a7287efba9337be6bbfe23ada4 | [
"MIT"
] | null | null | null | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from builtins import _test_sink, _test_source
def bar():
    """Return a tainted value (taint-analysis test source)."""
    return _test_source()
def qux(arg):
    """Pass `arg` into the taint-analysis test sink."""
    _test_sink(arg)
def bad(ok, arg):
    """Forward only `arg` (not `ok`) into the sink via qux."""
    qux(arg)
def some_source():
    """Indirect source: propagates bar()'s tainted return value."""
    return bar()
def match_flows():
    """Expected flow: tainted x reaches the sink through bad's second arg."""
    x = some_source()
    bad(5, x)
| 15.321429 | 65 | 0.682984 |
from builtins import _test_sink, _test_source
def bar():
    """Return a tainted value (taint-analysis test source)."""
    return _test_source()
def qux(arg):
    """Pass `arg` into the taint-analysis test sink."""
    _test_sink(arg)
def bad(ok, arg):
    """Forward only `arg` (not `ok`) into the sink via qux."""
    qux(arg)
def some_source():
    """Indirect source: propagates bar()'s tainted return value."""
    return bar()
def match_flows():
    """Expected flow: tainted x reaches the sink through bad's second arg."""
    x = some_source()
    bad(5, x)
| true | true |
f7f4dd86bce01fbd6f1ea363305d512e3923d832 | 3,520 | py | Python | Android/NDK/android-ndk-r20b-win/build/gen_cygpath.py | X018/CCTOOL | 989af4d7edab82bf540400eb72eca4e7447d722c | [
"MIT"
] | null | null | null | Android/NDK/android-ndk-r20b-win/build/gen_cygpath.py | X018/CCTOOL | 989af4d7edab82bf540400eb72eca4e7447d722c | [
"MIT"
] | null | null | null | Android/NDK/android-ndk-r20b-win/build/gen_cygpath.py | X018/CCTOOL | 989af4d7edab82bf540400eb72eca4e7447d722c | [
"MIT"
] | null | null | null | #
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generates a make function approximating cygpath.
We don't just call cygpath (unless directed by NDK_USE_CYGPATH=1) because we
have to call this very often and doing so would be very slow. By doing this in
make, we can be much faster.
"""
from __future__ import print_function
import posixpath
import re
import sys
def get_mounts(mount_output):
    """Parse mount(1) output into (cygwin path, Windows path) pairs.

    The '/' mount is kept at the front of the list so that the generated
    patsubst chain applies it last; /cygdrive entries are emitted for both
    the lower- and upper-case drive letter.

    Args:
        mount_output: The text output from mount(1).

    Returns:
        A list of (cygwin path, Windows path) tuples.
    """
    pattern = re.compile(r'^(\S+) on (\S+) .*$')
    pairs = []
    for entry in mount_output.splitlines():
        # Cygwin's mount prints Windows paths with forward slashes already.
        m = pattern.search(entry)
        if m is None:
            continue
        win, cyg = m.group(1), m.group(2)
        if cyg == '/':
            # The root rule must be substituted last, so keep it first in
            # the list (the patsubst builder nests front entries outermost).
            pairs.insert(0, (cyg, win))
        elif cyg.startswith('/cygdrive/'):
            # Map both /cygdrive/c and /cygdrive/C to the same drive.
            drive = posixpath.basename(cyg)
            for variant in (drive.lower(), drive.upper()):
                pairs.append((posixpath.join('/cygdrive', variant), win))
        else:
            pairs.append((cyg, win))
    return pairs
def make_cygpath_function(mounts):
    """Create a make function body that can be used in place of cygpath.

    Builds nested $(patsubst ...) calls, one per mount; the first mount in
    the list becomes the outermost call and the innermost expression is the
    function argument $1.

    Args:
        mounts: A list of (cygwin path, Windows path) tuples.

    Returns:
        The body of a make function implementing cygpath, as a string.
    """
    body = '$1'
    # Wrap from the innermost call outward so the first mount in the list
    # ends up outermost — equivalent to the straightforward recursion.
    for cyg_prefix, win_prefix in reversed(mounts):
        if not cyg_prefix.endswith('/'):
            cyg_prefix += '/'
        if not win_prefix.endswith('/'):
            win_prefix += '/'
        body = '$(patsubst {}%,{}%,\n{})'.format(cyg_prefix, win_prefix, body)
    return body
def main():
    """Read mount(1) output from stdin and print the cygpath make function."""
    # We're invoked from make with `mount` piped in, so the mappings come
    # from whatever the current Cygwin installation reports.
    text = sys.stdin.read()
    print(make_cygpath_function(get_mounts(text)))
if __name__ == '__main__':
    main()
| 34.174757 | 78 | 0.650568 |
from __future__ import print_function
import posixpath
import re
import sys
def get_mounts(mount_output):
    """Parse mount(1) output into (cygwin path, Windows path) pairs.

    The '/' mount is kept at the front of the list so that the generated
    patsubst chain applies it last; /cygdrive entries are emitted for both
    the lower- and upper-case drive letter.

    Args:
        mount_output: The text output from mount(1).

    Returns:
        A list of (cygwin path, Windows path) tuples.
    """
    pattern = re.compile(r'^(\S+) on (\S+) .*$')
    pairs = []
    for entry in mount_output.splitlines():
        m = pattern.search(entry)
        if m is None:
            continue
        win, cyg = m.group(1), m.group(2)
        if cyg == '/':
            # The root rule must be substituted last, so keep it first in
            # the list (the patsubst builder nests front entries outermost).
            pairs.insert(0, (cyg, win))
        elif cyg.startswith('/cygdrive/'):
            # Map both /cygdrive/c and /cygdrive/C to the same drive.
            drive = posixpath.basename(cyg)
            for variant in (drive.lower(), drive.upper()):
                pairs.append((posixpath.join('/cygdrive', variant), win))
        else:
            pairs.append((cyg, win))
    return pairs
def make_cygpath_function(mounts):
    """Create a make function body that can be used in place of cygpath.

    Builds nested $(patsubst ...) calls, one per mount; the first mount in
    the list becomes the outermost call and the innermost expression is the
    function argument $1.

    Args:
        mounts: A list of (cygwin path, Windows path) tuples.

    Returns:
        The body of a make function implementing cygpath, as a string.
    """
    body = '$1'
    # Wrap from the innermost call outward so the first mount in the list
    # ends up outermost — equivalent to the straightforward recursion.
    for cyg_prefix, win_prefix in reversed(mounts):
        if not cyg_prefix.endswith('/'):
            cyg_prefix += '/'
        if not win_prefix.endswith('/'):
            win_prefix += '/'
        body = '$(patsubst {}%,{}%,\n{})'.format(cyg_prefix, win_prefix, body)
    return body
def main():
    """Read mount(1) output from stdin and print the cygpath make function."""
    # We're invoked from make with `mount` piped in, so the mappings come
    # from whatever the current Cygwin installation reports.
    text = sys.stdin.read()
    print(make_cygpath_function(get_mounts(text)))
if __name__ == '__main__':
    main()
| true | true |
f7f4dddb4f4c05b540fd89d2de427c9aee5e468f | 5,943 | py | Python | toolchain/riscv/MSYS/python/Tools/demo/redemo.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 207 | 2018-10-01T08:53:01.000Z | 2022-03-14T12:15:54.000Z | toolchain/riscv/MSYS/python/Tools/demo/redemo.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 8 | 2019-06-29T14:18:51.000Z | 2022-02-19T07:30:27.000Z | toolchain/riscv/MSYS/python/Tools/demo/redemo.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 76 | 2020-03-16T01:47:46.000Z | 2022-03-21T16:37:07.000Z | #!/usr/bin/env python3
"""Basic regular expression demonstration facility (Perl style syntax)."""
from tkinter import *
import re
class ReDemo:
def __init__(self, master):
self.master = master
self.promptdisplay = Label(self.master, anchor=W,
text="Enter a Perl-style regular expression:")
self.promptdisplay.pack(side=TOP, fill=X)
self.regexdisplay = Entry(self.master)
self.regexdisplay.pack(fill=X)
self.regexdisplay.focus_set()
self.addoptions()
self.statusdisplay = Label(self.master, text="", anchor=W)
self.statusdisplay.pack(side=TOP, fill=X)
self.labeldisplay = Label(self.master, anchor=W,
text="Enter a string to search:")
self.labeldisplay.pack(fill=X)
self.labeldisplay.pack(fill=X)
self.showframe = Frame(master)
self.showframe.pack(fill=X, anchor=W)
self.showvar = StringVar(master)
self.showvar.set("first")
self.showfirstradio = Radiobutton(self.showframe,
text="Highlight first match",
variable=self.showvar,
value="first",
command=self.recompile)
self.showfirstradio.pack(side=LEFT)
self.showallradio = Radiobutton(self.showframe,
text="Highlight all matches",
variable=self.showvar,
value="all",
command=self.recompile)
self.showallradio.pack(side=LEFT)
self.stringdisplay = Text(self.master, width=60, height=4)
self.stringdisplay.pack(fill=BOTH, expand=1)
self.stringdisplay.tag_configure("hit", background="yellow")
self.grouplabel = Label(self.master, text="Groups:", anchor=W)
self.grouplabel.pack(fill=X)
self.grouplist = Listbox(self.master)
self.grouplist.pack(expand=1, fill=BOTH)
self.regexdisplay.bind('<Key>', self.recompile)
self.stringdisplay.bind('<Key>', self.reevaluate)
self.compiled = None
self.recompile()
btags = self.regexdisplay.bindtags()
self.regexdisplay.bindtags(btags[1:] + btags[:1])
btags = self.stringdisplay.bindtags()
self.stringdisplay.bindtags(btags[1:] + btags[:1])
def addoptions(self):
self.frames = []
self.boxes = []
self.vars = []
for name in ('IGNORECASE',
'MULTILINE',
'DOTALL',
'VERBOSE'):
if len(self.boxes) % 3 == 0:
frame = Frame(self.master)
frame.pack(fill=X)
self.frames.append(frame)
val = getattr(re, name).value
var = IntVar()
box = Checkbutton(frame,
variable=var, text=name,
offvalue=0, onvalue=val,
command=self.recompile)
box.pack(side=LEFT)
self.boxes.append(box)
self.vars.append(var)
def getflags(self):
flags = 0
for var in self.vars:
flags = flags | var.get()
flags = flags
return flags
def recompile(self, event=None):
try:
self.compiled = re.compile(self.regexdisplay.get(),
self.getflags())
bg = self.promptdisplay['background']
self.statusdisplay.config(text="", background=bg)
except re.error as msg:
self.compiled = None
self.statusdisplay.config(
text="re.error: %s" % str(msg),
background="red")
self.reevaluate()
    def reevaluate(self, event=None):
        """Re-run the compiled pattern over the search text and redraw.

        Clears previous highlights and the group listing, then scans the
        text tagging matches ("hit" in yellow; zero-width matches "hit0" in
        orange) and listing each match's groups.  Stops after the first
        match when the "Highlight first match" radio is selected.  Bound to
        <Key> on the string display; *event* is unused.
        """
        try:
            self.stringdisplay.tag_remove("hit", "1.0", END)
        except TclError:
            pass
        try:
            self.stringdisplay.tag_remove("hit0", "1.0", END)
        except TclError:
            pass
        self.grouplist.delete(0, END)
        # Nothing to do if the pattern failed to compile.
        if not self.compiled:
            return
        self.stringdisplay.tag_configure("hit", background="yellow")
        self.stringdisplay.tag_configure("hit0", background="orange")
        text = self.stringdisplay.get("1.0", END)
        last = 0
        nmatches = 0
        while last <= len(text):
            m = self.compiled.search(text, last)
            if m is None:
                break
            first, last = m.span()
            if last == first:
                # Zero-width match: advance by one so the scan terminates,
                # and use the orange tag to make it visible.
                last = first+1
                tag = "hit0"
            else:
                tag = "hit"
            # Text-widget indices expressed as character offsets from 1.0.
            pfirst = "1.0 + %d chars" % first
            plast = "1.0 + %d chars" % last
            self.stringdisplay.tag_add(tag, pfirst, plast)
            if nmatches == 0:
                # Scroll the first match into view.
                self.stringdisplay.yview_pickplace(pfirst)
            # List the whole match (group 0) followed by every capture group.
            groups = list(m.groups())
            groups.insert(0, m.group())
            for i in range(len(groups)):
                g = "%2d: %r" % (i, groups[i])
                self.grouplist.insert(END, g)
            nmatches = nmatches + 1
            if self.showvar.get() == "first":
                break
        if nmatches == 0:
            self.statusdisplay.config(text="(no match)",
                                      background="yellow")
        else:
            self.statusdisplay.config(text="")
# Stand-alone entry point.
def main():
    """Create the demo window and run the Tk event loop until it is closed."""
    window = Tk()
    app = ReDemo(window)
    window.protocol('WM_DELETE_WINDOW', window.quit)
    window.mainloop()


if __name__ == '__main__':
    main()
| 34.352601 | 75 | 0.502272 |
from tkinter import *
import re
class ReDemo:
def __init__(self, master):
self.master = master
self.promptdisplay = Label(self.master, anchor=W,
text="Enter a Perl-style regular expression:")
self.promptdisplay.pack(side=TOP, fill=X)
self.regexdisplay = Entry(self.master)
self.regexdisplay.pack(fill=X)
self.regexdisplay.focus_set()
self.addoptions()
self.statusdisplay = Label(self.master, text="", anchor=W)
self.statusdisplay.pack(side=TOP, fill=X)
self.labeldisplay = Label(self.master, anchor=W,
text="Enter a string to search:")
self.labeldisplay.pack(fill=X)
self.labeldisplay.pack(fill=X)
self.showframe = Frame(master)
self.showframe.pack(fill=X, anchor=W)
self.showvar = StringVar(master)
self.showvar.set("first")
self.showfirstradio = Radiobutton(self.showframe,
text="Highlight first match",
variable=self.showvar,
value="first",
command=self.recompile)
self.showfirstradio.pack(side=LEFT)
self.showallradio = Radiobutton(self.showframe,
text="Highlight all matches",
variable=self.showvar,
value="all",
command=self.recompile)
self.showallradio.pack(side=LEFT)
self.stringdisplay = Text(self.master, width=60, height=4)
self.stringdisplay.pack(fill=BOTH, expand=1)
self.stringdisplay.tag_configure("hit", background="yellow")
self.grouplabel = Label(self.master, text="Groups:", anchor=W)
self.grouplabel.pack(fill=X)
self.grouplist = Listbox(self.master)
self.grouplist.pack(expand=1, fill=BOTH)
self.regexdisplay.bind('<Key>', self.recompile)
self.stringdisplay.bind('<Key>', self.reevaluate)
self.compiled = None
self.recompile()
btags = self.regexdisplay.bindtags()
self.regexdisplay.bindtags(btags[1:] + btags[:1])
btags = self.stringdisplay.bindtags()
self.stringdisplay.bindtags(btags[1:] + btags[:1])
def addoptions(self):
self.frames = []
self.boxes = []
self.vars = []
for name in ('IGNORECASE',
'MULTILINE',
'DOTALL',
'VERBOSE'):
if len(self.boxes) % 3 == 0:
frame = Frame(self.master)
frame.pack(fill=X)
self.frames.append(frame)
val = getattr(re, name).value
var = IntVar()
box = Checkbutton(frame,
variable=var, text=name,
offvalue=0, onvalue=val,
command=self.recompile)
box.pack(side=LEFT)
self.boxes.append(box)
self.vars.append(var)
def getflags(self):
flags = 0
for var in self.vars:
flags = flags | var.get()
flags = flags
return flags
def recompile(self, event=None):
try:
self.compiled = re.compile(self.regexdisplay.get(),
self.getflags())
bg = self.promptdisplay['background']
self.statusdisplay.config(text="", background=bg)
except re.error as msg:
self.compiled = None
self.statusdisplay.config(
text="re.error: %s" % str(msg),
background="red")
self.reevaluate()
def reevaluate(self, event=None):
try:
self.stringdisplay.tag_remove("hit", "1.0", END)
except TclError:
pass
try:
self.stringdisplay.tag_remove("hit0", "1.0", END)
except TclError:
pass
self.grouplist.delete(0, END)
if not self.compiled:
return
self.stringdisplay.tag_configure("hit", background="yellow")
self.stringdisplay.tag_configure("hit0", background="orange")
text = self.stringdisplay.get("1.0", END)
last = 0
nmatches = 0
while last <= len(text):
m = self.compiled.search(text, last)
if m is None:
break
first, last = m.span()
if last == first:
last = first+1
tag = "hit0"
else:
tag = "hit"
pfirst = "1.0 + %d chars" % first
plast = "1.0 + %d chars" % last
self.stringdisplay.tag_add(tag, pfirst, plast)
if nmatches == 0:
self.stringdisplay.yview_pickplace(pfirst)
groups = list(m.groups())
groups.insert(0, m.group())
for i in range(len(groups)):
g = "%2d: %r" % (i, groups[i])
self.grouplist.insert(END, g)
nmatches = nmatches + 1
if self.showvar.get() == "first":
break
if nmatches == 0:
self.statusdisplay.config(text="(no match)",
background="yellow")
else:
self.statusdisplay.config(text="")
def main():
root = Tk()
demo = ReDemo(root)
root.protocol('WM_DELETE_WINDOW', root.quit)
root.mainloop()
if __name__ == '__main__':
main()
| true | true |
f7f4de0222bfcd8aa6c17624df23ae0566c9314f | 4,742 | py | Python | andres@programo.ual.es/evaluatePCA.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | andres@programo.ual.es/evaluatePCA.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | andres@programo.ual.es/evaluatePCA.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
import inferpy as inf
from datareduction.bayesian_pca_DR import BayesianPCA_DR
from datareduction.variational_gaussian_mixture_DR import VariationalGaussianMixture_DR
from prml.feature_extractions import BayesianPCA
from prml.rv import VariationalGaussianMixture
from prml.features import PolynomialFeatures
from prml.linear import (
VariationalLinearRegressor,
VariationalLogisticRegressor
)
# Fix the global NumPy RNG so each run of the experiment is reproducible.
np.random.seed(0)
############## GENERATE DATA ########################
# N: sample count, K: latent components (n_components below), D: observed
# dimension.  NOTE(review): N, M and D are reassigned further down once the
# real dataset is loaded; these values only drive the synthetic-data branch.
N=200
K=10
M=10
D=10
def create_toy_data(sample_size=100, ndim_hidden=1, ndim_observe=2, std=1.):
    """Sample a linear-Gaussian (probabilistic-PCA style) toy dataset.

    Draws latent points, a uniform random mean and loading matrix, and
    returns ``Z @ W + mu + noise`` with Gaussian noise of scale *std*, as
    an array of shape ``(sample_size, ndim_observe)``.
    """
    latent = np.random.normal(size=(sample_size, ndim_hidden))
    mean = np.random.uniform(-5, 5, size=(ndim_observe))
    loading = np.random.uniform(-5, 5, (ndim_hidden, ndim_observe))
    noise = np.random.normal(scale=std, size=(sample_size, ndim_observe))
    return latent.dot(loading) + mean + noise
# --- Build the dataset --------------------------------------------------
# Synthetic linear-Gaussian data; the commented lines are alternative
# sklearn datasets tried during experimentation.
data = create_toy_data(sample_size=N, ndim_hidden=K, ndim_observe=D, std=1.)
#data = datasets.load_iris().data
#data = datasets.fetch_california_housing().data
#data = datasets.load_digits().data
# In-place shuffle of the rows.
np.take(data,np.random.permutation(data.shape[0]),axis=0,out=data)
N=data.shape[0]
D=data.shape[1]
# NOTE(review): these two slices overlap (rows N/3 .. 2N/3 appear in both
# train and test) -- confirm that is intentional.
x_train=data[0:int(2.0*N/3),:]
x_test=data[int(N/3.0):N,:]
######################################################
# MNIST overrides the synthetic split above: x_train/x_test are replaced
# by the (in-place shuffled) MNIST images.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/")
#data = data[np.random.choice(np.where(target == 3)[0], 10000)]
np.take(mnist.train.images,np.random.permutation(mnist.train.images.shape[0]),axis=0,out=mnist.train.images)
np.take(mnist.test.images,np.random.permutation(mnist.test.images.shape[0]),axis=0,out=mnist.test.images)
D=data.shape[1]
x_train = mnist.train.images#[0:2000,:]
x_test = mnist.test.images#[0:2000,:]
#####################################################
#bpca = BayesianPCA(n_components=K)
#bpca.fit(x_train, initial="eigen")
#print(np.sum(bpca.log_proba(x_test)))
#test_ll[0,:] = np.repeat(np.sum(bpca.log_proba(x_test)),10)
######################################################
# Candidate reduction sizes (used as n_clusters below).  Only the last
# assignment is effective; the earlier ones are leftovers from previous
# experiment configurations.
samples = np.zeros(10)
samples = np.array([int(x_train.shape[0]*(m+1)/100) for m in range(0,10) ])
samples = np.array([25, 50, 100, 250, 500, 750, 1000])
#samples = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
#samples = np.array([20, 50, 100, 250, 500, 1000])
# test_ll row 0 holds the sample sizes; rows 1-3 hold held-out
# log-likelihoods for the SS / NoSS / random reduction methods.
clusterError = np.zeros(samples.shape[0])
test_ll = np.zeros((4,samples.shape[0]))
test_ll[0,:]=samples
for m in range(0,samples.shape[0]):
    print(samples[m])
    M=samples[m]
    # Reset the seed before each fit so all three methods start from
    # identical random state.
    np.random.seed(1234)
    bpca_dr = BayesianPCA_DR(n_components=K)
    bpca_dr.fit(x_train, initial="eigen", n_clusters=M, cluster_method="SS")
    test_ll[1,m]=np.sum(bpca_dr.log_proba(x_test))
    clusterError[m]=bpca_dr.clusterError
    print(test_ll[1,m])
    print(clusterError[m])
    print(np.sum(bpca_dr.log_proba(x_test)))
    #distance_ss[m]=np.linalg.norm(bpca.W - bpca_dr.W)
    np.random.seed(1234)
    bpca_dr = BayesianPCA_DR(n_components=K)
    bpca_dr.fit(x_train, initial="eigen", n_clusters=M, cluster_method="NoSS")
    test_ll[2,m]= np.sum(bpca_dr.log_proba(x_test))
    print(np.sum(bpca_dr.log_proba(x_test)))
    #distance_noss[m]=np.linalg.norm(bpca.W - bpca_dr.W)
    np.random.seed(1234)
    bpca_dr = BayesianPCA_DR(n_components=K)
    bpca_dr.fit(x_train, initial="eigen", n_clusters=M, cluster_method="random")
    test_ll[3,m]= np.sum(bpca_dr.log_proba(x_test))
    print(np.sum(bpca_dr.log_proba(x_test)))
    #distance_noss[m]=np.linalg.norm(bpca.W - bpca_dr.W)
# Persist results, then re-load for plotting.
# NOTE(review): results are saved under ./figs but re-loaded from
# ./datareduction/figs -- verify both paths point at the same directory.
np.savetxt('./figs/PCA_MINST_clustererror.txt', clusterError)
np.savetxt('./figs/PCA_MINST_data.txt',test_ll)
test_ll = np.loadtxt('./datareduction/figs/PCA_MINST_data.txt')
clusterError = np.loadtxt('./datareduction/figs/PCA_MINST_clustererror.txt')
x = [m for m in range(0,test_ll.shape[1])]
# Figure 0: held-out log-likelihood for each reduction method.
plt.figure(0)
plt.plot(x,test_ll[1,:], c='b', label='DR-SS')
plt.plot(x,test_ll[2,:], c='g', label='DR-NoSS')
plt.plot(x,test_ll[3,:], c='y', label='DR-Random')
plt.legend(loc='lower right', shadow=True)
plt.xticks(x, test_ll[0,:])
plt.ylim(-0.5e07, 0.2e07, 100)
plt.savefig("./datareduction/figs/PCA_MINST_LL.pdf",bbox_inches='tight')
# Figure 1: SS log-likelihood plotted against the clustering error.
plt.figure(1)
plt.plot(x,test_ll[1,:], c='b', label='Log-Likelihood')
plt.plot(x,clusterError, c='k', label='ClusterError')
plt.legend(loc='center right', shadow=True)
plt.xticks(x, test_ll[0,:])
plt.ylim(2e05, 2e06, 100)
plt.savefig("./datareduction/figs/PCA_MINST_ClusterError.pdf",bbox_inches='tight')
plt.show()
# Dump the raw numbers as LaTeX tables.
from tabulate import tabulate
print(tabulate(test_ll, tablefmt="latex", floatfmt=".2f"))
print(tabulate(clusterError[None,:], tablefmt="latex", floatfmt=".2f"))
| 34.115108 | 108 | 0.695698 | import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
import inferpy as inf
from datareduction.bayesian_pca_DR import BayesianPCA_DR
from datareduction.variational_gaussian_mixture_DR import VariationalGaussianMixture_DR
from prml.feature_extractions import BayesianPCA
from prml.rv import VariationalGaussianMixture
from prml.features import PolynomialFeatures
from prml.linear import (
VariationalLinearRegressor,
VariationalLogisticRegressor
)
np.random.seed(0)
| true | true |
f7f4de586136fbfae968e74b2df519bc44b47d98 | 3,805 | py | Python | cloudkitty/rating/hash/db/sqlalchemy/alembic/versions/3dd7e13527f3_initial_migration.py | wanghuiict/cloudkitty | 11ff713042eb0354f497f7051130630c46860735 | [
"Apache-2.0"
] | 1 | 2021-11-23T02:23:19.000Z | 2021-11-23T02:23:19.000Z | cloudkitty/rating/hash/db/sqlalchemy/alembic/versions/3dd7e13527f3_initial_migration.py | shanafang9/cloudkitty | 911c90569ccb09ecf0d7aa11a5a707c8ebda09cf | [
"Apache-2.0"
] | null | null | null | cloudkitty/rating/hash/db/sqlalchemy/alembic/versions/3dd7e13527f3_initial_migration.py | shanafang9/cloudkitty | 911c90569ccb09ecf0d7aa11a5a707c8ebda09cf | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Initial migration
Revision ID: 3dd7e13527f3
Revises: None
Create Date: 2015-03-10 13:06:41.067563
"""
# revision identifiers, used by Alembic.
revision = '3dd7e13527f3'
down_revision = None
from alembic import op # noqa: E402
import sqlalchemy as sa # noqa: E402
def upgrade():
    """Create the initial hashmap rating schema.

    Tables are created parents-first (services, fields, groups) so that
    ``hashmap_maps`` can declare its foreign keys against them.
    """
    op.create_table(
        'hashmap_services',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('service_id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name'),
        sa.UniqueConstraint('service_id'),
        mysql_charset='utf8',
        mysql_engine='InnoDB')
    op.create_table(
        'hashmap_fields',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('field_id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.Column('service_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ['service_id'],
            ['hashmap_services.id'],
            ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('field_id'),
        sa.UniqueConstraint('field_id', 'name', name='uniq_field'),
        sa.UniqueConstraint(
            'service_id',
            'name',
            name='uniq_map_service_field'),
        mysql_charset='utf8',
        mysql_engine='InnoDB')
    op.create_table(
        'hashmap_groups',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('group_id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('group_id'),
        sa.UniqueConstraint('name'),
        mysql_charset='utf8',
        mysql_engine='InnoDB')
    op.create_table(
        'hashmap_maps',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('mapping_id', sa.String(length=36), nullable=False),
        sa.Column('value', sa.String(length=255), nullable=True),
        sa.Column('cost', sa.Numeric(20, 8), nullable=False),
        sa.Column(
            'map_type',
            sa.Enum('flat', 'rate', name='enum_map_type'),
            nullable=False),
        sa.Column('service_id', sa.Integer(), nullable=True),
        sa.Column('field_id', sa.Integer(), nullable=True),
        sa.Column('group_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(
            ['field_id'],
            ['hashmap_fields.id'],
            ondelete='CASCADE'),
        sa.ForeignKeyConstraint(
            ['group_id'],
            ['hashmap_groups.id'],
            ondelete='SET NULL'),
        sa.ForeignKeyConstraint(
            ['service_id'],
            ['hashmap_services.id'],
            ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('mapping_id'),
        sa.UniqueConstraint(
            'value',
            'field_id',
            name='uniq_field_mapping'),
        sa.UniqueConstraint(
            'value',
            'service_id',
            name='uniq_service_mapping'),
        mysql_charset='utf8',
        mysql_engine='InnoDB')


def downgrade():
    """Drop the hashmap rating schema, children first.

    ``hashmap_maps`` holds the foreign keys, so it must be dropped before
    the tables it references.
    """
    op.drop_table('hashmap_maps')
    op.drop_table('hashmap_groups')
    op.drop_table('hashmap_fields')
    op.drop_table('hashmap_services')
revision = '3dd7e13527f3'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'hashmap_services',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('service_id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name'),
sa.UniqueConstraint('service_id'),
mysql_charset='utf8',
mysql_engine='InnoDB')
op.create_table(
'hashmap_fields',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('field_id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('service_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['service_id'],
['hashmap_services.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('field_id'),
sa.UniqueConstraint('field_id', 'name', name='uniq_field'),
sa.UniqueConstraint(
'service_id',
'name',
name='uniq_map_service_field'),
mysql_charset='utf8',
mysql_engine='InnoDB')
op.create_table(
'hashmap_groups',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('group_id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('group_id'),
sa.UniqueConstraint('name'),
mysql_charset='utf8',
mysql_engine='InnoDB')
op.create_table(
'hashmap_maps',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('mapping_id', sa.String(length=36), nullable=False),
sa.Column('value', sa.String(length=255), nullable=True),
sa.Column('cost', sa.Numeric(20, 8), nullable=False),
sa.Column(
'map_type',
sa.Enum('flat', 'rate', name='enum_map_type'),
nullable=False),
sa.Column('service_id', sa.Integer(), nullable=True),
sa.Column('field_id', sa.Integer(), nullable=True),
sa.Column('group_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['field_id'],
['hashmap_fields.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(
['group_id'],
['hashmap_groups.id'],
ondelete='SET NULL'),
sa.ForeignKeyConstraint(
['service_id'],
['hashmap_services.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('mapping_id'),
sa.UniqueConstraint(
'value',
'field_id',
name='uniq_field_mapping'),
sa.UniqueConstraint(
'value',
'service_id',
name='uniq_service_mapping'),
mysql_charset='utf8',
mysql_engine='InnoDB')
| true | true |
f7f4dee1a9b28f1634c57d526baaa7945cfda495 | 1,137 | py | Python | pycorrector/bert/predict_mask.py | Sueying/pycorrector | d4c8dbee7d055cd410d56bd1b52f0780ec8d1983 | [
"Apache-2.0"
] | 2 | 2020-09-21T01:59:48.000Z | 2020-09-21T02:16:15.000Z | pycorrector/bert/predict_mask.py | Sueying/pycorrector | d4c8dbee7d055cd410d56bd1b52f0780ec8d1983 | [
"Apache-2.0"
] | null | null | null | pycorrector/bert/predict_mask.py | Sueying/pycorrector | d4c8dbee7d055cd410d56bd1b52f0780ec8d1983 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description: Run BERT on Masked LM.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
from transformers import pipeline
# Token the fill-mask pipeline replaces in the input sentences.
MASK_TOKEN = "[MASK]"
# Absolute directory of this file; used to locate the default model dir.
pwd_path = os.path.abspath(os.path.dirname(__file__))
def main():
    """Parse CLI args, build a fill-mask pipeline, and demo it on samples."""
    arg_parser = argparse.ArgumentParser()
    # Required parameters
    arg_parser.add_argument("--bert_model_dir", default=os.path.join(pwd_path, '../data/bert_models/chinese_finetuned_lm/'),
                            type=str,
                            help="Bert pre-trained model dir")
    cli_args = arg_parser.parse_args()
    fill_mask = pipeline('fill-mask',
                         model=cli_args.bert_model_dir,
                         tokenizer=cli_args.bert_model_dir)
    # Demo sentences (one English, four Chinese) each containing one [MASK].
    demo_sentences = (
        'hi lili, What is the name of the [MASK] ?',
        '今天[MASK]情很好',
        '少先队员[MASK]该为老人让座',
        '[MASK]七学习是人工智能领遇最能体现智能的一个分知',
        '机[MASK]学习是人工智能领遇最能体现智能的一个分知',
    )
    for sentence in demo_sentences:
        print(fill_mask(sentence))


if __name__ == "__main__":
    main()
| 21.865385 | 120 | 0.623571 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
from transformers import pipeline
MASK_TOKEN = "[MASK]"
pwd_path = os.path.abspath(os.path.dirname(__file__))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--bert_model_dir", default=os.path.join(pwd_path, '../data/bert_models/chinese_finetuned_lm/'),
type=str,
help="Bert pre-trained model dir")
args = parser.parse_args()
nlp = pipeline('fill-mask',
model=args.bert_model_dir,
tokenizer=args.bert_model_dir
)
i = nlp('hi lili, What is the name of the [MASK] ?')
print(i)
i = nlp('今天[MASK]情很好')
print(i)
i = nlp('少先队员[MASK]该为老人让座')
print(i)
i = nlp('[MASK]七学习是人工智能领遇最能体现智能的一个分知')
print(i)
i = nlp('机[MASK]学习是人工智能领遇最能体现智能的一个分知')
print(i)
if __name__ == "__main__":
main()
| true | true |
f7f4df9dc8d8d9e77ebf3bb98aed46f4f524bdc7 | 13,928 | py | Python | moto/ec2/responses/elastic_block_store.py | ljakimczuk/moto | ea2ccb944ec7cf56298744f771a62a12cbf45c50 | [
"Apache-2.0"
] | 1 | 2020-09-15T15:31:31.000Z | 2020-09-15T15:31:31.000Z | moto/ec2/responses/elastic_block_store.py | ljakimczuk/moto | ea2ccb944ec7cf56298744f771a62a12cbf45c50 | [
"Apache-2.0"
] | null | null | null | moto/ec2/responses/elastic_block_store.py | ljakimczuk/moto | ea2ccb944ec7cf56298744f771a62a12cbf45c50 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import filters_from_querystring
class ElasticBlockStore(BaseResponse):
    """EC2 query-API handlers for EBS volumes and snapshots.

    Each handler reads its parameters from the querystring, delegates to
    the in-memory EC2 backend, and renders the matching XML template.
    Dry-run requests are short-circuited by ``is_not_dryrun``.
    """

    def attach_volume(self):
        """Attach a volume to an instance at the given device path."""
        volume_id = self._get_param("VolumeId")
        instance_id = self._get_param("InstanceId")
        device_path = self._get_param("Device")
        if self.is_not_dryrun("AttachVolume"):
            attachment = self.ec2_backend.attach_volume(
                volume_id, instance_id, device_path
            )
            template = self.response_template(ATTACHED_VOLUME_RESPONSE)
            return template.render(attachment=attachment)

    def copy_snapshot(self):
        """Copy a snapshot from another region into this one."""
        source_snapshot_id = self._get_param("SourceSnapshotId")
        source_region = self._get_param("SourceRegion")
        description = self._get_param("Description")
        if self.is_not_dryrun("CopySnapshot"):
            snapshot = self.ec2_backend.copy_snapshot(
                source_snapshot_id, source_region, description
            )
            template = self.response_template(COPY_SNAPSHOT_RESPONSE)
            return template.render(snapshot=snapshot)

    def create_snapshot(self):
        """Create a snapshot of a volume, applying any snapshot tags."""
        volume_id = self._get_param("VolumeId")
        description = self._get_param("Description")
        tags = self._parse_tag_specification("TagSpecification")
        snapshot_tags = tags.get("snapshot", {})
        if self.is_not_dryrun("CreateSnapshot"):
            snapshot = self.ec2_backend.create_snapshot(volume_id, description)
            snapshot.add_tags(snapshot_tags)
            template = self.response_template(CREATE_SNAPSHOT_RESPONSE)
            return template.render(snapshot=snapshot)

    def create_volume(self):
        """Create a volume (optionally from a snapshot) with optional tags."""
        size = self._get_param("Size")
        zone = self._get_param("AvailabilityZone")
        snapshot_id = self._get_param("SnapshotId")
        tags = self._parse_tag_specification("TagSpecification")
        volume_tags = tags.get("volume", {})
        encrypted = self._get_param("Encrypted", if_none=False)
        # Querystring values arrive as strings, so "false" would be truthy.
        # Normalize to a real boolean: "true" -> True, anything else -> False.
        if isinstance(encrypted, str):
            encrypted = encrypted.lower() == "true"
        if self.is_not_dryrun("CreateVolume"):
            volume = self.ec2_backend.create_volume(size, zone, snapshot_id, encrypted)
            volume.add_tags(volume_tags)
            template = self.response_template(CREATE_VOLUME_RESPONSE)
            return template.render(volume=volume)

    def delete_snapshot(self):
        """Delete a snapshot by id."""
        snapshot_id = self._get_param("SnapshotId")
        if self.is_not_dryrun("DeleteSnapshot"):
            self.ec2_backend.delete_snapshot(snapshot_id)
            return DELETE_SNAPSHOT_RESPONSE

    def delete_volume(self):
        """Delete a volume by id."""
        volume_id = self._get_param("VolumeId")
        if self.is_not_dryrun("DeleteVolume"):
            self.ec2_backend.delete_volume(volume_id)
            return DELETE_VOLUME_RESPONSE

    def describe_snapshots(self):
        """List snapshots, optionally restricted by ids and filters."""
        filters = filters_from_querystring(self.querystring)
        snapshot_ids = self._get_multi_param("SnapshotId")
        snapshots = self.ec2_backend.describe_snapshots(
            snapshot_ids=snapshot_ids, filters=filters
        )
        template = self.response_template(DESCRIBE_SNAPSHOTS_RESPONSE)
        return template.render(snapshots=snapshots)

    def describe_volumes(self):
        """List volumes, optionally restricted by ids and filters."""
        filters = filters_from_querystring(self.querystring)
        volume_ids = self._get_multi_param("VolumeId")
        volumes = self.ec2_backend.describe_volumes(
            volume_ids=volume_ids, filters=filters
        )
        template = self.response_template(DESCRIBE_VOLUMES_RESPONSE)
        return template.render(volumes=volumes)

    def describe_volume_attribute(self):
        """Not implemented by this backend."""
        raise NotImplementedError(
            "ElasticBlockStore.describe_volume_attribute is not yet implemented"
        )

    def describe_volume_status(self):
        """Not implemented by this backend."""
        raise NotImplementedError(
            "ElasticBlockStore.describe_volume_status is not yet implemented"
        )

    def detach_volume(self):
        """Detach a volume from an instance."""
        volume_id = self._get_param("VolumeId")
        instance_id = self._get_param("InstanceId")
        device_path = self._get_param("Device")
        if self.is_not_dryrun("DetachVolume"):
            attachment = self.ec2_backend.detach_volume(
                volume_id, instance_id, device_path
            )
            template = self.response_template(DETATCH_VOLUME_RESPONSE)
            return template.render(attachment=attachment)

    def enable_volume_io(self):
        """Not implemented by this backend (dry-run still honored)."""
        if self.is_not_dryrun("EnableVolumeIO"):
            raise NotImplementedError(
                "ElasticBlockStore.enable_volume_io is not yet implemented"
            )

    def import_volume(self):
        """Not implemented by this backend (dry-run still honored)."""
        if self.is_not_dryrun("ImportVolume"):
            raise NotImplementedError(
                "ElasticBlockStore.import_volume is not yet implemented"
            )

    def describe_snapshot_attribute(self):
        """Return the createVolumePermission groups/users for a snapshot."""
        snapshot_id = self._get_param("SnapshotId")
        groups = self.ec2_backend.get_create_volume_permission_groups(snapshot_id)
        user_ids = self.ec2_backend.get_create_volume_permission_userids(snapshot_id)
        template = self.response_template(DESCRIBE_SNAPSHOT_ATTRIBUTES_RESPONSE)
        return template.render(snapshot_id=snapshot_id, groups=groups, userIds=user_ids)

    def modify_snapshot_attribute(self):
        """Add or remove createVolumePermission entries on a snapshot.

        ``OperationType`` selects "add" or "remove"; other values are
        silently ignored (matching the original behavior).
        """
        snapshot_id = self._get_param("SnapshotId")
        operation_type = self._get_param("OperationType")
        groups = self._get_multi_param("UserGroup")
        user_ids = self._get_multi_param("UserId")
        if self.is_not_dryrun("ModifySnapshotAttribute"):
            if operation_type == "add":
                self.ec2_backend.add_create_volume_permission(
                    snapshot_id, user_ids=user_ids, groups=groups
                )
            elif operation_type == "remove":
                self.ec2_backend.remove_create_volume_permission(
                    snapshot_id, user_ids=user_ids, groups=groups
                )
            return MODIFY_SNAPSHOT_ATTRIBUTE_RESPONSE

    def modify_volume_attribute(self):
        """Not implemented by this backend (dry-run still honored)."""
        if self.is_not_dryrun("ModifyVolumeAttribute"):
            raise NotImplementedError(
                "ElasticBlockStore.modify_volume_attribute is not yet implemented"
            )

    def reset_snapshot_attribute(self):
        """Not implemented by this backend (dry-run still honored)."""
        if self.is_not_dryrun("ResetSnapshotAttribute"):
            raise NotImplementedError(
                "ElasticBlockStore.reset_snapshot_attribute is not yet implemented"
            )
CREATE_VOLUME_RESPONSE = """<CreateVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeId>{{ volume.id }}</volumeId>
<size>{{ volume.size }}</size>
{% if volume.snapshot_id %}
<snapshotId>{{ volume.snapshot_id }}</snapshotId>
{% else %}
<snapshotId/>
{% endif %}
<encrypted>{{ volume.encrypted }}</encrypted>
<availabilityZone>{{ volume.zone.name }}</availabilityZone>
<status>creating</status>
<createTime>{{ volume.create_time}}</createTime>
{% if volume.get_tags() %}
<tagSet>
{% for tag in volume.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
{% endif %}
<volumeType>standard</volumeType>
</CreateVolumeResponse>"""
DESCRIBE_VOLUMES_RESPONSE = """<DescribeVolumesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeSet>
{% for volume in volumes %}
<item>
<volumeId>{{ volume.id }}</volumeId>
<size>{{ volume.size }}</size>
{% if volume.snapshot_id %}
<snapshotId>{{ volume.snapshot_id }}</snapshotId>
{% else %}
<snapshotId/>
{% endif %}
<encrypted>{{ volume.encrypted }}</encrypted>
<availabilityZone>{{ volume.zone.name }}</availabilityZone>
<status>{{ volume.status }}</status>
<createTime>{{ volume.create_time}}</createTime>
<attachmentSet>
{% if volume.attachment %}
<item>
<volumeId>{{ volume.id }}</volumeId>
<instanceId>{{ volume.attachment.instance.id }}</instanceId>
<device>{{ volume.attachment.device }}</device>
<status>attached</status>
<attachTime>{{volume.attachment.attach_time}}</attachTime>
<deleteOnTermination>false</deleteOnTermination>
</item>
{% endif %}
</attachmentSet>
{% if volume.get_tags() %}
<tagSet>
{% for tag in volume.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
{% endif %}
<volumeType>standard</volumeType>
</item>
{% endfor %}
</volumeSet>
</DescribeVolumesResponse>"""
DELETE_VOLUME_RESPONSE = """<DeleteVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteVolumeResponse>"""
ATTACHED_VOLUME_RESPONSE = """<AttachVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeId>{{ attachment.volume.id }}</volumeId>
<instanceId>{{ attachment.instance.id }}</instanceId>
<device>{{ attachment.device }}</device>
<status>attaching</status>
<attachTime>{{attachment.attach_time}}</attachTime>
</AttachVolumeResponse>"""
DETATCH_VOLUME_RESPONSE = """<DetachVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeId>{{ attachment.volume.id }}</volumeId>
<instanceId>{{ attachment.instance.id }}</instanceId>
<device>{{ attachment.device }}</device>
<status>detaching</status>
<attachTime>2013-10-04T17:38:53.000Z</attachTime>
</DetachVolumeResponse>"""
CREATE_SNAPSHOT_RESPONSE = """<CreateSnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<snapshotId>{{ snapshot.id }}</snapshotId>
<volumeId>{{ snapshot.volume.id }}</volumeId>
<status>pending</status>
<startTime>{{ snapshot.start_time}}</startTime>
<progress>60%</progress>
<ownerId>{{ snapshot.owner_id }}</ownerId>
<volumeSize>{{ snapshot.volume.size }}</volumeSize>
<description>{{ snapshot.description }}</description>
<encrypted>{{ snapshot.encrypted }}</encrypted>
<tagSet>
{% for tag in snapshot.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</CreateSnapshotResponse>"""
COPY_SNAPSHOT_RESPONSE = """<CopySnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<snapshotId>{{ snapshot.id }}</snapshotId>
</CopySnapshotResponse>"""
DESCRIBE_SNAPSHOTS_RESPONSE = """<DescribeSnapshotsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<snapshotSet>
{% for snapshot in snapshots %}
<item>
<snapshotId>{{ snapshot.id }}</snapshotId>
<volumeId>{{ snapshot.volume.id }}</volumeId>
<status>{{ snapshot.status }}</status>
<startTime>{{ snapshot.start_time}}</startTime>
<progress>100%</progress>
<ownerId>{{ snapshot.owner_id }}</ownerId>
<volumeSize>{{ snapshot.volume.size }}</volumeSize>
<description>{{ snapshot.description }}</description>
<encrypted>{{ snapshot.encrypted }}</encrypted>
<tagSet>
{% for tag in snapshot.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</snapshotSet>
</DescribeSnapshotsResponse>"""
DELETE_SNAPSHOT_RESPONSE = """<DeleteSnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteSnapshotResponse>"""
DESCRIBE_SNAPSHOT_ATTRIBUTES_RESPONSE = """
<DescribeSnapshotAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>a9540c9f-161a-45d8-9cc1-1182b89ad69f</requestId>
<snapshotId>snap-a0332ee0</snapshotId>
<createVolumePermission>
{% for group in groups %}
<item>
<group>{{ group }}</group>
</item>
{% endfor %}
{% for userId in userIds %}
<item>
<userId>{{ userId }}</userId>
</item>
{% endfor %}
</createVolumePermission>
</DescribeSnapshotAttributeResponse>
"""
MODIFY_SNAPSHOT_ATTRIBUTE_RESPONSE = """
<ModifySnapshotAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>666d2944-9276-4d6a-be12-1f4ada972fd8</requestId>
<return>true</return>
</ModifySnapshotAttributeResponse>
"""
| 41.452381 | 109 | 0.635482 | from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import filters_from_querystring
class ElasticBlockStore(BaseResponse):
    """Query-API handlers for the mocked EC2 Elastic Block Store actions.

    Each handler reads its parameters from the request query string,
    calls the mock backend (self.ec2_backend) and renders one of the XML
    templates defined below in this module.

    NOTE(review): when is_not_dryrun() is falsy a handler falls through
    and returns None -- presumably is_not_dryrun() raises for dry-run
    requests; confirm against BaseResponse.
    """
    def attach_volume(self):
        """Attach VolumeId to InstanceId at Device; render attachment XML."""
        volume_id = self._get_param("VolumeId")
        instance_id = self._get_param("InstanceId")
        device_path = self._get_param("Device")
        if self.is_not_dryrun("AttachVolume"):
            attachment = self.ec2_backend.attach_volume(
                volume_id, instance_id, device_path
            )
            template = self.response_template(ATTACHED_VOLUME_RESPONSE)
            return template.render(attachment=attachment)
    def copy_snapshot(self):
        """Copy SourceSnapshotId from SourceRegion; render the new snapshot id."""
        source_snapshot_id = self._get_param("SourceSnapshotId")
        source_region = self._get_param("SourceRegion")
        description = self._get_param("Description")
        if self.is_not_dryrun("CopySnapshot"):
            snapshot = self.ec2_backend.copy_snapshot(
                source_snapshot_id, source_region, description
            )
            template = self.response_template(COPY_SNAPSHOT_RESPONSE)
            return template.render(snapshot=snapshot)
    def create_snapshot(self):
        """Snapshot VolumeId, apply any snapshot-level tag specification."""
        volume_id = self._get_param("VolumeId")
        description = self._get_param("Description")
        tags = self._parse_tag_specification("TagSpecification")
        # Only tags declared for the "snapshot" resource type apply here.
        snapshot_tags = tags.get("snapshot", {})
        if self.is_not_dryrun("CreateSnapshot"):
            snapshot = self.ec2_backend.create_snapshot(volume_id, description)
            snapshot.add_tags(snapshot_tags)
            template = self.response_template(CREATE_SNAPSHOT_RESPONSE)
            return template.render(snapshot=snapshot)
    def create_volume(self):
        """Create a volume (optionally from SnapshotId) with optional tags."""
        size = self._get_param("Size")
        zone = self._get_param("AvailabilityZone")
        snapshot_id = self._get_param("SnapshotId")
        tags = self._parse_tag_specification("TagSpecification")
        # Only tags declared for the "volume" resource type apply here.
        volume_tags = tags.get("volume", {})
        encrypted = self._get_param("Encrypted", if_none=False)
        if self.is_not_dryrun("CreateVolume"):
            volume = self.ec2_backend.create_volume(size, zone, snapshot_id, encrypted)
            volume.add_tags(volume_tags)
            template = self.response_template(CREATE_VOLUME_RESPONSE)
            return template.render(volume=volume)
    def delete_snapshot(self):
        """Delete SnapshotId and return the static success XML."""
        snapshot_id = self._get_param("SnapshotId")
        if self.is_not_dryrun("DeleteSnapshot"):
            self.ec2_backend.delete_snapshot(snapshot_id)
            return DELETE_SNAPSHOT_RESPONSE
    def delete_volume(self):
        """Delete VolumeId and return the static success XML."""
        volume_id = self._get_param("VolumeId")
        if self.is_not_dryrun("DeleteVolume"):
            self.ec2_backend.delete_volume(volume_id)
            return DELETE_VOLUME_RESPONSE
    def describe_snapshots(self):
        """List snapshots, optionally restricted by ids and query filters."""
        filters = filters_from_querystring(self.querystring)
        snapshot_ids = self._get_multi_param("SnapshotId")
        snapshots = self.ec2_backend.describe_snapshots(
            snapshot_ids=snapshot_ids, filters=filters
        )
        template = self.response_template(DESCRIBE_SNAPSHOTS_RESPONSE)
        return template.render(snapshots=snapshots)
    def describe_volumes(self):
        """List volumes, optionally restricted by ids and query filters."""
        filters = filters_from_querystring(self.querystring)
        volume_ids = self._get_multi_param("VolumeId")
        volumes = self.ec2_backend.describe_volumes(
            volume_ids=volume_ids, filters=filters
        )
        template = self.response_template(DESCRIBE_VOLUMES_RESPONSE)
        return template.render(volumes=volumes)
    def describe_volume_attribute(self):
        # Not supported by the mock yet.
        raise NotImplementedError(
            "ElasticBlockStore.describe_volume_attribute is not yet implemented"
        )
    def describe_volume_status(self):
        # Not supported by the mock yet.
        raise NotImplementedError(
            "ElasticBlockStore.describe_volume_status is not yet implemented"
        )
    def detach_volume(self):
        """Detach VolumeId from InstanceId; render the detaching-state XML.

        (sic: the DETATCH spelling matches the template constant below.)
        """
        volume_id = self._get_param("VolumeId")
        instance_id = self._get_param("InstanceId")
        device_path = self._get_param("Device")
        if self.is_not_dryrun("DetachVolume"):
            attachment = self.ec2_backend.detach_volume(
                volume_id, instance_id, device_path
            )
            template = self.response_template(DETATCH_VOLUME_RESPONSE)
            return template.render(attachment=attachment)
    def enable_volume_io(self):
        # Not supported by the mock yet.
        if self.is_not_dryrun("EnableVolumeIO"):
            raise NotImplementedError(
                "ElasticBlockStore.enable_volume_io is not yet implemented"
            )
    def import_volume(self):
        # Not supported by the mock yet.
        if self.is_not_dryrun("ImportVolume"):
            raise NotImplementedError(
                "ElasticBlockStore.import_volume is not yet implemented"
            )
    def describe_snapshot_attribute(self):
        """Render the createVolumePermission groups/users of SnapshotId."""
        snapshot_id = self._get_param("SnapshotId")
        groups = self.ec2_backend.get_create_volume_permission_groups(snapshot_id)
        user_ids = self.ec2_backend.get_create_volume_permission_userids(snapshot_id)
        template = self.response_template(DESCRIBE_SNAPSHOT_ATTRIBUTES_RESPONSE)
        # "userIds" matches the loop variable used by the template.
        return template.render(snapshot_id=snapshot_id, groups=groups, userIds=user_ids)
    def modify_snapshot_attribute(self):
        """Add/remove createVolumePermission entries per OperationType.

        Unknown OperationType values change nothing; the success XML is
        returned regardless (no else branch).
        """
        snapshot_id = self._get_param("SnapshotId")
        operation_type = self._get_param("OperationType")
        groups = self._get_multi_param("UserGroup")
        user_ids = self._get_multi_param("UserId")
        if self.is_not_dryrun("ModifySnapshotAttribute"):
            if operation_type == "add":
                self.ec2_backend.add_create_volume_permission(
                    snapshot_id, user_ids=user_ids, groups=groups
                )
            elif operation_type == "remove":
                self.ec2_backend.remove_create_volume_permission(
                    snapshot_id, user_ids=user_ids, groups=groups
                )
            return MODIFY_SNAPSHOT_ATTRIBUTE_RESPONSE
    def modify_volume_attribute(self):
        # Not supported by the mock yet.
        if self.is_not_dryrun("ModifyVolumeAttribute"):
            raise NotImplementedError(
                "ElasticBlockStore.modify_volume_attribute is not yet implemented"
            )
    def reset_snapshot_attribute(self):
        # Not supported by the mock yet.
        if self.is_not_dryrun("ResetSnapshotAttribute"):
            raise NotImplementedError(
                "ElasticBlockStore.reset_snapshot_attribute is not yet implemented"
            )
CREATE_VOLUME_RESPONSE = """<CreateVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeId>{{ volume.id }}</volumeId>
<size>{{ volume.size }}</size>
{% if volume.snapshot_id %}
<snapshotId>{{ volume.snapshot_id }}</snapshotId>
{% else %}
<snapshotId/>
{% endif %}
<encrypted>{{ volume.encrypted }}</encrypted>
<availabilityZone>{{ volume.zone.name }}</availabilityZone>
<status>creating</status>
<createTime>{{ volume.create_time}}</createTime>
{% if volume.get_tags() %}
<tagSet>
{% for tag in volume.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
{% endif %}
<volumeType>standard</volumeType>
</CreateVolumeResponse>"""
DESCRIBE_VOLUMES_RESPONSE = """<DescribeVolumesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeSet>
{% for volume in volumes %}
<item>
<volumeId>{{ volume.id }}</volumeId>
<size>{{ volume.size }}</size>
{% if volume.snapshot_id %}
<snapshotId>{{ volume.snapshot_id }}</snapshotId>
{% else %}
<snapshotId/>
{% endif %}
<encrypted>{{ volume.encrypted }}</encrypted>
<availabilityZone>{{ volume.zone.name }}</availabilityZone>
<status>{{ volume.status }}</status>
<createTime>{{ volume.create_time}}</createTime>
<attachmentSet>
{% if volume.attachment %}
<item>
<volumeId>{{ volume.id }}</volumeId>
<instanceId>{{ volume.attachment.instance.id }}</instanceId>
<device>{{ volume.attachment.device }}</device>
<status>attached</status>
<attachTime>{{volume.attachment.attach_time}}</attachTime>
<deleteOnTermination>false</deleteOnTermination>
</item>
{% endif %}
</attachmentSet>
{% if volume.get_tags() %}
<tagSet>
{% for tag in volume.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
{% endif %}
<volumeType>standard</volumeType>
</item>
{% endfor %}
</volumeSet>
</DescribeVolumesResponse>"""
DELETE_VOLUME_RESPONSE = """<DeleteVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteVolumeResponse>"""
ATTACHED_VOLUME_RESPONSE = """<AttachVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeId>{{ attachment.volume.id }}</volumeId>
<instanceId>{{ attachment.instance.id }}</instanceId>
<device>{{ attachment.device }}</device>
<status>attaching</status>
<attachTime>{{attachment.attach_time}}</attachTime>
</AttachVolumeResponse>"""
DETATCH_VOLUME_RESPONSE = """<DetachVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeId>{{ attachment.volume.id }}</volumeId>
<instanceId>{{ attachment.instance.id }}</instanceId>
<device>{{ attachment.device }}</device>
<status>detaching</status>
<attachTime>2013-10-04T17:38:53.000Z</attachTime>
</DetachVolumeResponse>"""
CREATE_SNAPSHOT_RESPONSE = """<CreateSnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<snapshotId>{{ snapshot.id }}</snapshotId>
<volumeId>{{ snapshot.volume.id }}</volumeId>
<status>pending</status>
<startTime>{{ snapshot.start_time}}</startTime>
<progress>60%</progress>
<ownerId>{{ snapshot.owner_id }}</ownerId>
<volumeSize>{{ snapshot.volume.size }}</volumeSize>
<description>{{ snapshot.description }}</description>
<encrypted>{{ snapshot.encrypted }}</encrypted>
<tagSet>
{% for tag in snapshot.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</CreateSnapshotResponse>"""
COPY_SNAPSHOT_RESPONSE = """<CopySnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<snapshotId>{{ snapshot.id }}</snapshotId>
</CopySnapshotResponse>"""
DESCRIBE_SNAPSHOTS_RESPONSE = """<DescribeSnapshotsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<snapshotSet>
{% for snapshot in snapshots %}
<item>
<snapshotId>{{ snapshot.id }}</snapshotId>
<volumeId>{{ snapshot.volume.id }}</volumeId>
<status>{{ snapshot.status }}</status>
<startTime>{{ snapshot.start_time}}</startTime>
<progress>100%</progress>
<ownerId>{{ snapshot.owner_id }}</ownerId>
<volumeSize>{{ snapshot.volume.size }}</volumeSize>
<description>{{ snapshot.description }}</description>
<encrypted>{{ snapshot.encrypted }}</encrypted>
<tagSet>
{% for tag in snapshot.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</snapshotSet>
</DescribeSnapshotsResponse>"""
DELETE_SNAPSHOT_RESPONSE = """<DeleteSnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteSnapshotResponse>"""
DESCRIBE_SNAPSHOT_ATTRIBUTES_RESPONSE = """
<DescribeSnapshotAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>a9540c9f-161a-45d8-9cc1-1182b89ad69f</requestId>
<snapshotId>snap-a0332ee0</snapshotId>
<createVolumePermission>
{% for group in groups %}
<item>
<group>{{ group }}</group>
</item>
{% endfor %}
{% for userId in userIds %}
<item>
<userId>{{ userId }}</userId>
</item>
{% endfor %}
</createVolumePermission>
</DescribeSnapshotAttributeResponse>
"""
MODIFY_SNAPSHOT_ATTRIBUTE_RESPONSE = """
<ModifySnapshotAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>666d2944-9276-4d6a-be12-1f4ada972fd8</requestId>
<return>true</return>
</ModifySnapshotAttributeResponse>
"""
| true | true |
f7f4e1be58924666f53cea549c8bf28f7173b9f3 | 6,505 | py | Python | devstack/cfg.py | hagleitn/Openstack-Devstack2 | 88d3effc70c6479bba276856285dcb3974d76261 | [
"Apache-2.0"
] | 1 | 2015-02-21T05:30:46.000Z | 2015-02-21T05:30:46.000Z | devstack/cfg.py | hagleitn/Openstack-Devstack2 | 88d3effc70c6479bba276856285dcb3974d76261 | [
"Apache-2.0"
] | null | null | null | devstack/cfg.py | hagleitn/Openstack-Devstack2 | 88d3effc70c6479bba276856285dcb3974d76261 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import ConfigParser
from devstack import cfg_helpers
from devstack import date
from devstack import env
from devstack import exceptions as excp
from devstack import log as logging
from devstack import settings
from devstack import shell as sh
from devstack import utils
LOG = logging.getLogger("devstack.cfg")
# Matches bash-style "${ENV_VAR:-default}" values in the config file.
ENV_PAT = re.compile(r"^\s*\$\{([\w\d]+):\-(.*)\}\s*$")
# Matches "$(section:option)" cross-references inside default values.
SUB_MATCH = re.compile(r"(?:\$\(([\w\d]+):([\w\d]+))\)")
CACHE_MSG = "(value will now be internally cached)"
def get_config(cfg_fn=None, cfg_cls=None):
    """Build a stack config parser and load it from disk.

    Falls back to the default stack config location when *cfg_fn* is
    falsy, and to StackConfigParser when *cfg_cls* is falsy.
    """
    parser = (cfg_cls or StackConfigParser)()
    parser.read(cfg_fn or sh.canon_path(settings.STACK_CONFIG_LOCATION))
    return parser
class IgnoreMissingConfigParser(ConfigParser.RawConfigParser):
    """RawConfigParser variant that hands back typed defaults instead of
    raising when a section or option does not exist."""

    # Defaults returned for absent options, one per typed accessor.
    DEF_INT = 0
    DEF_FLOAT = 0.0
    DEF_BOOLEAN = False
    DEF_BASE = None

    def __init__(self):
        ConfigParser.RawConfigParser.__init__(self)
        # Keep option names case sensitive (the default optionxform
        # lower-cases them).
        self.optionxform = str

    def get(self, section, option):
        """Return the raw value, or DEF_BASE when the lookup fails."""
        try:
            return ConfigParser.RawConfigParser.get(self, section, option)
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            return IgnoreMissingConfigParser.DEF_BASE

    def getboolean(self, section, option):
        """Return the boolean value, or DEF_BOOLEAN when absent."""
        if self.has_option(section, option):
            return ConfigParser.RawConfigParser.getboolean(self, section, option)
        return IgnoreMissingConfigParser.DEF_BOOLEAN

    def getfloat(self, section, option):
        """Return the float value, or DEF_FLOAT when absent."""
        if self.has_option(section, option):
            return ConfigParser.RawConfigParser.getfloat(self, section, option)
        return IgnoreMissingConfigParser.DEF_FLOAT

    def getint(self, section, option):
        """Return the int value, or DEF_INT when absent."""
        if self.has_option(section, option):
            return ConfigParser.RawConfigParser.getint(self, section, option)
        return IgnoreMissingConfigParser.DEF_INT
class StackConfigParser(IgnoreMissingConfigParser):
    """Config parser with per-key caching, "${VAR:-default}" environment
    expansion and "$(section:option)" cross-reference substitution."""
    def __init__(self):
        IgnoreMissingConfigParser.__init__(self)
        # Cache of resolved values, keyed by cfg_helpers.make_id(section, option).
        self.configs_fetched = dict()
    def _resolve_value(self, section, option, value_gotten):
        """Post-process a freshly fetched value.

        NOTE(review): for host/ip this replaces the value unconditionally
        (no emptiness check, despite what the log message says), so a
        configured host ip is always overridden -- confirm intent.
        """
        if section == 'host' and option == 'ip':
            LOG.debug("Host ip from configuration/environment was empty, programatically attempting to determine it.")
            value_gotten = utils.get_host_ip()
            LOG.debug("Determined your host ip to be: [%s]" % (value_gotten))
        return value_gotten
    def getdefaulted(self, section, option, default_val):
        """Like get(), but return *default_val* for missing/blank values."""
        val = self.get(section, option)
        if not val or not val.strip():
            LOG.debug("Value [%s] found was not good enough, returning provided default [%s]" % (val, default_val))
            return default_val
        return val
    def get(self, section, option):
        """Return the resolved value for (section, option), caching it."""
        key = cfg_helpers.make_id(section, option)
        if key in self.configs_fetched:
            value = self.configs_fetched.get(key)
            LOG.debug("Fetched cached value [%s] for param [%s]" % (value, key))
        else:
            # Cache miss: expand env/default syntax, then post-process.
            LOG.debug("Fetching value for param [%s]" % (key))
            gotten_value = self._get_bashed(section, option)
            value = self._resolve_value(section, option, gotten_value)
            LOG.debug("Fetched [%s] for [%s] %s" % (value, key, CACHE_MSG))
            self.configs_fetched[key] = value
        return value
    def set(self, section, option, value):
        """Set the option and keep the fetch cache in sync."""
        key = cfg_helpers.make_id(section, option)
        LOG.audit("Setting config value [%s] for param [%s]" % (value, key))
        self.configs_fetched[key] = value
        IgnoreMissingConfigParser.set(self, section, option, value)
    def _resolve_replacements(self, value):
        """Substitute each "$(section:option)" reference inside *value*."""
        LOG.debug("Performing simple replacement on [%s]", value)
        # Each match is replaced by the referenced option's value ('' if unset).
        def replacer(match):
            section = match.group(1)
            option = match.group(2)
            return self.getdefaulted(section, option, '')
        return SUB_MATCH.sub(replacer, value)
    def _get_bashed(self, section, option):
        """Fetch the raw value and expand "${ENVKEY:-default}" syntax.

        Prefers the environment variable when set; otherwise resolves
        cross-references inside the default part.  Non-bash-like values
        are returned as-is; a missing option stays None.
        """
        value = IgnoreMissingConfigParser.get(self, section, option)
        if value is None:
            return value
        extracted_val = ''
        mtch = ENV_PAT.match(value)
        if mtch:
            env_key = mtch.group(1).strip()
            def_val = mtch.group(2).strip()
            # "${:-}" with neither part is meaningless -- reject it.
            if not def_val and not env_key:
                msg = "Invalid bash-like value [%s]" % (value)
                raise excp.BadParamException(msg)
            env_value = env.get_key(env_key)
            if env_value is None:
                LOG.debug("Extracting value from config provided default value [%s]" % (def_val))
                extracted_val = self._resolve_replacements(def_val)
                LOG.debug("Using config provided default value [%s] (no environment key)" % (extracted_val))
            else:
                extracted_val = env_value
                LOG.debug("Using enviroment provided value [%s]" % (extracted_val))
        else:
            extracted_val = value
            LOG.debug("Using raw config provided value [%s]" % (extracted_val))
        return extracted_val
def add_header(fn, contents):
    """Prepend a provenance header (source file, date, user/group) to
    *contents* and return the joined text."""
    header = [
        '# Adjusted source file %s' % (fn.strip()),
        "# On %s" % (date.rcf8222date()),
        "# By user %s, group %s" % (sh.getuser(), sh.getgroupname()),
        "# Comments may have been removed (TODO: darn python config writer)",
        # TODO Maybe use https://code.google.com/p/iniparse/ which seems to preserve comments!
        "",
    ]
    if contents:
        header.append(contents)
    return utils.joinlinesep(*header)
| 38.720238 | 118 | 0.651345 |
import re
import ConfigParser
from devstack import cfg_helpers
from devstack import date
from devstack import env
from devstack import exceptions as excp
from devstack import log as logging
from devstack import settings
from devstack import shell as sh
from devstack import utils
LOG = logging.getLogger("devstack.cfg")
# Matches bash-style "${ENV_VAR:-default}" values in the config file.
ENV_PAT = re.compile(r"^\s*\$\{([\w\d]+):\-(.*)\}\s*$")
# Matches "$(section:option)" cross-references inside default values.
SUB_MATCH = re.compile(r"(?:\$\(([\w\d]+):([\w\d]+))\)")
CACHE_MSG = "(value will now be internally cached)"
def get_config(cfg_fn=None, cfg_cls=None):
    """Create a config parser (StackConfigParser by default) and load it.

    Falls back to the default stack config location when *cfg_fn* is falsy.
    """
    if not cfg_fn:
        cfg_fn = sh.canon_path(settings.STACK_CONFIG_LOCATION)
    if not cfg_cls:
        cfg_cls = StackConfigParser
    config_instance = cfg_cls()
    config_instance.read(cfg_fn)
    return config_instance
class IgnoreMissingConfigParser(ConfigParser.RawConfigParser):
    """RawConfigParser that returns typed defaults instead of raising
    when a section or option is missing."""
    # Defaults returned for absent options, one per typed accessor.
    DEF_INT = 0
    DEF_FLOAT = 0.0
    DEF_BOOLEAN = False
    DEF_BASE = None
    def __init__(self):
        ConfigParser.RawConfigParser.__init__(self)
        # Keep option names case sensitive (optionxform lower-cases by default).
        self.optionxform = str
    def get(self, section, option):
        """Return the raw string value, or DEF_BASE when absent."""
        value = IgnoreMissingConfigParser.DEF_BASE
        try:
            value = ConfigParser.RawConfigParser.get(self, section, option)
        except ConfigParser.NoSectionError:
            pass
        except ConfigParser.NoOptionError:
            pass
        return value
    def getboolean(self, section, option):
        """Return the boolean value, or DEF_BOOLEAN when absent."""
        if not self.has_option(section, option):
            return IgnoreMissingConfigParser.DEF_BOOLEAN
        return ConfigParser.RawConfigParser.getboolean(self, section, option)
    def getfloat(self, section, option):
        """Return the float value, or DEF_FLOAT when absent."""
        if not self.has_option(section, option):
            return IgnoreMissingConfigParser.DEF_FLOAT
        return ConfigParser.RawConfigParser.getfloat(self, section, option)
    def getint(self, section, option):
        """Return the int value, or DEF_INT when absent."""
        if not self.has_option(section, option):
            return IgnoreMissingConfigParser.DEF_INT
        return ConfigParser.RawConfigParser.getint(self, section, option)
class StackConfigParser(IgnoreMissingConfigParser):
    """Config parser with per-key caching, "${VAR:-default}" environment
    expansion and "$(section:option)" cross-reference substitution."""
    def __init__(self):
        IgnoreMissingConfigParser.__init__(self)
        # Cache of resolved values, keyed by cfg_helpers.make_id(section, option).
        self.configs_fetched = dict()
    def _resolve_value(self, section, option, value_gotten):
        """Post-process a freshly fetched value.

        NOTE(review): for host/ip this replaces the value unconditionally
        (no emptiness check, despite what the log message says) -- confirm.
        """
        if section == 'host' and option == 'ip':
            LOG.debug("Host ip from configuration/environment was empty, programatically attempting to determine it.")
            value_gotten = utils.get_host_ip()
            LOG.debug("Determined your host ip to be: [%s]" % (value_gotten))
        return value_gotten
    def getdefaulted(self, section, option, default_val):
        """Like get(), but return *default_val* for missing/blank values."""
        val = self.get(section, option)
        if not val or not val.strip():
            LOG.debug("Value [%s] found was not good enough, returning provided default [%s]" % (val, default_val))
            return default_val
        return val
    def get(self, section, option):
        """Return the resolved value for (section, option), caching it."""
        key = cfg_helpers.make_id(section, option)
        if key in self.configs_fetched:
            value = self.configs_fetched.get(key)
            LOG.debug("Fetched cached value [%s] for param [%s]" % (value, key))
        else:
            # Cache miss: expand env/default syntax, then post-process.
            LOG.debug("Fetching value for param [%s]" % (key))
            gotten_value = self._get_bashed(section, option)
            value = self._resolve_value(section, option, gotten_value)
            LOG.debug("Fetched [%s] for [%s] %s" % (value, key, CACHE_MSG))
            self.configs_fetched[key] = value
        return value
    def set(self, section, option, value):
        """Set the option and keep the fetch cache in sync."""
        key = cfg_helpers.make_id(section, option)
        LOG.audit("Setting config value [%s] for param [%s]" % (value, key))
        self.configs_fetched[key] = value
        IgnoreMissingConfigParser.set(self, section, option, value)
    def _resolve_replacements(self, value):
        """Substitute each "$(section:option)" reference inside *value*."""
        LOG.debug("Performing simple replacement on [%s]", value)
        # Each match is replaced by the referenced option's value ('' if unset).
        def replacer(match):
            section = match.group(1)
            option = match.group(2)
            return self.getdefaulted(section, option, '')
        return SUB_MATCH.sub(replacer, value)
    def _get_bashed(self, section, option):
        """Fetch the raw value and expand "${ENVKEY:-default}" syntax.

        Prefers the environment variable when set; otherwise resolves
        cross-references inside the default part.  Non-bash-like values
        are returned as-is; a missing option stays None.
        """
        value = IgnoreMissingConfigParser.get(self, section, option)
        if value is None:
            return value
        extracted_val = ''
        mtch = ENV_PAT.match(value)
        if mtch:
            env_key = mtch.group(1).strip()
            def_val = mtch.group(2).strip()
            # "${:-}" with neither part is meaningless -- reject it.
            if not def_val and not env_key:
                msg = "Invalid bash-like value [%s]" % (value)
                raise excp.BadParamException(msg)
            env_value = env.get_key(env_key)
            if env_value is None:
                LOG.debug("Extracting value from config provided default value [%s]" % (def_val))
                extracted_val = self._resolve_replacements(def_val)
                LOG.debug("Using config provided default value [%s] (no environment key)" % (extracted_val))
            else:
                extracted_val = env_value
                LOG.debug("Using enviroment provided value [%s]" % (extracted_val))
        else:
            extracted_val = value
            LOG.debug("Using raw config provided value [%s]" % (extracted_val))
        return extracted_val
def add_header(fn, contents):
    """Prepend a provenance header (source file, date, user/group) to
    *contents* and return the joined text."""
    lines = list()
    lines.append('# Adjusted source file %s' % (fn.strip()))
    lines.append("# On %s" % (date.rcf8222date()))
    lines.append("# By user %s, group %s" % (sh.getuser(), sh.getgroupname()))
    lines.append("# Comments may have been removed (TODO: darn python config writer)")
    lines.append("")
    if contents:
        lines.append(contents)
    return utils.joinlinesep(*lines)
| true | true |
f7f4e231f867b658ad307a1e5ae115c7c45bb538 | 416 | py | Python | produto/admin.py | MatheusSaraiva/ecommerce | c508af86c89e772e0f44ec4b986a9aec88b34569 | [
"MIT"
] | null | null | null | produto/admin.py | MatheusSaraiva/ecommerce | c508af86c89e772e0f44ec4b986a9aec88b34569 | [
"MIT"
] | null | null | null | produto/admin.py | MatheusSaraiva/ecommerce | c508af86c89e772e0f44ec4b986a9aec88b34569 | [
"MIT"
] | null | null | null | from django.contrib import admin
from . import models
class VariacaoInline(admin.TabularInline):
    """Inline editor so product variations can be edited on the product page."""
    model = models.Variacao
    extra = 1  # number of blank variation rows shown by default
class ProdutoAdmin(admin.ModelAdmin):
    """Admin configuration for Produto: list columns plus inline variations."""
    list_display = ['nome', 'descricao_curta', 'get_preco_formatado', 'get_preco_promocional_formatado']
    inlines = [
        VariacaoInline
    ]
# Produto is managed with its customized admin class.
admin.site.register(models.Produto, ProdutoAdmin)
admin.site.register(models.Variacao) | 27.733333 | 104 | 0.752404 | from django.contrib import admin
from . import models
class VariacaoInline(admin.TabularInline):
    """Inline editor so product variations can be edited on the product page."""
    model = models.Variacao
    extra = 1  # number of blank variation rows shown by default
class ProdutoAdmin(admin.ModelAdmin):
    """Admin configuration for Produto: list columns plus inline variations."""
    list_display = [
        'nome',
        'descricao_curta',
        'get_preco_formatado',
        'get_preco_promocional_formatado',
    ]
    inlines = [VariacaoInline]
# Produto is managed with its customized admin class.
admin.site.register(models.Produto, ProdutoAdmin)
admin.site.register(models.Variacao) | true | true |
f7f4e2ef51489ef0b4957f5413e828ba25fd240f | 856 | py | Python | habitbreaker.py | Blaze-rahim/MLH_Day-2__habit_tracker | ffe9d6cf901ab640930e45e4700103691f01ce4c | [
"MIT"
] | null | null | null | habitbreaker.py | Blaze-rahim/MLH_Day-2__habit_tracker | ffe9d6cf901ab640930e45e4700103691f01ce4c | [
"MIT"
] | null | null | null | habitbreaker.py | Blaze-rahim/MLH_Day-2__habit_tracker | ffe9d6cf901ab640930e45e4700103691f01ce4c | [
"MIT"
] | null | null | null | from datetime import datetime
def habit_breaker(habit_name, startdate, cost_perday, mins_wasted):
    """Summarize progress since quitting *habit_name* at *startdate*.

    cost_perday: money the habit used to cost per day.
    mins_wasted: minutes the habit used to waste per day.

    Returns a dict with the elapsed time ('timesince', in hours for the
    first three days, in days afterwards), days left to the 30-day goal
    ('days_remaining', clamped at 0), minutes reclaimed ('mins_saved')
    and money reclaimed ('money_saved', a "$..." string; reclaimed time
    is valued at an assumed $10/hour wage).
    """
    goal = 30         # target streak length, in days
    wageperhour = 10  # assumed value of reclaimed time -- TODO confirm
    time_elapsed = (datetime.now() - startdate).total_seconds()
    hours = round(time_elapsed / 60 / 60, 1)
    days = round(hours / 24, 2)
    money_saved = cost_perday * days
    mins_saved = round(days * mins_wasted)
    total_money_saved = f"${round(money_saved + (mins_saved / 60 * wageperhour), 2)}"
    # Bug fix: this used to go negative once the 30-day goal was passed.
    days_to_go = max(0, round(goal - days))
    # Report hours for the first three days, days afterwards.
    if hours > 72:
        timesince = str(days) + " days"
    else:
        timesince = str(hours) + " hours"
    return {'habit': habit_name, 'timesince': timesince, 'days_remaining': days_to_go,
            'mins_saved': mins_saved, 'money_saved': total_money_saved}
print(habit_breaker('coffee', datetime(2021, 7, 20, 20, 20), cost_perday=2, mins_wasted=15)) | 29.517241 | 92 | 0.642523 | from datetime import datetime
def habit_breaker(habit_name, startdate, cost_perday, mins_wasted):
    """Summarize progress since quitting *habit_name* at *startdate*.

    Returns elapsed time ('timesince'), days left to the 30-day goal
    ('days_remaining'), minutes reclaimed ('mins_saved') and money
    reclaimed ('money_saved', reclaimed time valued at $10/hour).
    """
    goal = 30
    wageperhour = 10
    elapsed_seconds = (datetime.now() - startdate).total_seconds()
    elapsed_hours = round(elapsed_seconds / 60 / 60, 1)
    elapsed_days = round(elapsed_hours / 24, 2)
    money_saved = cost_perday * elapsed_days
    minutes_saved = round(elapsed_days * mins_wasted)
    reclaimed_total = round(money_saved + (minutes_saved / 60 * wageperhour), 2)
    # Report hours for the first three days, days afterwards.
    if elapsed_hours > 72:
        timesince = f"{elapsed_days} days"
    else:
        timesince = f"{elapsed_hours} hours"
    return {
        'habit': habit_name,
        'timesince': timesince,
        'days_remaining': round(goal - elapsed_days),
        'mins_saved': minutes_saved,
        'money_saved': f"${reclaimed_total}",
    }
print(habit_breaker('coffee', datetime(2021, 7, 20, 20, 20), cost_perday=2, mins_wasted=15)) | true | true |
f7f4e35718cc8b97d3dab336cfcc028fffff7792 | 6,072 | py | Python | eye_window.py | dguari1/Emotrics | 8b807d97663d6deb8efab7c74b31ee42f9218d1b | [
"MIT"
] | 5 | 2018-07-23T12:10:58.000Z | 2020-05-28T20:04:10.000Z | eye_window.py | dguari1/Emotrics | 8b807d97663d6deb8efab7c74b31ee42f9218d1b | [
"MIT"
] | 4 | 2021-02-15T11:31:56.000Z | 2022-03-14T20:24:45.000Z | eye_window.py | dguari1/Emotrics | 8b807d97663d6deb8efab7c74b31ee42f9218d1b | [
"MIT"
] | 10 | 2019-11-10T14:49:27.000Z | 2022-03-10T22:41:54.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 23 21:10:25 2017
@author: Diego L.Guarin -- diego_guarin at meei.harvard.edu
"""
from PyQt5 import QtWidgets, QtCore, QtGui
import numpy as np
from utilities import find_circle_from_points
"""
This window show the eye and allows the user to select 4 points around the iris,
it then fits a circle around these points. The user can accept the circle or
re-initialize the point selection
"""
class ProcessEye(QtWidgets.QDialog):
    """Modal dialog that lets the user mark four points around the iris.

    The embedded View shows the eye image and collects the clicks; once
    four points exist it fits and draws a circle.  'Done' accepts the
    fitted circle (left in self._circle) and closes; 'Clear' restarts.
    """

    def __init__(self, image=None):
        super(ProcessEye, self).__init__()
        self.setWindowTitle('Eye Selection')
        self._circle = None  # [center_x, center_y, radius] once accepted
        self._image = image
        self.label_title = QtWidgets.QLabel()
        self.label_title.setText('Please click on four points around the iris')
        self.label_title.setMaximumWidth(500)
        self.view = View(self)
        if self._image is not None:
            self.view._image = self._image
            self.view.set_picture()
        self.buttonReset = QtWidgets.QPushButton('Clear', self)
        self.buttonReset.clicked.connect(self.view.handleClearView)
        self.buttonDone = QtWidgets.QPushButton('Done', self)
        self.buttonDone.clicked.connect(self.handleReturn)
        layout = QtWidgets.QGridLayout(self)
        layout.addWidget(self.label_title, 0, 0, 1, 2)
        layout.addWidget(self.view, 1, 0, 1, 2)
        layout.addWidget(self.buttonDone, 2, 0, 1, 1)
        layout.addWidget(self.buttonReset, 2, 1, 1, 1)

    def handleReturn(self):
        """Accept the fitted circle and close the dialog.

        Bug fix: use >= instead of ==.  View.mouseReleaseEvent counts
        every release, so clicks after the fourth push _counter past 4
        and the old equality test left 'Done' permanently inert.
        """
        if self.view._counter >= 4:
            self._circle = self.view._circle
            self.close()
class View(QtWidgets.QGraphicsView):
    """Graphics view that displays the eye image and collects iris clicks.

    Records up to four click positions; on the fourth release it fits a
    circle through them (utilities.find_circle_from_points) and draws it
    in green on the scene.
    """

    def __init__(self, parent=None):
        super(View, self).__init__(parent)
        self._scene = QtWidgets.QGraphicsScene(self)
        self._photo = QtWidgets.QGraphicsPixmapItem()
        self._scene.addItem(self._photo)
        self.setScene(self._scene)
        self.setSceneRect(QtCore.QRectF(self.viewport().rect()))
        # Number of completed clicks; point collection stops at 4.
        self._counter = 0
        self._circle = None  # [center_x, center_y, radius] after the fit
        # Accumulated click positions, one (x, y) row per click.
        self._mouse_pos = np.array([]).reshape(0, 2)
        self._image = None  # image array supplied by the owning dialog

    def process_circle(self):
        """Fit a circle through the four stored points and draw it."""
        x = np.array([self._mouse_pos[0, 0], self._mouse_pos[1, 0], self._mouse_pos[2, 0], self._mouse_pos[3, 0]])
        y = np.array([self._mouse_pos[0, 1], self._mouse_pos[1, 1], self._mouse_pos[2, 1], self._mouse_pos[3, 1]])
        circle = find_circle_from_points(x, y)
        self._circle = [int(circle[0]), int(circle[1]), int(circle[2])]
        Ellipse = QtWidgets.QGraphicsEllipseItem(0, 0, self._circle[2] * 2, self._circle[2] * 2)
        pen = QtGui.QPen(QtCore.Qt.green)
        Ellipse.setPen(pen)
        # setPos takes the top-left corner of the ellipse's bounding box.
        Ellipse.setPos(circle[0] - self._circle[2], circle[1] - self._circle[2])
        Ellipse.setTransform(QtGui.QTransform())
        self._scene.addItem(Ellipse)

    def mousePressEvent(self, event):
        """Record the click position and draw a small red marker (max 4)."""
        if self._counter < 4:
            scenePos = self.mapToScene(event.pos())
            x = scenePos.x()
            y = scenePos.y()
            self._mouse_pos = np.concatenate((self._mouse_pos, [[float(x), float(y)]]), axis=0)
            pen = QtGui.QPen(QtCore.Qt.red)
            brush = QtGui.QBrush(QtCore.Qt.red)
            # Marker size scales with the scene width (roughly 1%).
            Rec = QtCore.QRectF(x, y, int(self._scene.width() * (1 / 100) + 1), int(self._scene.width() * (1 / 100) + 1))
            self._scene.addEllipse(Rec, pen, brush)
        QtWidgets.QGraphicsView.mousePressEvent(self, event)

    def mouseReleaseEvent(self, event):
        """Count the release and trigger the circle fit on the fourth one.

        Bug fix: only count releases while fewer than four points exist.
        The press handler already ignores extra clicks, but the old
        release handler incremented unconditionally, so a fifth click
        pushed _counter to 5 and the '_counter == 4' checks (here and in
        ProcessEye.handleReturn) could never fire again.
        """
        if self._counter < 4:
            self._counter += 1
            if self._counter == 4:
                self.process_circle()
        QtWidgets.QGraphicsView.mouseReleaseEvent(self, event)

    def set_picture(self):
        """Wrap self._image in a pixmap item and fit the view to it.

        Assumes self._image is an RGB array of shape (height, width, 3)
        -- the 3*width stride and Format_RGB888 depend on it; TODO
        confirm dtype is uint8 at the callers.
        """
        image = self._image.copy()
        height, width, channel = image.shape
        bytesPerLine = 3 * width
        img_Qt = QtGui.QImage(image.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)
        img_show = QtGui.QPixmap.fromImage(img_Qt)
        self._photo = QtWidgets.QGraphicsPixmapItem()
        self._photo.setPixmap(img_show)
        self._scene.addItem(self._photo)
        rect = QtCore.QRectF(self._photo.pixmap().rect())
        self.fitInView(rect)
        self.setSceneRect(rect)

    def resizeEvent(self, event):
        """Keep the picture fitted to the viewport on widget resize."""
        rect = QtCore.QRectF(self._photo.pixmap().rect())
        self.fitInView(rect)
        self.setSceneRect(rect)

    def handleClearView(self):
        """Discard all points and markers and restart the selection."""
        self._scene.clear()
        self.set_picture()
        self._circle = None
        self._counter = 0
        self._mouse_pos = np.array([]).reshape(0, 2)
# Manual test harness: show the dialog stand-alone.
if __name__ == '__main__':
    import sys
    # Reuse an already-running QApplication if there is one
    # (e.g. when launched from an interactive session).
    if not QtWidgets.QApplication.instance():
        app = QtWidgets.QApplication(sys.argv)
    else:
        app = QtWidgets.QApplication.instance()
    GUI = ProcessEye()
    #GUI.resize(640, 480)
    GUI.show()
    sys.exit(app.exec_())
| 35.928994 | 107 | 0.610343 |
from PyQt5 import QtWidgets, QtCore, QtGui
import numpy as np
from utilities import find_circle_from_points
class ProcessEye(QtWidgets.QDialog):
    """Modal dialog for marking four points around the iris of an eye image.

    The embedded View collects the clicks and fits a circle; 'Done'
    stores the fitted circle in self._circle and closes, 'Clear'
    restarts the selection.
    """
    def __init__(self, image = None):
        super(ProcessEye, self).__init__()
        self.setWindowTitle('Eye Selection')
        # Fitted circle [center_x, center_y, radius] once accepted.
        self._circle = None
        self._image = image
        self.label_title = QtWidgets.QLabel()
        self.label_title.setText('Please click on four points around the iris')
        self.label_title.setMaximumWidth(500)
        self.view = View(self)
        if self._image is not None:
            self.view._image = self._image
            self.view.set_picture()
        self.buttonReset = QtWidgets.QPushButton('Clear', self)
        self.buttonReset.clicked.connect(self.view.handleClearView)
        self.buttonDone = QtWidgets.QPushButton('Done',self)
        self.buttonDone.clicked.connect(self.handleReturn)
        layout = QtWidgets.QGridLayout(self)
        layout.addWidget(self.label_title,0,0,1,2)
        layout.addWidget(self.view,1,0,1,2)
        layout.addWidget(self.buttonDone,2,0,1,1)
        layout.addWidget(self.buttonReset,2,1,1,1)
    def handleReturn(self):
        """Accept the fitted circle and close once four points were clicked.

        NOTE(review): View.mouseReleaseEvent increments its counter on
        every release, so extra clicks can push _counter past 4, after
        which this equality test never passes and 'Done' stops working
        -- consider '>= 4'.
        """
        if self.view._counter == 4:
            self._circle = self.view._circle
            self.close()
class View(QtWidgets.QGraphicsView):
    """Graphics view that displays the eye image and collects iris clicks.

    Records up to four click positions; on the fourth release it fits a
    circle through them (utilities.find_circle_from_points) and draws it
    in green on the scene.
    """
    def __init__(self, parent=None):
        super(View, self).__init__(parent)
        self._scene = QtWidgets.QGraphicsScene(self)
        self._photo = QtWidgets.QGraphicsPixmapItem()
        self._scene.addItem(self._photo)
        self.setScene(self._scene)
        self.setSceneRect(QtCore.QRectF(self.viewport().rect()))
        # Number of completed clicks; point collection stops at 4.
        self._counter = 0
        # Fitted circle [center_x, center_y, radius] after the fit.
        self._circle = None
        # Accumulated click positions, one (x, y) row per click.
        self._mouse_pos= np.array([]).reshape(0,2)
        self._image = None
    def process_circle(self):
        """Fit a circle through the four stored points and draw it."""
        x = np.array([self._mouse_pos[0,0],self._mouse_pos[1,0],self._mouse_pos[2,0],self._mouse_pos[3,0]])
        y = np.array([self._mouse_pos[0,1],self._mouse_pos[1,1],self._mouse_pos[2,1],self._mouse_pos[3,1]])
        circle = find_circle_from_points(x,y)
        self._circle = [int(circle[0]),int(circle[1]),int(circle[2])]
        Ellipse = QtWidgets.QGraphicsEllipseItem(0,0,self._circle[2]*2,self._circle[2]*2)
        pen = QtGui.QPen(QtCore.Qt.green)
        Ellipse.setPen(pen)
        # setPos takes the top-left corner of the ellipse's bounding box.
        Ellipse.setPos(circle[0]-self._circle[2],circle[1]-self._circle[2])
        Ellipse.setTransform(QtGui.QTransform())
        self._scene.addItem(Ellipse)
    def mousePressEvent(self,event):
        """Record the click position and draw a small red marker (max 4)."""
        if self._counter < 4:
            scenePos = self.mapToScene(event.pos())
            x = scenePos.x()
            y = scenePos.y()
            self._mouse_pos = np.concatenate((self._mouse_pos, [[float(x),float(y)]]), axis=0)
            pen = QtGui.QPen(QtCore.Qt.red)
            brush = QtGui.QBrush(QtCore.Qt.red)
            # Marker size scales with the scene width (roughly 1%).
            Rec= QtCore.QRectF(x, y,int(self._scene.width()*(1/100)+1),int(self._scene.width()*(1/100)+1))
            self._scene.addEllipse(Rec, pen, brush)
        QtWidgets.QGraphicsView.mousePressEvent(self, event)
    def mouseReleaseEvent(self,event):
        """Count the release; trigger the circle fit on the fourth one.

        NOTE(review): this increments even after four points exist (the
        press handler ignores extra clicks but this does not), so a
        fifth click pushes _counter to 5 and the '== 4' checks here and
        in ProcessEye.handleReturn never fire again -- consider guarding
        with 'if self._counter < 4'.
        """
        self._counter +=1
        if self._counter == 4:
            self.process_circle()
        QtWidgets.QGraphicsView.mouseReleaseEvent(self, event)
    def set_picture(self):
        """Wrap self._image in a pixmap item and fit the view to it.

        Assumes self._image is an RGB array of shape (height, width, 3)
        -- the 3*width stride and Format_RGB888 depend on it; TODO
        confirm dtype is uint8 at the callers.
        """
        image = self._image.copy()
        height, width, channel = image.shape
        bytesPerLine = 3 * width
        img_Qt = QtGui.QImage(image.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)
        img_show = QtGui.QPixmap.fromImage(img_Qt)
        self._photo = QtWidgets.QGraphicsPixmapItem()
        self._photo.setPixmap(img_show)
        self._scene.addItem(self._photo)
        rect = QtCore.QRectF(self._photo.pixmap().rect())
        self.fitInView(rect)
        self.setSceneRect(rect)
    def resizeEvent(self, event):
        """Keep the picture fitted to the viewport on widget resize."""
        rect = QtCore.QRectF(self._photo.pixmap().rect())
        self.fitInView(rect)
        self.setSceneRect(rect)
    def handleClearView(self):
        """Discard all points and markers and restart the selection."""
        self._scene.clear()
        self.set_picture()
        self._circle = None
        self._counter = 0
        self._mouse_pos= np.array([]).reshape(0,2)
if __name__ == '__main__':
    import sys
    # reuse an already-running QApplication (e.g. inside an IDE/IPython session)
    if not QtWidgets.QApplication.instance():
        app = QtWidgets.QApplication(sys.argv)
    else:
        app = QtWidgets.QApplication.instance()
    GUI = ProcessEye()
    GUI.show()
    sys.exit(app.exec_())
| true | true |
f7f4e41ffa00a98ed77dac9e10afbb609cccf4d3 | 2,171 | py | Python | assignments/2019/assignment1/cs231n/vis_utils.py | comratvlad/cs231n.github.io | 63c72c3e8e88a6edfea7db7df604d715416ba15b | [
"MIT"
] | null | null | null | assignments/2019/assignment1/cs231n/vis_utils.py | comratvlad/cs231n.github.io | 63c72c3e8e88a6edfea7db7df604d715416ba15b | [
"MIT"
] | null | null | null | assignments/2019/assignment1/cs231n/vis_utils.py | comratvlad/cs231n.github.io | 63c72c3e8e88a6edfea7db7df604d715416ba15b | [
"MIT"
] | null | null | null | from builtins import range
from math import sqrt, ceil
import numpy as np
def visualize_grid(Xs, ubound=255.0, padding=1):
"""
Reshape a 4D tensor of image data to a grid for easy visualization.
Inputs:
- Xs: Data of shape (N, H, W, C)
- ubound: Output grid will have values scaled to the range [0, ubound]
- padding: The number of blank pixels between elements of the grid
"""
(N, H, W, C) = Xs.shape
grid_size = int(ceil(sqrt(N)))
grid_height = H * grid_size + padding * (grid_size - 1)
grid_width = W * grid_size + padding * (grid_size - 1)
grid = np.zeros((grid_height, grid_width, C))
next_idx = 0
y0, y1 = 0, H
for y in range(grid_size):
x0, x1 = 0, W
for x in range(grid_size):
if next_idx < N:
img = Xs[next_idx]
low, high = np.min(img), np.max(img)
grid[y0:y1, x0:x1] = ubound * (img - low) / (high - low)
# grid[y0:y1, x0:x1] = Xs[next_idx]
next_idx += 1
x0 += W + padding
x1 += W + padding
y0 += H + padding
y1 += H + padding
# grid_max = np.max(grid)
# grid_min = np.min(grid)
# grid = ubound * (grid - grid_min) / (grid_max - grid_min)
return grid
def vis_grid(Xs):
    """Tile N images of shape (H, W, C) into a square montage normalized to [0, 1].

    Cells beyond N and the 1-pixel gutters are filled with min(Xs) before
    normalization.
    """
    N, H, W, C = Xs.shape
    side = int(ceil(sqrt(N)))
    canvas = np.ones((side * H + side, side * W + side, C), Xs.dtype)
    canvas *= np.min(Xs)
    for n in range(N):
        row, col = divmod(n, side)
        top = row * (H + 1)
        left = col * (W + 1)
        canvas[top:top + H, left:left + W, :] = Xs[n, :, :, :]
    # rescale the whole montage to [0, 1]
    lo, hi = canvas.min(), canvas.max()
    canvas = (canvas - lo) / (hi - lo)
    return canvas
def vis_nn(rows):
    """Tile a 2D list-of-lists of equally-shaped images into one montage.

    rows[r][c] is an (H, W, C) array; the result has a 1-pixel gutter between
    cells (initialized to 1 before normalization) and is rescaled to [0, 1].
    """
    n_rows = len(rows)
    n_cols = len(rows[0])
    H, W, C = rows[0][0].shape
    canvas = np.ones((n_rows * H + n_rows, n_cols * W + n_cols, C), rows[0][0].dtype)
    for r, row in enumerate(rows):
        for c in range(n_cols):
            top = r * (H + 1)
            left = c * (W + 1)
            canvas[top:top + H, left:left + W, :] = row[c]
    # rescale the whole montage to [0, 1]
    lo, hi = canvas.min(), canvas.max()
    return (canvas - lo) / (hi - lo)
| 28.565789 | 74 | 0.505758 | from builtins import range
from math import sqrt, ceil
import numpy as np
def visualize_grid(Xs, ubound=255.0, padding=1):
    """
    Reshape a 4D tensor of image data to a grid for easy visualization.

    Inputs:
    - Xs: Data of shape (N, H, W, C)
    - ubound: Output grid will have values scaled to the range [0, ubound]
    - padding: The number of blank pixels between elements of the grid
    """
    (N, H, W, C) = Xs.shape
    grid_size = int(ceil(sqrt(N)))
    grid_height = H * grid_size + padding * (grid_size - 1)
    grid_width = W * grid_size + padding * (grid_size - 1)
    grid = np.zeros((grid_height, grid_width, C))
    next_idx = 0
    y0, y1 = 0, H
    for y in range(grid_size):
        x0, x1 = 0, W
        for x in range(grid_size):
            if next_idx < N:
                img = Xs[next_idx]
                low, high = np.min(img), np.max(img)
                # rescale this tile to [0, ubound]
                # NOTE(review): divides by zero when the tile is constant (high == low)
                grid[y0:y1, x0:x1] = ubound * (img - low) / (high - low)
                next_idx += 1
            x0 += W + padding
            x1 += W + padding
        y0 += H + padding
        y1 += H + padding
    return grid
def vis_grid(Xs):
    """Visualize a grid of images: tile (N, H, W, C) into a square montage normalized to [0, 1]."""
    (N, H, W, C) = Xs.shape
    A = int(ceil(sqrt(N)))
    # extra A rows/cols give a 1-pixel gutter between cells
    G = np.ones((A*H+A, A*W+A, C), Xs.dtype)
    G *= np.min(Xs)
    n = 0
    for y in range(A):
        for x in range(A):
            if n < N:
                G[y*H+y:(y+1)*H+y, x*W+x:(x+1)*W+x, :] = Xs[n,:,:,:]
                n += 1
    # normalize to [0, 1]
    maxg = G.max()
    ming = G.min()
    G = (G - ming)/(maxg-ming)
    return G
def vis_nn(rows):
    """Visualize an array of arrays of images: rows[r][c] is an (H, W, C) array."""
    N = len(rows)
    D = len(rows[0])
    H,W,C = rows[0][0].shape
    Xs = rows[0][0]
    # extra N/D rows/cols give a 1-pixel gutter between cells
    G = np.ones((N*H+N, D*W+D, C), Xs.dtype)
    for y in range(N):
        for x in range(D):
            G[y*H+y:(y+1)*H+y, x*W+x:(x+1)*W+x, :] = rows[y][x]
    # normalize to [0, 1]
    maxg = G.max()
    ming = G.min()
    G = (G - ming)/(maxg-ming)
    return G
| true | true |
f7f4e42209e5860c9c5a01c292fc1d1c54d20d72 | 832 | py | Python | src/pretix/base/migrations/0177_auto_20210301_1510.py | fabm3n/pretix | 520fb620888d5c434665a6a4a33cb2ab22dd42c7 | [
"Apache-2.0"
] | 1,248 | 2015-04-24T13:32:06.000Z | 2022-03-29T07:01:36.000Z | src/pretix/base/migrations/0177_auto_20210301_1510.py | fabm3n/pretix | 520fb620888d5c434665a6a4a33cb2ab22dd42c7 | [
"Apache-2.0"
] | 2,113 | 2015-02-18T18:58:16.000Z | 2022-03-31T11:12:32.000Z | src/pretix/base/migrations/0177_auto_20210301_1510.py | fabm3n/pretix | 520fb620888d5c434665a6a4a33cb2ab22dd42c7 | [
"Apache-2.0"
] | 453 | 2015-05-13T09:29:06.000Z | 2022-03-24T13:39:16.000Z | # Generated by Django 3.0.10 on 2021-03-01 15:10
import phonenumber_field.modelfields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add cached name, structured name parts, and phone number to WaitingListEntry."""

    dependencies = [
        ('pretixbase', '0176_auto_20210205_1512'),
    ]
    operations = [
        migrations.AddField(
            model_name='waitinglistentry',
            name='name_cached',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='waitinglistentry',
            name='name_parts',
            field=models.JSONField(default=dict),
        ),
        migrations.AddField(
            model_name='waitinglistentry',
            name='phone',
            field=phonenumber_field.modelfields.PhoneNumberField(max_length=128, null=True, region=None),
        ),
    ]
| 26.83871 | 105 | 0.616587 |
import phonenumber_field.modelfields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add cached name, structured name parts, and phone number to WaitingListEntry."""

    dependencies = [
        ('pretixbase', '0176_auto_20210205_1512'),
    ]
    operations = [
        migrations.AddField(
            model_name='waitinglistentry',
            name='name_cached',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='waitinglistentry',
            name='name_parts',
            field=models.JSONField(default=dict),
        ),
        migrations.AddField(
            model_name='waitinglistentry',
            name='phone',
            field=phonenumber_field.modelfields.PhoneNumberField(max_length=128, null=True, region=None),
        ),
    ]
| true | true |
f7f4e4ca17aea8afb366e724c048ce22a944d964 | 9,550 | py | Python | BiLiVideoConvert.py | LengGeng/BiLiVideoConvert | 909d6b2dbfb438967c5a8968830ebce953c76481 | [
"MIT"
] | 1 | 2022-03-23T13:21:21.000Z | 2022-03-23T13:21:21.000Z | BiLiVideoConvert.py | LengGeng/BiLiVideoConvert | 909d6b2dbfb438967c5a8968830ebce953c76481 | [
"MIT"
] | null | null | null | BiLiVideoConvert.py | LengGeng/BiLiVideoConvert | 909d6b2dbfb438967c5a8968830ebce953c76481 | [
"MIT"
] | null | null | null | import re
import os
import json
import warnings
from sys import argv
from getopt import getopt
from typing import Union
from subprocess import Popen
DEVNULL = open(os.devnull, 'w')
CONFIG = {}
CONFIG_PATH = "config.json"
FORMAT_VIDEO_NAME = "{i}、{title}-{name}"
class BiLiVideoConvert:
    """Convert Bilibili app downloads (separate video/audio streams) into mp4 files."""
    def __init__(self, input_dir: str = None, output_dir: str = None):
        """
        input_dir corresponds to the Android/data/tv.danmaku.bili/download
        directory, i.e. it contains one sub-directory per downloaded video.
        :param input_dir: directory holding the downloaded videos
        :param output_dir: directory the converted videos are written to
        """
        # Fall back to the config file when an argument is omitted; the
        # config lookup itself falls back to the defaults below.
        if input_dir is None:
            input_dir = CONFIG.get("input_dir", "download")
        if output_dir is None:
            output_dir = CONFIG.get("output_dir", "output")
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.movie_dirs = os.listdir(input_dir)
        self.movies = {}
    def parse_movies(self):
        """Group the per-part entry data by video id into self.movies."""
        for movie_info in self.get_movie_infos():
            avid = movie_info.get("avid")
            if avid:
                avid = f"AV{avid}"
            bvid = movie_info["bvid"]
            season_id = movie_info["season_id"]
            if season_id:
                season_id = f"S_{season_id}"
            vid = avid or bvid or season_id
            # first time we see this video: create its default record
            if vid not in self.movies:
                self.movies[vid] = {
                    "avid": avid,
                    "bvid": bvid,
                    "season_id": season_id,
                    "title": movie_info['title'],  # video title
                    "total": 0,  # total number of parts
                    "download_total": 0,  # number of fully downloaded parts
                    "page_data": []  # per-part (page) data
                }
            # record this part and whether its download finished
            is_completed = movie_info['is_completed']  # download finished?
            self.movies[vid]["total"] += 1
            page_data = {
                "page": movie_info["page"],
                "part": movie_info["part"],
                "is_completed": is_completed
            }
            if is_completed:
                self.movies[vid]["download_total"] += 1
                page_data["video_path"] = movie_info["video_path"]
                page_data["audio_path"] = movie_info["audio_path"]
            self.movies[vid]["page_data"].append(page_data)
    def get_movie_infos(self) -> dict:
        """
        Yield the parsed entry info of every video part found under input_dir.
        :return:
        """
        for movie_dir in self.movie_dirs:
            # absolute-ish path of this download project
            movie_ads_dir = os.path.join(self.input_dir, movie_dir)
            # walk the project directory tree
            for folder_name, sub_folders, file_names in os.walk(movie_ads_dir):
                entry_file = os.path.join(folder_name, "entry.json")
                # the presence of entry.json marks a video part directory
                if os.path.exists(entry_file):
                    # parse the entry file
                    entry = parse_entry(entry_file)
                    if entry:
                        yield entry
                    # if movie_dir == str(entry['vid'])
    def convert(self, vid: Union[int, str]):
        """Convert every fully-downloaded part of the given video id to mp4."""
        # look up the video record
        if vid in self.movies:
            movie_info = self.movies.get(vid)
            print(movie_info)
        else:
            print("无效的视频ID")
            return
        # build the output directory for this video
        project_output_dir = filename_filter(os.path.join(self.output_dir, movie_info["title"]))
        # create the directory if it does not exist yet
        if not os.path.exists(project_output_dir):
            os.makedirs(project_output_dir)
        # convert each part
        for page_data in movie_info["page_data"]:
            # skip parts whose download did not finish
            if page_data["is_completed"]:
                # formatted output file name
                page_name = format_video_name(**movie_info, **page_data)
                composite_video(
                    os.path.abspath(page_data["video_path"]),
                    os.path.abspath(page_data["audio_path"]),
                    os.path.abspath(os.path.join(project_output_dir, filename_filter(page_name)))
                )
            else:
                print(f"{movie_info.get('title')}-{page_data.get('part')}未下载完成!")
    def show_info(self):
        """
        List the detected videos and interactively ask which one to convert.
        :return:
        """
        movies_list = []
        for index, [vid, movie] in enumerate(self.movies.items()):
            movies_list.append(vid)
            print(f"{index + 1}、({vid: <12})[{movie['download_total']:-3}/{movie['total']:-3}] {movie['title']}")
        index: str = input("请输入要转换的编号(all 全部, exit 退出): ")
        if index == "all":
            for vid in movies_list:
                self.convert(vid)
        elif index in ["exit"]:
            print("用户退出")
            exit(0)
        else:
            self.convert(movies_list[int(index) - 1])
    def run(self):
        """
        Main entry point: parse the downloads, then show the interactive picker.
        :return:
        """
        print("开始解析视频信息...")
        self.parse_movies()
        print("解析视频信息完成")
        self.show_info()
        pass
def format_video_name(**video_info: dict) -> str:
    """
    Build the output file name from the FORMAT_VIDEO_NAME template.

    Supported placeholders:
      {title}                 video title
      {name} / {part}         part (episode) name
      {i} / {index} / {page}  1-based part index

    :param video_info: entry metadata for one part
    :return: formatted file name, always ending in ".mp4"
    """
    title = video_info.get("title", "")
    part = video_info.get("part", "")
    page = str(video_info.get("page", ""))
    # TODO: skip the index when the part name already starts with it
    # (part.startswith(page)) — carried over from the original.
    substitutions = (
        ("{i}", page),
        ("{index}", page),
        ("{page}", page),
        ("{name}", part),
        ("{part}", part),
        ("{title}", title),
    )
    result = FORMAT_VIDEO_NAME + ".mp4"
    for placeholder, value in substitutions:
        result = result.replace(placeholder, value)
    return result
def composite_video(video_path: str, audio_path: str, out_path: str):
    """
    Mux a video stream and an audio stream into a single mp4 via ffmpeg.
    :param video_path: path of the downloaded video stream
    :param audio_path: path of the downloaded audio stream
    :param out_path: path of the resulting mp4 file
    :return:
    """
    # build the ffmpeg command (-codec copy: remux without re-encoding,
    # -y: overwrite an existing output file); blocks until ffmpeg exits
    cmd = f'ffmpeg -y -i "{video_path}" -i "{audio_path}" -codec copy "{out_path}"'
    print('*' * 50)
    print("视频源:" + video_path)
    print("音频源:" + audio_path)
    print("输出源:" + out_path)
    Popen(cmd, stderr=DEVNULL).wait()
def filename_filter(filename: str, repl: str = '') -> str:
    """
    Replace characters that are not allowed in (Windows) file names.
    :param filename: the raw file name
    :param repl: replacement string for each illegal character
    :return: a legal file name
    """
    illegal = re.compile('[/:*?"<>|]')
    return illegal.sub(repl, filename)
def parse_entry(entry_file):
    """
    Parse a video entry (metadata) file.
    :param entry_file: path of the entry.json file
    :return: video info dict, or None when the media type is unsupported
             or the file cannot be parsed
    """
    # open and parse the file
    try:
        with open(entry_file, 'r', encoding='utf-8') as fp:
            entry: dict = json.load(fp)
            # media type
            media_type: int = entry.get('media_type')  # media type; 1 is probably the blv format
            if media_type not in [2]:
                # unsupported media type
                warnings.warn(f"Warning Unsupported media type:{media_type} in {entry_file}")
                return
            # video ids
            avid: int = entry.get('avid')  # avid
            bvid: str = entry.get('bvid')  # bvid
            season_id: int = entry.get('season_id')  # season_id, series (bangumi) id
            # video info
            title: str = entry.get("title")  # video title
            is_completed: bool = entry.get("is_completed", False)  # download finished?
            # per-part data lives under "page_data" for normal videos,
            # under "ep" for season (bangumi) downloads
            if avid or bvid:
                page = entry["page_data"]["page"]  # part index
                part = entry["page_data"]["part"]  # part title
            if season_id:
                page = entry["ep"]["page"]
                part = entry["ep"]["index_title"]
            item = {
                "avid": avid,
                "bvid": bvid,
                "season_id": season_id,
                "title": title,
                "is_completed": is_completed,
                "page": page,
                "part": part
            }
            # when the download finished, locate the media stream files
            if is_completed:
                # directory that holds the downloaded streams
                type_tag = entry.get('type_tag')
                # video stream
                video_path = os.path.join(os.path.dirname(entry_file), type_tag, "video.m4s")
                if os.path.exists(video_path):  # only record existing files
                    item["video_path"] = video_path
                # audio stream
                audio_path = os.path.join(os.path.dirname(entry_file), type_tag, "audio.m4s")
                if os.path.exists(audio_path):  # only record existing files
                    item["audio_path"] = audio_path
            return item
    except json.decoder.JSONDecodeError as e:
        # file could not be parsed
        warnings.warn(f"Warning file could not parse: {entry_file} \n{e.msg}")
def get_command_args(args=None) -> tuple:
    """
    Parse the -i (input dir) / -o (output dir) command line options.

    :param args: argument list to parse; defaults to sys.argv[1:]
    :return: (input_dir, output_dir); an unset option is returned as None
    """
    if args is None:
        args = argv[1:]
    i = o = None
    opts, _ = getopt(args, "i:o:")
    for opt, arg in opts:
        # getopt reports option names WITH their leading dash ("-i"), so the
        # original comparisons against "i"/"o" could never match.
        if opt == "-i":
            i = arg
        if opt == "-o":
            o = arg
    return i, o
def load_config():
    """
    Load the configuration from CONFIG_PATH into the global CONFIG.

    Creates and persists a default configuration when the file is missing;
    prints an error when the file exists but is not valid JSON.
    :return:
    """
    try:
        global CONFIG
        with open(CONFIG_PATH, "r") as fp:
            CONFIG = json.load(fp)
    except FileNotFoundError:
        print("create default config.")
        CONFIG = {
            "input_dir": "download",
            "output_dir": "output"
        }
        refresh_config()
    except json.decoder.JSONDecodeError:
        print("读取配置文件错误,请检查配置文件,若无法使用可尝试删除配置文件。")
def refresh_config():
    """
    Persist the global CONFIG to CONFIG_PATH as UTF-8 JSON.
    :return:
    """
    with open(CONFIG_PATH, 'w', encoding="utf-8") as fp:
        json.dump(CONFIG, fp, ensure_ascii=False)
def main():
    """Load the config, build the converter from CLI args, and run it."""
    load_config()
    video_convert = BiLiVideoConvert(*get_command_args())
    video_convert.run()
if __name__ == '__main__':
    main()
| 30.806452 | 113 | 0.52555 | import re
import os
import json
import warnings
from sys import argv
from getopt import getopt
from typing import Union
from subprocess import Popen
DEVNULL = open(os.devnull, 'w')
CONFIG = {}
CONFIG_PATH = "config.json"
FORMAT_VIDEO_NAME = "{i}、{title}-{name}"
class BiLiVideoConvert:
    """Convert Bilibili app downloads (separate video/audio streams) into mp4 files."""
    def __init__(self, input_dir: str = None, output_dir: str = None):
        """
        :param input_dir: directory holding the downloaded videos
                          (one sub-directory per video); falls back to config
        :param output_dir: directory the converted videos are written to
        """
        if input_dir is None:
            input_dir = CONFIG.get("input_dir", "download")
        if output_dir is None:
            output_dir = CONFIG.get("output_dir", "output")
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.movie_dirs = os.listdir(input_dir)
        self.movies = {}
    def parse_movies(self):
        """Group the per-part entry data by video id into self.movies."""
        for movie_info in self.get_movie_infos():
            avid = movie_info.get("avid")
            if avid:
                avid = f"AV{avid}"
            bvid = movie_info["bvid"]
            season_id = movie_info["season_id"]
            if season_id:
                season_id = f"S_{season_id}"
            vid = avid or bvid or season_id
            # first time we see this video: create its default record
            if vid not in self.movies:
                self.movies[vid] = {
                    "avid": avid,
                    "bvid": bvid,
                    "season_id": season_id,
                    "title": movie_info['title'],
                    "total": 0,
                    "download_total": 0,
                    "page_data": []
                }
            # record this part and whether its download finished
            is_completed = movie_info['is_completed']
            self.movies[vid]["total"] += 1
            page_data = {
                "page": movie_info["page"],
                "part": movie_info["part"],
                "is_completed": is_completed
            }
            if is_completed:
                self.movies[vid]["download_total"] += 1
                page_data["video_path"] = movie_info["video_path"]
                page_data["audio_path"] = movie_info["audio_path"]
            self.movies[vid]["page_data"].append(page_data)
    def get_movie_infos(self) -> dict:
        """Yield the parsed entry info of every video part found under input_dir."""
        for movie_dir in self.movie_dirs:
            movie_ads_dir = os.path.join(self.input_dir, movie_dir)
            for folder_name, sub_folders, file_names in os.walk(movie_ads_dir):
                entry_file = os.path.join(folder_name, "entry.json")
                # the presence of entry.json marks a video part directory
                if os.path.exists(entry_file):
                    entry = parse_entry(entry_file)
                    if entry:
                        yield entry
    def convert(self, vid: Union[int, str]):
        """Convert every fully-downloaded part of the given video id to mp4."""
        if vid in self.movies:
            movie_info = self.movies.get(vid)
            print(movie_info)
        else:
            print("无效的视频ID")
            return
        # build (and if necessary create) the per-video output directory
        project_output_dir = filename_filter(os.path.join(self.output_dir, movie_info["title"]))
        if not os.path.exists(project_output_dir):
            os.makedirs(project_output_dir)
        for page_data in movie_info["page_data"]:
            # skip parts whose download did not finish
            if page_data["is_completed"]:
                page_name = format_video_name(**movie_info, **page_data)
                composite_video(
                    os.path.abspath(page_data["video_path"]),
                    os.path.abspath(page_data["audio_path"]),
                    os.path.abspath(os.path.join(project_output_dir, filename_filter(page_name)))
                )
            else:
                print(f"{movie_info.get('title')}-{page_data.get('part')}未下载完成!")
    def show_info(self):
        """List the detected videos and interactively ask which one to convert."""
        movies_list = []
        for index, [vid, movie] in enumerate(self.movies.items()):
            movies_list.append(vid)
            print(f"{index + 1}、({vid: <12})[{movie['download_total']:-3}/{movie['total']:-3}] {movie['title']}")
        index: str = input("请输入要转换的编号(all 全部, exit 退出): ")
        if index == "all":
            for vid in movies_list:
                self.convert(vid)
        elif index in ["exit"]:
            print("用户退出")
            exit(0)
        else:
            self.convert(movies_list[int(index) - 1])
    def run(self):
        """Main entry point: parse the downloads, then show the interactive picker."""
        print("开始解析视频信息...")
        self.parse_movies()
        print("解析视频信息完成")
        self.show_info()
        pass
def format_video_name(**video_info: dict) -> str:
    """
    Build the output file name from the FORMAT_VIDEO_NAME template.

    Placeholders: {title} = video title; {name}/{part} = part name;
    {i}/{index}/{page} = 1-based part index. Always ends in ".mp4".
    """
    title = video_info.get("title", "")
    part = video_info.get("part", "")
    page = str(video_info.get("page", ""))
    result = FORMAT_VIDEO_NAME + ".mp4"
    # index placeholders
    result = result.replace("{i}", page)
    result = result.replace("{index}", page)
    result = result.replace("{page}", page)
    # part-name placeholders
    result = result.replace("{name}", part)
    result = result.replace("{part}", part)
    # title placeholder
    result = result.replace("{title}", title)
    return result
def composite_video(video_path: str, audio_path: str, out_path: str):
    """Mux a video stream and an audio stream into one mp4 via ffmpeg (remux, no re-encode)."""
    cmd = f'ffmpeg -y -i "{video_path}" -i "{audio_path}" -codec copy "{out_path}"'
    print('*' * 50)
    print("视频源:" + video_path)
    print("音频源:" + audio_path)
    print("输出源:" + out_path)
    # blocks until ffmpeg exits; ffmpeg's stderr is discarded
    Popen(cmd, stderr=DEVNULL).wait()
def filename_filter(filename: str, repl: str = '') -> str:
    """Replace characters that are illegal in (Windows) file names with *repl*."""
    return re.sub('[/:*?"<>|]', repl, filename)
def parse_entry(entry_file):
    """
    Parse a video entry (metadata) file.
    :param entry_file: path of the entry.json file
    :return: video info dict, or None when unsupported or unparseable
    """
    # open and parse the file
    try:
        with open(entry_file, 'r', encoding='utf-8') as fp:
            entry: dict = json.load(fp)
            # media type
            media_type: int = entry.get('media_type')  # media type; 1 is probably the blv format
            if media_type not in [2]:
                # unsupported media type
                warnings.warn(f"Warning Unsupported media type:{media_type} in {entry_file}")
                return
            # video ids
            avid: int = entry.get('avid')  # avid
            bvid: str = entry.get('bvid')  # bvid
            season_id: int = entry.get('season_id')  # season_id, series (bangumi) id
            # video info
            title: str = entry.get("title")  # video title
            is_completed: bool = entry.get("is_completed", False)  # download finished?
            # per-part data: normal videos use "page_data", seasons use "ep"
            if avid or bvid:
                page = entry["page_data"]["page"]  # part index
                part = entry["page_data"]["part"]  # part title
            if season_id:
                page = entry["ep"]["page"]
                part = entry["ep"]["index_title"]
            item = {
                "avid": avid,
                "bvid": bvid,
                "season_id": season_id,
                "title": title,
                "is_completed": is_completed,
                "page": page,
                "part": part
            }
            # when the download finished, locate the media stream files
            if is_completed:
                # directory that holds the downloaded streams
                type_tag = entry.get('type_tag')
                # video stream
                video_path = os.path.join(os.path.dirname(entry_file), type_tag, "video.m4s")
                if os.path.exists(video_path):  # only record existing files
                    item["video_path"] = video_path
                # audio stream
                audio_path = os.path.join(os.path.dirname(entry_file), type_tag, "audio.m4s")
                if os.path.exists(audio_path):  # only record existing files
                    item["audio_path"] = audio_path
            return item
    except json.decoder.JSONDecodeError as e:
        # file could not be parsed
        warnings.warn(f"Warning file could not parse: {entry_file} \n{e.msg}")
def get_command_args(args=None) -> tuple:
    """
    Parse the -i (input dir) / -o (output dir) command line options.

    :param args: argument list to parse; defaults to sys.argv[1:]
    :return: (input_dir, output_dir); an unset option is returned as None
    """
    if args is None:
        args = argv[1:]
    i = o = None
    opts, _ = getopt(args, "i:o:")
    for opt, arg in opts:
        # getopt reports option names WITH their leading dash ("-i"), so the
        # original comparisons against "i"/"o" could never match.
        if opt == "-i":
            i = arg
        if opt == "-o":
            o = arg
    return i, o
def load_config():
    """
    Load the configuration from CONFIG_PATH into the global CONFIG.

    Creates and persists a default configuration when the file is missing;
    prints an error when the file exists but is not valid JSON.
    """
    try:
        global CONFIG
        with open(CONFIG_PATH, "r") as fp:
            CONFIG = json.load(fp)
    except FileNotFoundError:
        print("create default config.")
        CONFIG = {
            "input_dir": "download",
            "output_dir": "output"
        }
        refresh_config()
    except json.decoder.JSONDecodeError:
        print("读取配置文件错误,请检查配置文件,若无法使用可尝试删除配置文件。")
def refresh_config():
    """Persist the global CONFIG to CONFIG_PATH as UTF-8 JSON."""
    with open(CONFIG_PATH, 'w', encoding="utf-8") as fp:
        json.dump(CONFIG, fp, ensure_ascii=False)
def main():
    """Load the config, build the converter from CLI args, and run it."""
    load_config()
    video_convert = BiLiVideoConvert(*get_command_args())
    video_convert.run()
if __name__ == '__main__':
    main()
| true | true |
f7f4e613ab300ab84ae8de1cb23ce67060ad5691 | 9,244 | py | Python | hangupsbot/commands/__init__.py | mygreentour/hangoutsbot | 9ea2da10f546e6f1dd06c8240187049501c5452a | [
"Unlicense"
] | null | null | null | hangupsbot/commands/__init__.py | mygreentour/hangoutsbot | 9ea2da10f546e6f1dd06c8240187049501c5452a | [
"Unlicense"
] | null | null | null | hangupsbot/commands/__init__.py | mygreentour/hangoutsbot | 9ea2da10f546e6f1dd06c8240187049501c5452a | [
"Unlicense"
] | null | null | null | import asyncio, logging, time
import plugins
logger = logging.getLogger(__name__)
class CommandDispatcher(object):
"""Register commands and run them"""
def __init__(self):
self.bot = None
self.commands = {}
self.admin_commands = []
self.unknown_command = None
self.blocked_command = None
self.tracking = None
self.command_tagsets = {}
def set_bot(self, bot):
self.bot = bot
def set_tracking(self, tracking):
self.tracking = tracking
def get_admin_commands(self, bot, conv_id):
logger.warning("[DEPRECATED] command.get_admin_commands(), use command.get_available_commands() instead")
"""Get list of admin-only commands (set by plugins or in config.json)
list of commands is determined via one of two methods:
default mode allows individual plugins to make the determination for admin and user
commands, user commands can be "promoted" to admin commands via config.json:commands_admin
override this behaviour by defining config.json:commands_user, which will only allow
commands which are explicitly defined in this config key to be executed by users.
note: overriding default behaviour makes all commands admin-only by default
"""
whitelisted_commands = bot.get_config_suboption(conv_id, 'commands_user') or []
if whitelisted_commands:
admin_command_list = self.commands.keys() - whitelisted_commands
else:
commands_admin = bot.get_config_suboption(conv_id, 'commands_admin') or []
admin_command_list = commands_admin + self.admin_commands
return list(set(admin_command_list))
def register_tags(self, command, tagsets):
if command not in self.command_tagsets:
self.command_tagsets[command] = set()
if isinstance(tagsets, str):
tagsets = set([tagsets])
self.command_tagsets[command] = self.command_tagsets[command] | tagsets
@property
def deny_prefix(self):
config_tags_deny_prefix = self.bot.get_config_option('commands.tags.deny-prefix') or "!"
return config_tags_deny_prefix
@property
def escalate_tagged(self):
config_tags_escalate = self.bot.get_config_option('commands.tags.escalate') or False
return config_tags_escalate
def get_available_commands(self, bot, chat_id, conv_id):
start_time = time.time()
config_tags_deny_prefix = self.deny_prefix
config_tags_escalate = self.escalate_tagged
config_admins = bot.get_config_suboption(conv_id, 'admins')
is_admin = False
if chat_id in config_admins:
is_admin = True
commands_admin = bot.get_config_suboption(conv_id, 'commands_admin') or []
commands_user = bot.get_config_suboption(conv_id, 'commands_user') or []
commands_tagged = bot.get_config_suboption(conv_id, 'commands_tagged') or {}
# convert commands_tagged tag list into a set of (frozen)sets
commands_tagged = { key: set([ frozenset(value if isinstance(value, list) else [value])
for value in values ]) for key, values in commands_tagged.items() }
# combine any plugin-determined tags with the config.json defined ones
if self.command_tagsets:
for command, tagsets in self.command_tagsets.items():
if command not in commands_tagged:
commands_tagged[command] = set()
commands_tagged[command] = commands_tagged[command] | tagsets
all_commands = set(self.commands)
admin_commands = set()
user_commands = set()
if commands_admin is True:
"""commands_admin: true # all commands are admin-only"""
admin_commands = all_commands
elif commands_user is True:
"""commands_user: true # all commands are user-only"""
user_commands = all_commands
elif commands_user:
"""commands_user: [ "command", ... ] # listed are user commands, others admin-only"""
user_commands = set(commands_user)
admin_commands = all_commands - user_commands
else:
"""default: follow config["commands_admin"] + plugin settings"""
admin_commands = set(commands_admin) | set(self.admin_commands)
user_commands = all_commands - admin_commands
# make admin commands unavailable to non-admin user
if not is_admin:
admin_commands = set()
if commands_tagged:
_set_user_tags = set(bot.tags.useractive(chat_id, conv_id))
for command, tags in commands_tagged.items():
if command not in all_commands:
# optimisation: don't check commands that aren't loaded into framework
continue
# raise tagged command access level if escalation required
if config_tags_escalate and command in user_commands:
user_commands.remove(command)
# is tagged command generally available (in user_commands)?
# admins always get access, other users need appropriate tag(s)
# XXX: optimisation: check admin_commands to avoid unnecessary scanning
if command not in user_commands|admin_commands:
for _match in tags:
_set_allow = set([_match] if isinstance(_match, str) else _match)
if is_admin or _set_allow <= _set_user_tags:
admin_commands.update([command])
break
if not is_admin:
# tagged commands can be explicitly denied
_denied = set()
for command in user_commands|admin_commands:
if command in commands_tagged:
tags = commands_tagged[command]
for _match in tags:
_set_allow = set([_match] if isinstance(_match, str) else _match)
_set_deny = { config_tags_deny_prefix + x for x in _set_allow }
if _set_deny <= _set_user_tags:
_denied.update([command])
break
admin_commands = admin_commands - _denied
user_commands = user_commands - _denied
user_commands = user_commands - admin_commands # ensure no overlap
interval = time.time() - start_time
logger.debug("get_available_commands() - {}".format(interval))
return { "admin": list(admin_commands), "user": list(user_commands) }
@asyncio.coroutine
def run(self, bot, event, *args, **kwds):
"""Run command"""
command_name = args[0]
if command_name in self.commands:
func = self.commands[command_name]
elif command_name.lower() in self.commands:
func = self.commands[command_name.lower()]
elif self.unknown_command:
func = self.unknown_command
else:
raise KeyError("command {} not found".format(command_name))
setattr(event, 'command_name', command_name)
args = list(args[1:])
try:
results = yield from func(bot, event, *args, **kwds)
return results
except Exception as e:
logger.exception("RUN: {}".format(func.__name__))
yield from self.bot.coro_send_message(
event.conv,
"<b><pre>{0}</pre></b> <pre>{1}</pre>: <em><pre>{2}</pre></em>".format(
func.__name__, type(e).__name__, str(e)) )
def register(self, *args, admin=False, tags=None, final=False, name=None):
"""Decorator for registering command"""
def wrapper(func):
func_name = name or func.__name__
if final:
# wrap command function in coroutine
func = asyncio.coroutine(func)
self.commands[func_name] = func
if admin:
self.admin_commands.append(func_name)
else:
# just register and return the same function
plugins.tracking.register_command( "admin" if admin else "user",
[func_name],
tags=tags )
return func
# If there is one (and only one) positional argument and this argument is callable,
# assume it is the decorator (without any optional keyword arguments)
if len(args) == 1 and callable(args[0]):
return wrapper(args[0])
else:
return wrapper
def register_unknown(self, func):
"""Decorator for registering unknown command"""
self.unknown_command = asyncio.coroutine(func)
return func
def register_blocked(self, func):
"""Decorator for registering unknown command"""
self.blocked_command = asyncio.coroutine(func)
return func
# CommandDispatcher singleton
command = CommandDispatcher()
| 40.366812 | 113 | 0.607313 | import asyncio, logging, time
import plugins
logger = logging.getLogger(__name__)
class CommandDispatcher(object):
    """Register commands and run them."""
    def __init__(self):
        self.bot = None  # set later via set_bot()
        self.commands = {}  # command name -> coroutine function
        self.admin_commands = []  # names registered with admin=True
        self.unknown_command = None  # fallback handler for unknown names
        self.blocked_command = None  # handler for blocked users/commands
        self.tracking = None  # set later via set_tracking()
        self.command_tagsets = {}  # command name -> set of tag (frozen)sets
    def set_bot(self, bot):
        """Attach the bot instance used for config lookups and messaging."""
        self.bot = bot
    def set_tracking(self, tracking):
        """Attach the plugin-tracking helper."""
        self.tracking = tracking
    def get_admin_commands(self, bot, conv_id):
        """Deprecated: get the list of admin-only commands for a conversation."""
        logger.warning("[DEPRECATED] command.get_admin_commands(), use command.get_available_commands() instead")
        whitelisted_commands = bot.get_config_suboption(conv_id, 'commands_user') or []
        if whitelisted_commands:
            admin_command_list = self.commands.keys() - whitelisted_commands
        else:
            commands_admin = bot.get_config_suboption(conv_id, 'commands_admin') or []
            admin_command_list = commands_admin + self.admin_commands
        return list(set(admin_command_list))
    def register_tags(self, command, tagsets):
        """Associate one tag (str) or several tagsets with a command."""
        if command not in self.command_tagsets:
            self.command_tagsets[command] = set()
        if isinstance(tagsets, str):
            tagsets = set([tagsets])
        self.command_tagsets[command] = self.command_tagsets[command] | tagsets
    @property
    def deny_prefix(self):
        """Prefix that turns a tag into a deny-tag (config override, default "!")."""
        config_tags_deny_prefix = self.bot.get_config_option('commands.tags.deny-prefix') or "!"
        return config_tags_deny_prefix
    @property
    def escalate_tagged(self):
        """Whether tagged commands are escalated out of the user list (default False)."""
        config_tags_escalate = self.bot.get_config_option('commands.tags.escalate') or False
        return config_tags_escalate
    def get_available_commands(self, bot, chat_id, conv_id):
        """Resolve which commands chat_id may use in conv_id.

        Returns ``{"admin": [...], "user": [...]}`` after applying the
        config.json overrides and plugin-registered tagsets.
        """
        start_time = time.time()
        config_tags_deny_prefix = self.deny_prefix
        config_tags_escalate = self.escalate_tagged
        # NOTE(review): 'admins' may be unset (None) — `chat_id in None`
        # would raise; confirm config always defines it
        config_admins = bot.get_config_suboption(conv_id, 'admins')
        is_admin = False
        if chat_id in config_admins:
            is_admin = True
        commands_admin = bot.get_config_suboption(conv_id, 'commands_admin') or []
        commands_user = bot.get_config_suboption(conv_id, 'commands_user') or []
        commands_tagged = bot.get_config_suboption(conv_id, 'commands_tagged') or {}
        # convert the tag lists into sets of frozensets
        commands_tagged = { key: set([ frozenset(value if isinstance(value, list) else [value])
            for value in values ]) for key, values in commands_tagged.items() }
        # merge plugin-determined tags with the config.json-defined ones
        if self.command_tagsets:
            for command, tagsets in self.command_tagsets.items():
                if command not in commands_tagged:
                    commands_tagged[command] = set()
                commands_tagged[command] = commands_tagged[command] | tagsets
        all_commands = set(self.commands)
        admin_commands = set()
        user_commands = set()
        if commands_admin is True:
            # commands_admin: true => all commands are admin-only
            admin_commands = all_commands
        elif commands_user is True:
            """commands_user: true # all commands are user-only"""
            user_commands = all_commands
        elif commands_user:
            """commands_user: [ "command", ... ] # listed are user commands, others admin-only"""
            user_commands = set(commands_user)
            admin_commands = all_commands - user_commands
        else:
            """default: follow config["commands_admin"] + plugin settings"""
            admin_commands = set(commands_admin) | set(self.admin_commands)
            user_commands = all_commands - admin_commands
        # make admin commands unavailable to non-admin users
        if not is_admin:
            admin_commands = set()
        if commands_tagged:
            _set_user_tags = set(bot.tags.useractive(chat_id, conv_id))
            for command, tags in commands_tagged.items():
                if command not in all_commands:
                    # skip commands that aren't loaded into the framework
                    continue
                # escalate tagged commands out of the user list if configured
                if config_tags_escalate and command in user_commands:
                    user_commands.remove(command)
                # admins always get access; other users need matching tag(s)
                if command not in user_commands|admin_commands:
                    for _match in tags:
                        _set_allow = set([_match] if isinstance(_match, str) else _match)
                        if is_admin or _set_allow <= _set_user_tags:
                            admin_commands.update([command])
                            break
            if not is_admin:
                # tagged commands can be explicitly denied via deny-prefixed tags
                _denied = set()
                for command in user_commands|admin_commands:
                    if command in commands_tagged:
                        tags = commands_tagged[command]
                        for _match in tags:
                            _set_allow = set([_match] if isinstance(_match, str) else _match)
                            _set_deny = { config_tags_deny_prefix + x for x in _set_allow }
                            if _set_deny <= _set_user_tags:
                                _denied.update([command])
                                break
                admin_commands = admin_commands - _denied
                user_commands = user_commands - _denied
        user_commands = user_commands - admin_commands  # ensure no overlap
        interval = time.time() - start_time
        logger.debug("get_available_commands() - {}".format(interval))
        return { "admin": list(admin_commands), "user": list(user_commands) }
    @asyncio.coroutine
    def run(self, bot, event, *args, **kwds):
        """Run a command: args[0] is the name, the rest are its arguments."""
        command_name = args[0]
        if command_name in self.commands:
            func = self.commands[command_name]
        elif command_name.lower() in self.commands:
            func = self.commands[command_name.lower()]
        elif self.unknown_command:
            func = self.unknown_command
        else:
            raise KeyError("command {} not found".format(command_name))
        setattr(event, 'command_name', command_name)
        args = list(args[1:])
        try:
            results = yield from func(bot, event, *args, **kwds)
            return results
        except Exception as e:
            # log the failure and report it back into the conversation
            logger.exception("RUN: {}".format(func.__name__))
            yield from self.bot.coro_send_message(
                event.conv,
                "<b><pre>{0}</pre></b> <pre>{1}</pre>: <em><pre>{2}</pre></em>".format(
                    func.__name__, type(e).__name__, str(e)) )
    def register(self, *args, admin=False, tags=None, final=False, name=None):
        """Decorator for registering a command (directly or with keyword options)."""
        def wrapper(func):
            func_name = name or func.__name__
            if final:
                # wrap the command function in a coroutine and store it
                func = asyncio.coroutine(func)
                self.commands[func_name] = func
                if admin:
                    self.admin_commands.append(func_name)
            else:
                # only record the command with the plugin tracker
                plugins.tracking.register_command( "admin" if admin else "user",
                                                   [func_name],
                                                   tags=tags )
            return func
        # a single callable positional argument means bare-decorator usage
        if len(args) == 1 and callable(args[0]):
            return wrapper(args[0])
        else:
            return wrapper
    def register_unknown(self, func):
        """Decorator for registering the unknown-command handler."""
        self.unknown_command = asyncio.coroutine(func)
        return func
    def register_blocked(self, func):
        """Decorator for registering the blocked-command handler."""
        self.blocked_command = asyncio.coroutine(func)
        return func
command = CommandDispatcher()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.