code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# -*- coding: utf-8 -*-
"""
LemonSoap - headers scent.
Deals with column headers.
"""
import pandas as pd
import inflection
import re
import logging
from ..lemon_bar import LemonBar
from .scent_template import ScentTemplate
class ColumnsScent(ScentTemplate):
    """
    Manages headers issue identification and fixing.

    The canonical header format is snake_case, built only from word
    characters (letters, digits, underscore), at most 24 characters long
    and unique within the frame.
    """

    def __init__(self, lf: LemonBar):
        ScentTemplate.__init__(self, lf, "headers",
                               "columns_scent.ColumnsScent")

    def check(self) -> bool:
        """
        Identifies issues with headers in a dataframe.

        Correct format is "snake_case", with no special characters. Numbers
        are however allowed.

        Returns:
            False if no issues otherwise True.
        """
        columns = self._lb().columns
        for column in columns:
            fixed = self._standardize(column)
            if fixed != column:
                # Fixed: the closing quote around {fixed} was missing.
                self._log.info(f"* '{column}' incorrect format, "
                               f"should be '{fixed}'.")
        return self._finish_check()

    def fix(self) -> LemonBar:
        """
        Fixes headers in a given LemonBar.

        Returns:
            LemonBar with fixes applied.
        """
        self.check()
        for issue in self._issues:
            # OK to recompute here as well as in check() since there are
            # unlikely to be enough headers to cause an overhead.
            fixed = self._standardize(issue[0])
            self._log.info(f"* '{issue[0]}' replaced with '{fixed}'")
            self._lb().rename(columns={issue[0]: fixed}, inplace=True)
        return self._lb

    def _standardize(self, inp: str) -> str:
        """
        Converts input to standard column header format.

        * snake_case.
        * No special characters.
        * Less than 24 characters long.
        * Unique.

        Args:
            inp: string to fix.

        Returns:
            Converted input.
        """
        # Make underscored, lower case with no special characters.
        fixed = inp.replace(" ", "_")
        fixed = inflection.underscore(fixed)
        # Raw string avoids the invalid-escape-sequence warning on '\W'.
        fixed = re.sub(r'\W+', '', fixed)
        # Headers less than 24 chars.
        if len(fixed) > 24:
            fixed = fixed[:24]
        # If not unique then try with repeatedly incrementing numbers.
        # TODO: O(n^2) algorithm, becomes very slow with lots of headers that
        # are the same. Should use precomputation table.
        sim_num = 0
        fixed_inc = fixed
        while fixed_inc in self._lb().columns:
            fixed_inc = fixed + str(sim_num)
            sim_num += 1
        return fixed_inc
| [
"re.sub",
"inflection.underscore"
] | [((2078, 2106), 'inflection.underscore', 'inflection.underscore', (['fixed'], {}), '(fixed)\n', (2099, 2106), False, 'import inflection\n'), ((2123, 2148), 're.sub', 're.sub', (['"""\\\\W+"""', '""""""', 'fixed'], {}), "('\\\\W+', '', fixed)\n", (2129, 2148), False, 'import re\n')] |
# Generated by Django 3.2.3 on 2021-10-19 18:54
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a self-referential ManyToMany ``relations`` field to UserProfile."""

    # Must run after the initial 'user' app migration that creates the model.
    dependencies = [
        ('user', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='relations',
            # M2M to the configured user model; the trailing '+' in
            # related_name tells Django not to create a reverse accessor.
            field=models.ManyToManyField(related_name='_user_userprofile_relations_+', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"django.db.models.ManyToManyField"
] | [((361, 463), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""_user_userprofile_relations_+"""', 'to': 'settings.AUTH_USER_MODEL'}), "(related_name='_user_userprofile_relations_+', to=\n settings.AUTH_USER_MODEL)\n", (383, 463), False, 'from django.db import migrations, models\n')] |
#python -m marbles test_semantic_columns.py
import unittest
from marbles.mixins import mixins
import pandas as pd
import requests
from pyspark.sql import SparkSession
import psycopg2 as pg
import pandas as pd
import marbles
from pyspark.sql.types import StructType, StructField, StringType
import psycopg2 as pg
#from src.features.build_features import crear_features
from src import(
MY_USER,
MY_PASS,
MY_HOST,
MY_PORT,
MY_DB,
)
def get_clean_data_test():
    """Load a single row of ``clean.rita`` from Postgres as a Spark DataFrame.

    Every column is read as a nullable string; the schema is generated from
    the ordered column-name list below instead of 57 hand-written
    StructField lines.

    Returns:
        A one-row pyspark DataFrame with the clean.rita schema.
    """
    column_names = [
        'year', 'quarter', 'month', 'dayofmonth', 'dayofweek', 'flightdate',
        'reporting_airline', 'dot_id_reporting_airline',
        'iata_code_reporting_airline', 'tail_number',
        'flight_number_reporting_airline', 'originairportid',
        'originairportseqid', 'origincitymarketid', 'origin',
        'origincityname', 'originstate', 'originstatefips',
        'originstatename', 'originwac', 'destairportid', 'destairportseqid',
        'destcitymarketid', 'dest', 'destcityname', 'deststate',
        'deststatefips', 'deststatename', 'destwac', 'crsdeptime', 'deptime',
        'depdelay', 'depdelayminutes', 'depdel15', 'departuredelaygroups',
        'deptimeblk', 'taxiout', 'wheelsoff', 'wheelson', 'taxiin',
        'crsarrtime', 'arrtime', 'arrdelay', 'arrdelayminutes', 'arrdel15',
        'arrivaldelaygroups', 'arrtimeblk', 'cancelled', 'diverted',
        'crselapsedtime', 'actualelapsedtime', 'airtime', 'flights',
        'distance', 'distancegroup', 'divairportlandings',
        'rangoatrasohoras',
    ]
    # All fields are nullable strings, so build the schema programmatically.
    clean_rita = StructType(
        [StructField(name, StringType(), True) for name in column_names])
    config_psyco = "host='{0}' dbname='{1}' user='{2}' password='{3}'".format(
        MY_HOST, MY_DB, MY_USER, MY_PASS)
    connection = pg.connect(config_psyco)
    try:
        pdf = pd.read_sql_query('select * from clean.rita limit 1;',
                                con=connection)
    finally:
        # Fixed: the connection was previously never closed (leak).
        connection.close()
    spark = SparkSession.builder.config('spark.driver.extraClassPath',
                                        'postgresql-9.4.1207.jar').getOrCreate()
    df = spark.createDataFrame(pdf, schema=clean_rita)
    return df
def crear_features_test(base):
    """Recreate the feature-engineering step on a Spark DataFrame.

    Adds four binary indicator columns (findesemana, quincena, dephour,
    seishoras) derived from the existing date columns, mirroring
    src.features.build_features.crear_features for test comparison.

    Args:
        base: pyspark DataFrame with dayofweek/dayofmonth columns.

    Returns:
        The DataFrame with the indicator columns appended.
    """
    from pyspark.sql import functions as f
    # findesemana: 1 when dayofweek is 5, 6 or 7, else 0.
    base = base.withColumn('findesemana', f.when(f.col('dayofweek') == 5, 1).when(f.col('dayofweek') == 6, 1).when(f.col('dayofweek') == 7, 1).otherwise(0))
    # quincena: 1 on pay-period days (1-3, 14-16, 29-31), else 0.
    base = base.withColumn('quincena', f.when(f.col('dayofmonth') == 15, 1).when(f.col('dayofmonth') == 14, 1).when(f.col('dayofmonth') == 16, 1).when(f.col('dayofmonth') == 29, 1).when(f.col('dayofmonth') == 30, 1).when(f.col('dayofmonth') == 31, 1).when(f.col('dayofmonth') == 1, 1).when(f.col('dayofmonth') == 2, 1).when(f.col('dayofmonth') == 3, 1).otherwise(0))
    # NOTE(review): 'dephour' is derived from dayofweek == 5, which looks like
    # a copy-paste slip -- presumably it should come from the departure hour.
    # Kept as-is to mirror the source pipeline; confirm against build_features.
    base = base.withColumn('dephour', f.when(f.col('dayofweek') == 5, 1).otherwise(0))
    # seishoras: 1 when dephour is 0, 6, 12 or 18, else 0.
    base = base.withColumn('seishoras', f.when(f.col('dephour') == 6, 1).when(f.col('dephour') == 12, 1).when(f.col('dephour') == 18, 1).when(f.col('dephour') == 0, 1).otherwise(0))
    return base
| [
"psycopg2.connect",
"pandas.read_sql_query",
"pyspark.sql.functions.col",
"pyspark.sql.SparkSession.builder.config",
"pyspark.sql.types.StringType"
] | [((5059, 5083), 'psycopg2.connect', 'pg.connect', (['config_psyco'], {}), '(config_psyco)\n', (5069, 5083), True, 'import psycopg2 as pg\n'), ((5094, 5164), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""select * from clean.rita limit 1;"""'], {'con': 'connection'}), "('select * from clean.rita limit 1;', con=connection)\n", (5111, 5164), True, 'import pandas as pd\n'), ((5176, 5265), 'pyspark.sql.SparkSession.builder.config', 'SparkSession.builder.config', (['"""spark.driver.extraClassPath"""', '"""postgresql-9.4.1207.jar"""'], {}), "('spark.driver.extraClassPath',\n 'postgresql-9.4.1207.jar')\n", (5203, 5265), False, 'from pyspark.sql import SparkSession\n'), ((510, 522), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (520, 522), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((583, 595), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (593, 595), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((654, 666), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (664, 666), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((730, 742), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (740, 742), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((805, 817), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (815, 817), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((881, 893), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (891, 893), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((964, 976), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (974, 976), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((1054, 1066), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1064, 1066), False, 'from pyspark.sql.types import StructType, 
StructField, StringType\n'), ((1147, 1159), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1157, 1159), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((1224, 1236), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1234, 1236), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((1321, 1333), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1331, 1333), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((1402, 1414), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1412, 1414), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((1486, 1498), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1496, 1498), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((1570, 1582), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1580, 1582), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((1642, 1654), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1652, 1654), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((1722, 1734), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1732, 1734), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((1799, 1811), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1809, 1811), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((1880, 1892), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1890, 1892), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((1961, 1973), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1971, 1973), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((2036, 2048), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', 
(2046, 2048), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((2115, 2127), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2125, 2127), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((2197, 2209), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2207, 2209), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((2279, 2291), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2289, 2291), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((2349, 2361), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2359, 2361), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((2427, 2439), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2437, 2439), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((2502, 2514), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2512, 2514), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((2581, 2593), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2591, 2593), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((2660, 2672), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2670, 2672), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((2733, 2745), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2743, 2745), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((2809, 2821), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2819, 2821), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((2882, 2894), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2892, 2894), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((2956, 2968), 
'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2966, 2968), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((3037, 3049), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (3047, 3049), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((3111, 3123), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (3121, 3123), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((3197, 3209), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (3207, 3209), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((3273, 3285), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (3283, 3285), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((3346, 3358), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (3356, 3358), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((3421, 3433), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (3431, 3433), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((3495, 3507), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (3505, 3507), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((3567, 3579), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (3577, 3579), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((3643, 3655), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (3653, 3655), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((3716, 3728), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (3726, 3728), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((3790, 3802), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (3800, 3802), False, 'from 
pyspark.sql.types import StructType, StructField, StringType\n'), ((3871, 3883), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (3881, 3883), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((3945, 3957), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (3955, 3957), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((4029, 4041), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (4039, 4041), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((4105, 4117), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (4115, 4117), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((4180, 4192), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (4190, 4192), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((4254, 4266), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (4264, 4266), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((4334, 4346), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (4344, 4346), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((4417, 4429), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (4427, 4429), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((4490, 4502), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (4500, 4502), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((4563, 4575), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (4573, 4575), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((4637, 4649), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (4647, 4649), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((4716, 4728), 
'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (4726, 4728), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((4800, 4812), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (4810, 4812), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((4882, 4894), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (4892, 4894), False, 'from pyspark.sql.types import StructType, StructField, StringType\n'), ((5540, 5558), 'pyspark.sql.functions.col', 'f.col', (['"""dayofweek"""'], {}), "('dayofweek')\n", (5545, 5558), True, 'from pyspark.sql import functions as f\n'), ((5906, 5925), 'pyspark.sql.functions.col', 'f.col', (['"""dayofmonth"""'], {}), "('dayofmonth')\n", (5911, 5925), True, 'from pyspark.sql import functions as f\n'), ((5997, 6015), 'pyspark.sql.functions.col', 'f.col', (['"""dayofweek"""'], {}), "('dayofweek')\n", (6002, 6015), True, 'from pyspark.sql import functions as f\n'), ((6181, 6197), 'pyspark.sql.functions.col', 'f.col', (['"""dephour"""'], {}), "('dephour')\n", (6186, 6197), True, 'from pyspark.sql import functions as f\n'), ((5507, 5525), 'pyspark.sql.functions.col', 'f.col', (['"""dayofweek"""'], {}), "('dayofweek')\n", (5512, 5525), True, 'from pyspark.sql import functions as f\n'), ((5872, 5891), 'pyspark.sql.functions.col', 'f.col', (['"""dayofmonth"""'], {}), "('dayofmonth')\n", (5877, 5891), True, 'from pyspark.sql import functions as f\n'), ((6149, 6165), 'pyspark.sql.functions.col', 'f.col', (['"""dephour"""'], {}), "('dephour')\n", (6154, 6165), True, 'from pyspark.sql import functions as f\n'), ((5474, 5492), 'pyspark.sql.functions.col', 'f.col', (['"""dayofweek"""'], {}), "('dayofweek')\n", (5479, 5492), True, 'from pyspark.sql import functions as f\n'), ((5838, 5857), 'pyspark.sql.functions.col', 'f.col', (['"""dayofmonth"""'], {}), "('dayofmonth')\n", (5843, 5857), True, 'from pyspark.sql import functions as f\n'), ((6117, 6133), 
'pyspark.sql.functions.col', 'f.col', (['"""dephour"""'], {}), "('dephour')\n", (6122, 6133), True, 'from pyspark.sql import functions as f\n'), ((5803, 5822), 'pyspark.sql.functions.col', 'f.col', (['"""dayofmonth"""'], {}), "('dayofmonth')\n", (5808, 5822), True, 'from pyspark.sql import functions as f\n'), ((6086, 6102), 'pyspark.sql.functions.col', 'f.col', (['"""dephour"""'], {}), "('dephour')\n", (6091, 6102), True, 'from pyspark.sql import functions as f\n'), ((5768, 5787), 'pyspark.sql.functions.col', 'f.col', (['"""dayofmonth"""'], {}), "('dayofmonth')\n", (5773, 5787), True, 'from pyspark.sql import functions as f\n'), ((5733, 5752), 'pyspark.sql.functions.col', 'f.col', (['"""dayofmonth"""'], {}), "('dayofmonth')\n", (5738, 5752), True, 'from pyspark.sql import functions as f\n'), ((5698, 5717), 'pyspark.sql.functions.col', 'f.col', (['"""dayofmonth"""'], {}), "('dayofmonth')\n", (5703, 5717), True, 'from pyspark.sql import functions as f\n'), ((5663, 5682), 'pyspark.sql.functions.col', 'f.col', (['"""dayofmonth"""'], {}), "('dayofmonth')\n", (5668, 5682), True, 'from pyspark.sql import functions as f\n'), ((5628, 5647), 'pyspark.sql.functions.col', 'f.col', (['"""dayofmonth"""'], {}), "('dayofmonth')\n", (5633, 5647), True, 'from pyspark.sql import functions as f\n')] |
import math
class polygon:
    """A simple polygon given as a list of [x, y] vertices.

    Vertices are expected in clockwise order: the chain from the leftmost
    vertex to the rightmost one (by increasing index) is treated as the top
    border, the remainder as the bottom border.  On construction the vertex
    sequence is rotated to start at the leftmost vertex and the first vertex
    is appended again so the contour is closed.
    """

    def __init__(self, arr):
        """Build the polygon from a list of [x, y] vertex pairs."""
        self.original_arr = arr
        self.size = len(self.original_arr)
        self.__set_min_max_by_original__()
        self.__refactor_original_seq__()
        # Close the contour by repeating the (now first) leftmost vertex.
        self.sorted_arr.append(self.sorted_arr[0])
        self.size += 1

    def __set_min_max_by_original__(self):
        """Locate the indices of the extreme vertices in original_arr."""
        self.x_min_ind = 0
        self.x_max_ind = 0
        self.y_min_ind = 0
        self.y_max_ind = 0
        for i in range(1, self.size):
            if self.original_arr[i][0] > self.original_arr[self.x_max_ind][0]:
                self.x_max_ind = i
            if self.original_arr[i][0] < self.original_arr[self.x_min_ind][0]:
                self.x_min_ind = i
            if self.original_arr[i][1] > self.original_arr[self.y_max_ind][1]:
                self.y_max_ind = i
            if self.original_arr[i][1] < self.original_arr[self.y_min_ind][1]:
                self.y_min_ind = i

    def __refactor_original_seq__(self):
        """Rotate the vertex list to start at the leftmost vertex and remap
        the stored extreme-vertex indices accordingly."""
        self.sorted_arr = []
        for i in range(self.x_min_ind, self.size):
            self.sorted_arr.append(self.original_arr[i])
        for i in range(0, self.x_min_ind):
            self.sorted_arr.append(self.original_arr[i])
        self.x_max_ind = (self.x_max_ind - self.x_min_ind) % self.size
        self.y_max_ind = (self.y_max_ind - self.x_min_ind) % self.size
        self.y_min_ind = (self.y_min_ind - self.x_min_ind) % self.size
        self.x_min_ind = 0

    @staticmethod
    def __equal__(x1, x2):
        """Approximate float equality with a fixed 1e-4 tolerance."""
        return abs(x1 - x2) < 1E-4

    def get_top_border(self, x):
        """Return the y of the top border at abscissa x.

        The top border is the vertex chain from x_min_ind to x_max_ind.
        """
        if polygon.__equal__(x, self.sorted_arr[self.x_max_ind][0]):
            if polygon.__equal__(self.sorted_arr[self.x_max_ind][0], self.sorted_arr[self.x_max_ind + 1][0]):
                return max(self.sorted_arr[self.x_max_ind][1], self.sorted_arr[self.x_max_ind + 1][1])
            else:
                return self.sorted_arr[self.x_max_ind][1]
        if polygon.__equal__(x, self.sorted_arr[self.x_min_ind][0]):
            if polygon.__equal__(self.sorted_arr[self.x_min_ind][0], self.sorted_arr[self.x_min_ind + 1][0]):
                return max(self.sorted_arr[self.x_min_ind][1], self.sorted_arr[self.x_min_ind + 1][1])
            else:
                return self.sorted_arr[self.x_min_ind][1]
        for i in range(self.x_min_ind, self.x_max_ind):
            if x >= self.sorted_arr[i][0] and x < self.sorted_arr[i + 1][0]:
                if self.sorted_arr[i][0] != self.sorted_arr[i + 1][0]:
                    # Linear interpolation along the edge.
                    x1 = self.sorted_arr[i][0]
                    x2 = self.sorted_arr[i + 1][0]
                    y1 = self.sorted_arr[i][1]
                    y2 = self.sorted_arr[i + 1][1]
                    return y1 + (x - x1) * (y2 - y1) / (x2 - x1)
                else:
                    return max(self.sorted_arr[i][1], self.sorted_arr[i + 1][1])
        # NOTE(review): unreachable for x inside the polygon's x-range;
        # exit(3) aborts the whole process -- raising would be kinder.
        exit(3)

    def get_bottom_border(self, x):
        """Return the y of the bottom border at abscissa x.

        The bottom border is the vertex chain from x_max_ind to the end.
        """
        if polygon.__equal__(x, self.sorted_arr[self.x_max_ind][0]):
            if polygon.__equal__(self.sorted_arr[self.x_max_ind][0], self.sorted_arr[self.x_max_ind + 1][0]):
                return min(self.sorted_arr[self.x_max_ind][1], self.sorted_arr[self.x_max_ind + 1][1])
            else:
                return self.sorted_arr[self.x_max_ind][1]
        if polygon.__equal__(x, self.sorted_arr[self.x_min_ind][0]):
            if polygon.__equal__(self.sorted_arr[self.x_min_ind][0], self.sorted_arr[self.x_min_ind + 1][0]):
                return min(self.sorted_arr[self.x_min_ind][1], self.sorted_arr[self.x_min_ind + 1][1])
            else:
                return self.sorted_arr[self.x_min_ind][1]
        for i in range(self.x_max_ind, self.size - 1):
            if x < self.sorted_arr[i][0] and x >= self.sorted_arr[i + 1][0]:
                if self.sorted_arr[i][0] != self.sorted_arr[i + 1][0]:
                    # Linear interpolation along the edge.
                    x1 = self.sorted_arr[i][0]
                    x2 = self.sorted_arr[i + 1][0]
                    y1 = self.sorted_arr[i][1]
                    y2 = self.sorted_arr[i + 1][1]
                    return y1 + (x - x1) * (y2 - y1) / (x2 - x1)
                else:
                    return min(self.sorted_arr[i][1], self.sorted_arr[i + 1][1])
        # NOTE(review): unreachable for x inside the polygon's x-range;
        # exit(3) aborts the whole process -- raising would be kinder.
        exit(3)

    def get_x_min(self):
        """Smallest x coordinate of the polygon."""
        return self.sorted_arr[self.x_min_ind][0]

    def get_x_max(self):
        """Largest x coordinate of the polygon."""
        return self.sorted_arr[self.x_max_ind][0]

    def get_y_min(self):
        """Smallest y coordinate of the polygon."""
        return self.sorted_arr[self.y_min_ind][1]

    def get_y_max(self):
        """Largest y coordinate of the polygon."""
        return self.sorted_arr[self.y_max_ind][1]

    def get_contour_length(self):
        """Total perimeter length of the closed contour."""
        res = 0
        for i in range(0, self.size - 1):
            res += math.sqrt((self.sorted_arr[i][0] - self.sorted_arr[i + 1][0]) ** 2 + (self.sorted_arr[i][1] - self.sorted_arr[i + 1][1]) ** 2)
        return res

    def get_contour_sequence(self, dpi=10):
        """Sample the contour into points spaced roughly 1/dpi apart.

        Returns:
            2d-array with first dimension one entry per sampled point and
            second dimension of length 3 containing x, y and the arc length
            (multiplier constant) represented by the point.
        """
        # Fixed: a dead pre-computation of n over the whole contour length
        # was removed -- n is recomputed per edge below before use.
        res_arr = []
        for i in range(0, self.size - 1):
            x_cur = self.sorted_arr[i][0]
            x_next = self.sorted_arr[i + 1][0]
            y_cur = self.sorted_arr[i][1]
            y_next = self.sorted_arr[i + 1][1]
            section_length = math.sqrt((x_next - x_cur) ** 2 + (y_next - y_cur) ** 2)
            n = math.ceil(section_length * dpi)
            if n == 0:
                continue
            step_len = section_length / float(n)
            if not polygon.__equal__(x_cur, x_next):
                # Non-vertical edge: sample along x and interpolate y.
                # Inner loop index renamed (was 'i', shadowing the edge index).
                y_x = lambda x: y_cur + (x - x_cur) * (y_next - y_cur) / (x_next - x_cur)
                step_x = (x_next - x_cur) / float(n)
                for k in range(0, n):
                    tmp_x = x_cur + step_x * (k + 0.5)
                    res_arr.insert(0, [tmp_x, y_x(tmp_x), step_len])
            else:
                # Vertical edge: sample along y at constant x.
                step_y = (y_next - y_cur) / float(n)
                for k in range(0, n):
                    tmp_y = y_cur + step_y * (k + 0.5)
                    res_arr.insert(0, [x_cur, tmp_y, step_len])
        return res_arr

    def contains_point(self, x, y):
        """True when (x, y) lies between the bottom and top borders."""
        if x > self.get_x_max() or x < self.get_x_min():
            return False
        return self.get_top_border(x) >= y and self.get_bottom_border(x) <= y
| [
"math.ceil",
"math.sqrt"
] | [((4603, 4734), 'math.sqrt', 'math.sqrt', (['((self.sorted_arr[i][0] - self.sorted_arr[i + 1][0]) ** 2 + (self.\n sorted_arr[i][1] - self.sorted_arr[i + 1][1]) ** 2)'], {}), '((self.sorted_arr[i][0] - self.sorted_arr[i + 1][0]) ** 2 + (self.\n sorted_arr[i][1] - self.sorted_arr[i + 1][1]) ** 2)\n', (4612, 4734), False, 'import math\n'), ((5419, 5475), 'math.sqrt', 'math.sqrt', (['((x_next - x_cur) ** 2 + (y_next - y_cur) ** 2)'], {}), '((x_next - x_cur) ** 2 + (y_next - y_cur) ** 2)\n', (5428, 5475), False, 'import math\n'), ((5496, 5527), 'math.ceil', 'math.ceil', (['(section_length * dpi)'], {}), '(section_length * dpi)\n', (5505, 5527), False, 'import math\n'), ((5932, 5988), 'math.sqrt', 'math.sqrt', (['((x_next - x_cur) ** 2 + (y_next - y_cur) ** 2)'], {}), '((x_next - x_cur) ** 2 + (y_next - y_cur) ** 2)\n', (5941, 5988), False, 'import math\n'), ((6009, 6040), 'math.ceil', 'math.ceil', (['(section_length * dpi)'], {}), '(section_length * dpi)\n', (6018, 6040), False, 'import math\n')] |
# coding: utf-8
# In[20]:
import numpy as np
import pydensecrf.densecrf as dcrf
import os
import cv2
import random
from tqdm import tqdm
# In[21]:
from skimage.color import gray2rgb
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score, accuracy_score
from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax
#from osgeo import gdal
get_ipython().run_line_magic('matplotlib', 'inline')
# In[22]:
# Color maps for direction map (RGB triples encoding the four directions).
# Note channel 0 is unique per color (0 / 128 / 255 / 1), so it alone
# discriminates the four directions.
COLOR_LR = [0,128,128]
COLOR_UD = [128,0,128]
COLOR_DIAG = [255,215,0]
COLOR_ADIAG = [1,255,255]
# Large sentinel standing in for an "infinite" axis intercept.
INF = 10000
# In[23]:
MAX = 0
SUM = 1
VEC = 0
MAT = 1
# In[24]:
def dir_to_features(dir_map):
    """Converts direction color map to feature used for crf kernel. The
    feature is obtained by computing the intersections of the x, y axis and the
    line determined by the position of one point and its direction. (More details in
    the report)

    Fixes over the original: the up-down branch referenced the undefined
    name COLOR_UP (NameError) instead of COLOR_UD, and the diag/adiag
    branches compared channel 1 against channel-0 color values; all four
    branches now test channel 0, which is unique per direction color.

    Parameters
    ____________
    dir_map: numpy.array
        Direction map that maps each pixel to a direction in
        [left_right, up_down, diagonal, anti-diagonal], each direction
        is represented by a color.
    """
    (h, w, c) = dir_map.shape
    feature_map = np.zeros((h, w, 2))
    for i in range(h):
        for j in range(w):
            dir_color = dir_map[i, j]
            if dir_color[0] == COLOR_LR[0]:        # left-right
                feature_map[i, j] = np.array([INF, i])
            elif dir_color[0] == COLOR_UD[0]:      # up-down
                feature_map[i, j] = np.array([j, INF])
            elif dir_color[0] == COLOR_DIAG[0]:    # diagonal
                feature_map[i, j] = np.array([j - i, i - j])
            elif dir_color[0] == COLOR_ADIAG[0]:   # anti-diagonal
                feature_map[i, j] = np.array([i + j, i + j])
    return feature_map
# In[25]:
def gen_dir_map(img):
    """Assign each pixel of a square RGB image one of four directions.

    For every pixel, four line-shaped neighbourhoods through a 101x101
    window (horizontal, vertical, diagonal, anti-diagonal) are scored with
    a bilateral-style kernel combining colour similarity and spatial
    proximity; the strongest direction wins.

    Args:
        img: numpy array of shape (h, w, 3) with h == w.

    Returns:
        (h, w) array with values 1=left-right, 2=up-down, 3=diagonal,
        4=anti-diagonal.
    """
    win = 101
    half = (win - 1) // 2
    sigma_color = 2
    sigma_pos = 40
    h, w, _ = img.shape
    assert h == w, "h and w are not equal"
    # (h, w, 2) grid of pixel coordinates: grid[i, j] == (i, j).
    grid = np.moveaxis(np.indices((h, w)), 0, -1).astype(np.float64)
    pad_spec = ((half, half), (half, half), (0, 0))
    grid_pad = np.pad(grid, pad_spec)
    img_pad = np.pad(img, pad_spec)
    # Boolean masks selecting the four lines through the window centre.
    mask_lr = np.zeros((win, win), dtype=bool)
    mask_lr[half, :] = True
    mask_ud = np.zeros((win, win), dtype=bool)
    mask_ud[:, half] = True
    mask_diag = np.identity(win).astype(bool)
    mask_adiag = np.fliplr(np.identity(win)).astype(bool)
    masks = (mask_lr, mask_ud, mask_diag, mask_adiag)
    out = np.zeros((h, w))
    for r in range(h):
        for c in range(w):
            # Differences of the window to the centre pixel.
            col_diff = img_pad[r:r + win, c:c + win] - img[r, c, :]
            pos_diff = grid_pad[r:r + win, c:c + win] - np.array([r, c])
            scores = np.zeros(4)
            for d, mask in enumerate(masks):
                color_term = np.sum(col_diff[mask] ** 2, axis=1) / (2 * sigma_color ** 2)
                pos_term = np.sum(pos_diff[mask] ** 2, axis=1) / (2 * sigma_pos ** 2)
                scores[d] = np.sum(np.exp(-color_term - pos_term))
            # Directions are encoded 1..4 (0 is reserved for "unset").
            out[r, c] = np.argmax(scores) + 1
    return out
# In[26]:
def visualize_dir_map(img, dir_map, save_file=False,
                      filename=None, vis_path=None, dir_path=None):
    """Show an image side by side with its direction map.

    Args:
        img: RGB image (numpy array).
        dir_map: corresponding direction map (values 1..4).
        save_file: when True, save the figure to vis_path and a
            colour-coded direction image to dir_path instead of keeping
            the figure open.
        filename: output file name (used only when save_file is True).
        vis_path: directory for the matplotlib figure.
        dir_path: directory for the colour-coded direction image.
    """
    # Colour-coded rendering of the direction map, same shape as img.
    color_render = np.zeros(img.shape)
    for label, color in ((1, COLOR_LR), (2, COLOR_UD),
                         (3, COLOR_DIAG), (4, COLOR_ADIAG)):
        color_render[dir_map == label] = np.array(color)
    plt.figure(figsize=(10, 5))
    plt.subplot(1, 2, 1)
    plt.imshow(img)
    plt.title('Original Image (blurred)')
    plt.axis('off')
    plt.subplot(1, 2, 2)
    plt.imshow(dir_map)
    plt.title('Direction map')
    plt.axis('off')
    if save_file:
        plt.savefig(os.path.join(vis_path, filename), dpi=300)
        plt.close()
        # NOTE(review): color_render uses the RGB-ordered COLOR_* constants
        # while cv2.imwrite expects BGR channel order -- confirm intent.
        cv2.imwrite(os.path.join(dir_path, filename), color_render)
# In[27]:
def gen_dir_map_and_visualize(image_path='./images/',
                              vis_path='./vis_dir_blur_/',
                              dir_path='./dir_map_/',
                              process_all=True):
    """Generate direction color maps for images in image_path.

    Parameters
    ____________
    image_path: string
        Image path
    vis_path: string
        Path to save visualization results
    dir_path: string
        Path to save direction map
    process_all: Bool
        False to generate a single visualization result without save. True to
        generate and save visualizaiton results for all images.
    """
    # Ensure both output directories exist (dir_path first, as before).
    for out_dir in (dir_path, vis_path):
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
    if not process_all:
        # Single-image demo: blur, classify directions, show without saving.
        demo = cv2.imread('./images/satImage_001.png')
        demo = cv2.GaussianBlur(demo, (5, 5), 0)
        visualize_dir_map(demo, gen_dir_map(demo), save_file=False)
        return
    for file in tqdm(os.listdir(image_path)):
        frame = cv2.imread(os.path.join(image_path, file))
        frame = cv2.GaussianBlur(frame, (5, 5), 0)
        visualize_dir_map(frame, gen_dir_map(frame),
                          filename=file, save_file=True,
                          vis_path=vis_path, dir_path=dir_path)
# In[28]:
def crf_with_dir_kernel(original_img, dir_feature, prob,
                        iter_num, compat_smooth, compat_appearance, compat_struct,
                        w_smooth, w_appearance, w_struct,
                        sigma_smooth, sigma_app_color, sigma_app_pos,
                        sigma_struct_pos, sigma_struct_feat):
    """CRF with a Gaussian smoothing kernel, an appearance kernel and a structural kernel.

    Parameters
    ----------
    original_img : numpy.array
        RGB image used by the appearance kernel.
    dir_feature : numpy.array
        Direction feature map (h, w, channels) used by the structural kernel.
    prob : numpy.array
        Per-pixel foreground probability, shape (h, w).
    iter_num : int
        Number of mean-field inference iterations.
    compat_smooth, compat_appearance, compat_struct : numpy.array
        2x2 label-compatibility matrices for the three kernels.
    w_smooth, w_appearance, w_struct : float
        Weights multiplying the respective compatibility matrices.
    sigma_smooth, sigma_app_color, sigma_app_pos : float
        Kernel bandwidths for the smoothness and appearance kernels.
    sigma_struct_pos, sigma_struct_feat : float
        Position / feature bandwidths of the structural kernel.

    Returns
    -------
    numpy.array
        Refined foreground probability of shape (h, w).
    """
    (h, w) = prob.shape
    # Stack (background, foreground) probabilities into a (2, h, w) softmax map.
    y = np.zeros((h, w, 2))
    y[:, :, 1] = prob
    y[:, :, 0] = 1 - y[:, :, 1]
    annotated_image = y.transpose((2, 0, 1))
    # Number of class labels in the annotated image.
    n_labels = 2
    # Setting up the CRF model.
    d = dcrf.DenseCRF2D(original_img.shape[1], original_img.shape[0], n_labels)
    # Get unary potentials (neg log probability).
    U = unary_from_softmax(annotated_image)
    unary = np.ascontiguousarray(U)
    d.setUnaryEnergy(unary)
    # Scale each compatibility matrix by its kernel weight.
    compat_smooth = compat_smooth * w_smooth
    compat_appearance = compat_appearance * w_appearance
    compat_struct = compat_struct * w_struct
    # Smooth kernel (location only).
    d.addPairwiseGaussian(sxy=(sigma_smooth, sigma_smooth), compat=compat_smooth.astype(np.float32),
                          kernel=dcrf.DIAG_KERNEL,
                          normalization=dcrf.NORMALIZE_SYMMETRIC)
    # Appearance kernel (location + color).
    # BUG FIX: was `rgbim=original_image` (undefined name in this scope);
    # the parameter is named `original_img`, so the original raised NameError.
    d.addPairwiseBilateral(sxy=(sigma_app_pos, sigma_app_pos),
                           srgb=(sigma_app_color, sigma_app_color, sigma_app_color),
                           rgbim=original_img,
                           compat=compat_appearance.astype(np.float32),
                           kernel=dcrf.DIAG_KERNEL,
                           normalization=dcrf.NORMALIZE_SYMMETRIC)
    # Structural kernel built from the direction feature map.
    pairwise_energy = create_pairwise_bilateral(sdims=(sigma_struct_pos, sigma_struct_pos),
                                               schan=(sigma_struct_feat, sigma_struct_feat),
                                               img=dir_feature, chdim=2)
    d.addPairwiseEnergy(pairwise_energy, compat=compat_struct.astype(np.float32))
    Q = d.inference(iter_num)
    proba = np.array(Q)
    return proba[1].reshape((dir_feature.shape[0], dir_feature.shape[1]))
# In[29]:
def crf(original_image, prob,
        iter_num=4, compat_smooth = np.array([[-0.4946432, 1.27117338],[0.59452892, 0.23182234]]),
        compat_appearance = np.array([[-0.30571318, 0.83015124],[1.3217825, -0.13046645]]),
        w_smooth=3.7946478055761963, w_appearance=1.8458537690881878,
        sigma_smooth=8.575103751642672, sigma_color=2.0738539891571977, sigma_color_pos=20):
    """Basic CRF with a Gaussian smoothing kernel and an appearance kernel.

    Refines a per-pixel foreground probability map with dense-CRF
    mean-field inference and returns the refined (h, w) probability map.
    """
    height, width = prob.shape
    # (background, foreground) softmax stacked channel-first for pydensecrf.
    softmax = np.zeros((height, width, 2))
    softmax[:, :, 1] = prob
    softmax[:, :, 0] = 1 - softmax[:, :, 1]
    annotated = softmax.transpose((2, 0, 1))
    n_labels = 2  # two class labels
    model = dcrf.DenseCRF2D(original_image.shape[1], original_image.shape[0], n_labels)
    # Unary potentials are negative log probabilities.
    model.setUnaryEnergy(np.ascontiguousarray(unary_from_softmax(annotated)))
    # Apply the kernel weights to the compatibility matrices.
    smooth_compat = compat_smooth * w_smooth
    appearance_compat = compat_appearance * w_appearance
    # Color-independent term: features are the pixel locations only.
    model.addPairwiseGaussian(sxy=(sigma_smooth, sigma_smooth), compat=smooth_compat.astype(np.float32),
                              kernel=dcrf.DIAG_KERNEL,
                              normalization=dcrf.NORMALIZE_SYMMETRIC)
    # Color-dependent term: features are (x, y, r, g, b).
    model.addPairwiseBilateral(sxy=(sigma_color_pos, sigma_color_pos),
                               srgb=(sigma_color, sigma_color, sigma_color),
                               rgbim=original_image,
                               compat=appearance_compat.astype(np.float32),
                               kernel=dcrf.DIAG_KERNEL,
                               normalization=dcrf.NORMALIZE_SYMMETRIC)
    marginals = np.array(model.inference(iter_num))
    return marginals[1].reshape((original_image.shape[0], original_image.shape[1]))
# In[30]:
def crf_smooth(original_image, prob, use_2d=True, iter_num=1, w=4.921522279119057, sigma_sm=4.325251720130304):
    """CRF with only a smoothing kernel.

    Parameters
    ----------
    original_image : numpy.array
        Image whose height/width define the CRF grid.
    prob : numpy.array
        Per-pixel foreground probability, shape (h, w).
    use_2d : bool
        Run the 2D dense CRF (the only implemented path).
    iter_num : int
        Number of mean-field inference iterations.
    w : float
        Weight (compat) of the Gaussian smoothing kernel.
    sigma_sm : float
        Bandwidth of the smoothing kernel.

    Returns
    -------
    numpy.array or None
        Refined (h, w) probability map, or None when ``use_2d`` is False.
    """
    # BUG FIX: the original did `(h, w) = prob.shape`, clobbering the kernel
    # weight parameter `w`, so `compat=w` silently received the image width
    # instead of the tuned weight. Unpack into a distinct local instead.
    h, width = prob.shape
    y = np.zeros((h, width, 2))
    y[:, :, 1] = prob
    y[:, :, 0] = 1 - y[:, :, 1]
    annotated_image = y.transpose((2, 0, 1))
    # Number of class labels in the annotated image.
    n_labels = 2
    # Setting up the CRF model.
    if use_2d:
        d = dcrf.DenseCRF2D(original_image.shape[1], original_image.shape[0], n_labels)
        # Get unary potentials (neg log probability).
        U = unary_from_softmax(annotated_image)
        unary = np.ascontiguousarray(U)
        d.setUnaryEnergy(unary)
        # Color-independent term, features are the locations only; `w` is now
        # the intended kernel weight.
        d.addPairwiseGaussian(sxy=(sigma_sm, sigma_sm), compat=w, kernel=dcrf.DIAG_KERNEL,
                              normalization=dcrf.NORMALIZE_SYMMETRIC)
        Q = d.inference(iter_num)
        proba = np.array(Q)
        return proba[1].reshape((original_image.shape[0], original_image.shape[1]))
# In[31]:
def propagate_max_mat(img, prob):
    """Probability propagation (max) in 4 directions via matrix multiplication

    For each pixel with non-negligible probability, its probability is
    spread to neighbours lying on the horizontal, vertical and both
    diagonal lines through the pixel, weighted by Gaussian similarity in
    color (sigma_1) and distance (sigma_2).  Receiving pixels keep the
    maximum over all incoming contributions and their own value.

    Args:
        img: color image; assumes shape (h, w, c) matching prob -- TODO confirm
        prob: per-pixel probability map of shape (h, w)

    Returns:
        Propagated probability map of shape (h, w).
    """
    prob_out = prob.copy()
    prop_size = 51          # neighborhood window (odd so it has a center)
    half_size = int((prop_size-1)/2)
    prop_num = 3            # number of propagation passes
    sigma_1 = 5             # color bandwidth
    sigma_2 = 42            # distance bandwidth
    (h, w) = prob.shape
    # Position feature map: channel 0 = row index, channel 1 = column index.
    pos_mat = np.zeros((h,w,2))
    for i in range(h):
        for j in range(w):
            pos_mat[i,j,0]=i
            pos_mat[i,j,1]=j
    # Zero-pad so border pixels have a full window.
    padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0)))
    padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0)))
    # Mask selecting the 4 propagation directions inside the window:
    # vertical, horizontal, main diagonal and anti-diagonal through the center.
    index_mask = np.zeros((prop_size, prop_size)).astype("bool")
    for i in range(prop_size):
        index_mask[i,half_size]=1
        index_mask[half_size,i]=1
        index_mask[i,i]=1
        index_mask[prop_size-1-i,i]=1
    for iteration in range(prop_num):
        padded_prob = np.pad(prob_out, ((half_size, half_size), (half_size, half_size)))
        # propagate prob (maximum)
        for i in range(h):
            for j in range(w):
                if prob_out[i,j]<0.01:
                    continue  # skip near-zero sources; they cannot raise neighbours
                img_nbr = padded_img[i:i+prop_size,j:j+prop_size]
                pos_nbr = padded_pos[i:i+prop_size,j:j+prop_size]
                # Differences to the window center in color and position.
                img_nbr = img_nbr - img[i,j,:]
                pos_nbr = pos_nbr - np.array([i,j])
                # Zero out off-direction entries before the squared norms.
                img_nbr[~index_mask]=0
                pos_nbr[~index_mask]=0
                img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
                pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
                # Gaussian similarity scaled by the source probability.
                k = np.exp(-img_nbr-pos_nbr)*prob_out[i,j]
                k = k*index_mask
                # Receivers keep the max of current value and the contribution.
                padded_prob[i:i+prop_size,j:j+prop_size] = np.maximum(padded_prob[i:i+prop_size,j:j+prop_size], k)
        prob_out = padded_prob[half_size:h+half_size,half_size:w+half_size]
    return prob_out
# In[32]:
def propagate_max_vec(img, prob, prop_size=11,
                      prop_num=16, sigma_1=1.039316347691348, sigma_2=40):
    """
    vec means only do propagation along x and y axis
    max means propagate using max function
    Args:
        prop_size: neighborhood size
        prop_num: number of iteration/propagation
        sigma_1: variance of color
        sigma_2: variance of distance
    """
    half = int((prop_size - 1) / 2)
    h, w, c = img.shape
    # Position feature map: channel 0 = row index, channel 1 = column index.
    pos_mat = np.zeros((h, w, 2))
    rows, cols = np.meshgrid(np.arange(h), np.arange(w), indexing='ij')
    pos_mat[:, :, 0] = rows
    pos_mat[:, :, 1] = cols
    pad3 = ((half, half), (half, half), (0, 0))
    padded_pos = np.pad(pos_mat, pad3)
    padded_img = np.pad(img, pad3)
    result = prob.copy()
    for _ in range(prop_num):
        padded_prob = np.pad(result, ((half, half), (half, half)))
        # Source values are frozen per pass; updates accumulate in padded_prob.
        snapshot = padded_prob.copy()
        assert h == w, "h and w are not equal"
        for i in range(h):
            # Propagate along y within the row band starting at i.
            d_img = padded_img[i:i + half * 2 + 1, :] - padded_img[i + half, :, :]
            d_pos = padded_pos[i:i + half * 2 + 1, :] - padded_pos[i + half, :, :]
            d_img = np.sum(d_img ** 2, axis=2) / (2 * sigma_1 ** 2)
            d_pos = np.sum(d_pos ** 2, axis=2) / (2 * sigma_2 ** 2)
            contrib = np.exp(-d_img - d_pos) * snapshot[i + half, :]
            padded_prob[i:i + prop_size, :] = np.maximum(padded_prob[i:i + prop_size, :], contrib)
            # Propagate along x within the column band starting at i.
            center_img = padded_img[:, i + half, :].reshape((padded_img.shape[0], 1, c))
            center_pos = padded_pos[:, i + half, :].reshape((padded_img.shape[0], 1, 2))
            d_img = padded_img[:, i:i + prop_size] - center_img
            d_pos = padded_pos[:, i:i + prop_size] - center_pos
            d_img = np.sum(d_img ** 2, axis=2) / (2 * sigma_1 ** 2)
            d_pos = np.sum(d_pos ** 2, axis=2) / (2 * sigma_2 ** 2)
            contrib = np.exp(-d_img - d_pos) * snapshot[:, i + half].reshape((-1, 1))
            padded_prob[:, i:i + prop_size] = np.maximum(padded_prob[:, i:i + prop_size], contrib)
        result = padded_prob[half:h + half, half:w + half]
    return result
# In[33]:
def propagate_sum_vec(img, prob, prop_size=11, prop_num=1, sigma_1=1.5319569104856783, sigma_2=80):
    """
    vec means only do propagation along x and y axis
    sum means propagate in a additive schema (with total probability fixed)

    Each pixel distributes its probability to axis-aligned neighbours with
    weights given by Gaussian similarity in color (sigma_1) and position
    (sigma_2); the weights are normalized by the sender's total similarity
    ("degree"), so overall probability mass is conserved (up to the final
    clipping at 1).

    Args:
        img: color image; assumes shape (h, w, c) with h == w -- enforced below
        prob: per-pixel probability map of shape (h, w)
        prop_size: neighborhood size
        prop_num: number of iteration/propagation
        sigma_1: variance of color
        sigma_2: variance of distance

    Returns:
        Propagated probability map of shape (h, w), clipped to [.., 1].
    """
    # print(np.sum(prob))
    prob_out = prob.copy()  # NOTE(review): overwritten after the loop before use
    half_size = int((prop_size-1)/2)
    (h, w, c) = img.shape
    pos_mat = np.zeros((h,w,2)) # position matrix
    for i in range(h):
        for j in range(w):
            pos_mat[i,j,0]=i
            pos_mat[i,j,1]=j
    # Zero-pad features and probabilities so border pixels have full windows.
    padded_pos = np.pad(pos_mat, ((half_size, half_size), (half_size, half_size), (0,0)))
    padded_img = np.pad(img, ((half_size, half_size), (half_size, half_size), (0,0)))
    padded_prob = np.pad(prob, ((half_size, half_size), (half_size, half_size)))
    for iteration in range(prop_num):
        # Freeze the current distribution; receive into a fresh zero buffer.
        padded_prob_fix = padded_prob.copy()
        padded_prob = np.pad(np.zeros((h,w)), ((half_size, half_size), (half_size, half_size)))
        # propagate prob (sum)
        assert h==w, "h and w are not equal"
        # compute the degree mat
        deg_mat = np.zeros((h+2*half_size,w+2*half_size))
        # First pass: accumulate each sender's total outgoing similarity.
        for i in range(h):
            # prop along y for row i
            img_nbr = padded_img[i:i+prop_size,:]
            pos_nbr = padded_pos[i:i+prop_size,:]
            img_nbr = img_nbr - padded_img[i+half_size,:,:]
            pos_nbr = pos_nbr - padded_pos[i+half_size,:,:]
            img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
            pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
            k = np.exp(-img_nbr-pos_nbr)
            deg_mat[i+half_size,:] = deg_mat[i+half_size,:]+np.sum(k,axis=0)
            # prop along x for col i
            img_nbr = padded_img[:,i:i+prop_size]
            pos_nbr = padded_pos[:,i:i+prop_size]
            img_nbr = img_nbr - padded_img[:,i+half_size,:].reshape((padded_img.shape[0],1,c))
            pos_nbr = pos_nbr - padded_pos[:,i+half_size,:].reshape((padded_img.shape[0],1,2))
            img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
            pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
            k = np.exp(-img_nbr-pos_nbr)
            deg_mat[:,i+half_size] = deg_mat[:,i+half_size]+np.sum(k,axis=1)
        # Second pass: redistribute probability; each contribution is divided
        # by the sender's degree so total mass is conserved.
        for i in range(h):
            # prop along y for row i
            img_nbr = padded_img[i:i+prop_size,:]
            pos_nbr = padded_pos[i:i+prop_size,:]
            img_nbr = img_nbr - padded_img[i+half_size,:,:]
            pos_nbr = pos_nbr - padded_pos[i+half_size,:,:]
            img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
            pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
            k = np.exp(-img_nbr-pos_nbr) # similarity matrix
            k = k/deg_mat[i+half_size,:] # divided by degree
            prop_prob = k * padded_prob_fix[i+half_size,:]
            padded_prob[i:i+prop_size,:] = padded_prob[i:i+prop_size,:] + prop_prob
            # prop along x for col i
            img_nbr = padded_img[:,i:i+prop_size]
            pos_nbr = padded_pos[:,i:i+prop_size]
            img_nbr = img_nbr - padded_img[:,i+half_size,:].reshape((padded_img.shape[0],1,c))
            pos_nbr = pos_nbr - padded_pos[:,i+half_size,:].reshape((padded_img.shape[0],1,2))
            img_nbr = np.sum(img_nbr**2, axis=2)/(2*sigma_1**2)
            pos_nbr = np.sum(pos_nbr**2, axis=2)/(2*sigma_2**2)
            k = np.exp(-img_nbr-pos_nbr) # similarity matrix
            k = k/deg_mat[:,i+half_size].reshape((-1,1)) # divided by degree
            prop_prob = k * padded_prob_fix[:,i+half_size].reshape((-1,1))
            padded_prob[:,i:i+prop_size] = padded_prob[:,i:i+prop_size]+ prop_prob
        # padded_prob = padded_prob + 0.5 * padded_prob_fix # lazy propagation
    prob_out = padded_prob[half_size:h+half_size,half_size:w+half_size]
    # print(np.sum(prob_out))
    prob_out[prob_out>1]=1  # clip accumulated probability at 1
    return prob_out
# In[34]:
def prob_to_patch(im):
    """Convert pixel-level probability prediction to patch version.

    The map is tiled into 16x16 patches, scanned column-major (all patches
    of the first 16 columns first, top to bottom), and the mean probability
    of each patch is collected into a flat array.
    """
    patch_size = 16
    means = [
        np.mean(im[row:row + patch_size, col:col + patch_size])
        for col in range(0, im.shape[1], patch_size)
        for row in range(0, im.shape[0], patch_size)
    ]
    return np.array(means)
| [
"numpy.ascontiguousarray",
"numpy.array",
"pydensecrf.utils.unary_from_softmax",
"matplotlib.pyplot.imshow",
"pydensecrf.densecrf.DenseCRF2D",
"os.path.exists",
"os.listdir",
"numpy.mean",
"matplotlib.pyplot.close",
"numpy.exp",
"os.mkdir",
"matplotlib.pyplot.axis",
"numpy.maximum",
"numpy... | [((1290, 1309), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (1298, 1309), True, 'import numpy as np\n'), ((2223, 2239), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (2231, 2239), True, 'import numpy as np\n'), ((2253, 2272), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (2261, 2272), True, 'import numpy as np\n'), ((2409, 2482), 'numpy.pad', 'np.pad', (['pos_mat', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(pos_mat, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (2415, 2482), True, 'import numpy as np\n'), ((2499, 2568), 'numpy.pad', 'np.pad', (['img', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(img, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (2505, 2568), True, 'import numpy as np\n'), ((4187, 4206), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (4195, 4206), True, 'import numpy as np\n'), ((4233, 4251), 'numpy.array', 'np.array', (['COLOR_LR'], {}), '(COLOR_LR)\n', (4241, 4251), True, 'import numpy as np\n'), ((4278, 4296), 'numpy.array', 'np.array', (['COLOR_UD'], {}), '(COLOR_UD)\n', (4286, 4296), True, 'import numpy as np\n'), ((4323, 4343), 'numpy.array', 'np.array', (['COLOR_DIAG'], {}), '(COLOR_DIAG)\n', (4331, 4343), True, 'import numpy as np\n'), ((4370, 4391), 'numpy.array', 'np.array', (['COLOR_ADIAG'], {}), '(COLOR_ADIAG)\n', (4378, 4391), True, 'import numpy as np\n'), ((4396, 4423), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (4406, 4423), True, 'import matplotlib.pyplot as plt\n'), ((4427, 4447), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (4438, 4447), True, 'import matplotlib.pyplot as plt\n'), ((4447, 4462), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4457, 4462), True, 'import matplotlib.pyplot as plt\n'), ((4464, 4501), 'matplotlib.pyplot.title', 'plt.title', 
(['"""Original Image (blurred)"""'], {}), "('Original Image (blurred)')\n", (4473, 4501), True, 'import matplotlib.pyplot as plt\n'), ((4503, 4518), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4511, 4518), True, 'import matplotlib.pyplot as plt\n'), ((4524, 4544), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4535, 4544), True, 'import matplotlib.pyplot as plt\n'), ((4544, 4563), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dir_map'], {}), '(dir_map)\n', (4554, 4563), True, 'import matplotlib.pyplot as plt\n'), ((4565, 4591), 'matplotlib.pyplot.title', 'plt.title', (['"""Direction map"""'], {}), "('Direction map')\n", (4574, 4591), True, 'import matplotlib.pyplot as plt\n'), ((4593, 4608), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4601, 4608), True, 'import matplotlib.pyplot as plt\n'), ((6606, 6625), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (6614, 6625), True, 'import numpy as np\n'), ((6822, 6893), 'pydensecrf.densecrf.DenseCRF2D', 'dcrf.DenseCRF2D', (['original_img.shape[1]', 'original_img.shape[0]', 'n_labels'], {}), '(original_img.shape[1], original_img.shape[0], n_labels)\n', (6837, 6893), True, 'import pydensecrf.densecrf as dcrf\n'), ((6969, 7004), 'pydensecrf.utils.unary_from_softmax', 'unary_from_softmax', (['annotated_image'], {}), '(annotated_image)\n', (6987, 7004), False, 'from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax\n'), ((7017, 7040), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['U'], {}), '(U)\n', (7037, 7040), True, 'import numpy as np\n'), ((7926, 8072), 'pydensecrf.utils.create_pairwise_bilateral', 'create_pairwise_bilateral', ([], {'sdims': '(sigma_struct_pos, sigma_struct_pos)', 'schan': '(sigma_struct_feat, sigma_struct_feat)', 'img': 'dir_feature', 'chdim': '(2)'}), '(sdims=(sigma_struct_pos, sigma_struct_pos), schan\n 
=(sigma_struct_feat, sigma_struct_feat), img=dir_feature, chdim=2)\n', (7951, 8072), False, 'from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax\n'), ((8293, 8304), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (8301, 8304), True, 'import numpy as np\n'), ((8459, 8521), 'numpy.array', 'np.array', (['[[-0.4946432, 1.27117338], [0.59452892, 0.23182234]]'], {}), '([[-0.4946432, 1.27117338], [0.59452892, 0.23182234]])\n', (8467, 8521), True, 'import numpy as np\n'), ((8552, 8615), 'numpy.array', 'np.array', (['[[-0.30571318, 0.83015124], [1.3217825, -0.13046645]]'], {}), '([[-0.30571318, 0.83015124], [1.3217825, -0.13046645]])\n', (8560, 8615), True, 'import numpy as np\n'), ((8901, 8920), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (8909, 8920), True, 'import numpy as np\n'), ((9201, 9276), 'pydensecrf.densecrf.DenseCRF2D', 'dcrf.DenseCRF2D', (['original_image.shape[1]', 'original_image.shape[0]', 'n_labels'], {}), '(original_image.shape[1], original_image.shape[0], n_labels)\n', (9216, 9276), True, 'import pydensecrf.densecrf as dcrf\n'), ((9352, 9387), 'pydensecrf.utils.unary_from_softmax', 'unary_from_softmax', (['annotated_image'], {}), '(annotated_image)\n', (9370, 9387), False, 'from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax\n'), ((9400, 9423), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['U'], {}), '(U)\n', (9420, 9423), True, 'import numpy as np\n'), ((10258, 10269), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (10266, 10269), True, 'import numpy as np\n'), ((10562, 10581), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (10570, 10581), True, 'import numpy as np\n'), ((11349, 11360), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (11357, 11360), True, 'import numpy as np\n'), ((11752, 11771), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (11760, 
11771), True, 'import numpy as np\n'), ((11908, 11981), 'numpy.pad', 'np.pad', (['pos_mat', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(pos_mat, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (11914, 11981), True, 'import numpy as np\n'), ((11998, 12067), 'numpy.pad', 'np.pad', (['img', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(img, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (12004, 12067), True, 'import numpy as np\n'), ((13882, 13901), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (13890, 13901), True, 'import numpy as np\n'), ((14063, 14136), 'numpy.pad', 'np.pad', (['pos_mat', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(pos_mat, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (14069, 14136), True, 'import numpy as np\n'), ((14153, 14222), 'numpy.pad', 'np.pad', (['img', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(img, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (14159, 14222), True, 'import numpy as np\n'), ((16379, 16398), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (16387, 16398), True, 'import numpy as np\n'), ((16560, 16633), 'numpy.pad', 'np.pad', (['pos_mat', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(pos_mat, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (16566, 16633), True, 'import numpy as np\n'), ((16650, 16719), 'numpy.pad', 'np.pad', (['img', '((half_size, half_size), (half_size, half_size), (0, 0))'], {}), '(img, ((half_size, half_size), (half_size, half_size), (0, 0)))\n', (16656, 16719), True, 'import numpy as np\n'), ((16737, 16799), 'numpy.pad', 'np.pad', (['prob', '((half_size, half_size), (half_size, half_size))'], {}), '(prob, ((half_size, half_size), (half_size, half_size)))\n', (16743, 16799), True, 'import numpy as np\n'), ((20330, 20350), 'numpy.array', 'np.array', (['patch_list'], {}), 
'(patch_list)\n', (20338, 20350), True, 'import numpy as np\n'), ((4698, 4709), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4707, 4709), True, 'import matplotlib.pyplot as plt\n'), ((5448, 5472), 'os.path.exists', 'os.path.exists', (['dir_path'], {}), '(dir_path)\n', (5462, 5472), False, 'import os\n'), ((5482, 5500), 'os.mkdir', 'os.mkdir', (['dir_path'], {}), '(dir_path)\n', (5490, 5500), False, 'import os\n'), ((5512, 5536), 'os.path.exists', 'os.path.exists', (['vis_path'], {}), '(vis_path)\n', (5526, 5536), False, 'import os\n'), ((5546, 5564), 'os.mkdir', 'os.mkdir', (['vis_path'], {}), '(vis_path)\n', (5554, 5564), False, 'import os\n'), ((5951, 5990), 'cv2.imread', 'cv2.imread', (['"""./images/satImage_001.png"""'], {}), "('./images/satImage_001.png')\n", (5961, 5990), False, 'import cv2\n'), ((6005, 6037), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (6021, 6037), False, 'import cv2\n'), ((10802, 10877), 'pydensecrf.densecrf.DenseCRF2D', 'dcrf.DenseCRF2D', (['original_image.shape[1]', 'original_image.shape[0]', 'n_labels'], {}), '(original_image.shape[1], original_image.shape[0], n_labels)\n', (10817, 10877), True, 'import pydensecrf.densecrf as dcrf\n'), ((10961, 10996), 'pydensecrf.utils.unary_from_softmax', 'unary_from_softmax', (['annotated_image'], {}), '(annotated_image)\n', (10979, 10996), False, 'from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax\n'), ((11013, 11036), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['U'], {}), '(U)\n', (11033, 11036), True, 'import numpy as np\n'), ((12369, 12435), 'numpy.pad', 'np.pad', (['prob_out', '((half_size, half_size), (half_size, half_size))'], {}), '(prob_out, ((half_size, half_size), (half_size, half_size)))\n', (12375, 12435), True, 'import numpy as np\n'), ((14291, 14357), 'numpy.pad', 'np.pad', (['prob_out', '((half_size, half_size), (half_size, half_size))'], {}), 
'(prob_out, ((half_size, half_size), (half_size, half_size)))\n', (14297, 14357), True, 'import numpy as np\n'), ((17106, 17154), 'numpy.zeros', 'np.zeros', (['(h + 2 * half_size, w + 2 * half_size)'], {}), '((h + 2 * half_size, w + 2 * half_size))\n', (17114, 17154), True, 'import numpy as np\n'), ((2593, 2629), 'numpy.zeros', 'np.zeros', (['(window_size, window_size)'], {}), '((window_size, window_size))\n', (2601, 2629), True, 'import numpy as np\n'), ((2701, 2737), 'numpy.zeros', 'np.zeros', (['(window_size, window_size)'], {}), '((window_size, window_size))\n', (2709, 2737), True, 'import numpy as np\n'), ((2811, 2835), 'numpy.identity', 'np.identity', (['window_size'], {}), '(window_size)\n', (2822, 2835), True, 'import numpy as np\n'), ((3308, 3319), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (3316, 3319), True, 'import numpy as np\n'), ((4648, 4680), 'os.path.join', 'os.path.join', (['vis_path', 'filename'], {}), '(vis_path, filename)\n', (4660, 4680), False, 'import os\n'), ((4730, 4762), 'os.path.join', 'os.path.join', (['dir_path', 'filename'], {}), '(dir_path, filename)\n', (4742, 4762), False, 'import os\n'), ((5610, 5632), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (5620, 5632), False, 'import os\n'), ((5714, 5746), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (5730, 5746), False, 'import cv2\n'), ((12089, 12121), 'numpy.zeros', 'np.zeros', (['(prop_size, prop_size)'], {}), '((prop_size, prop_size))\n', (12097, 12121), True, 'import numpy as np\n'), ((15010, 15056), 'numpy.maximum', 'np.maximum', (['padded_prob[i:i + prop_size, :]', 'k'], {}), '(padded_prob[i:i + prop_size, :], k)\n', (15020, 15056), True, 'import numpy as np\n'), ((15653, 15699), 'numpy.maximum', 'np.maximum', (['padded_prob[:, i:i + prop_size]', 'k'], {}), '(padded_prob[:, i:i + prop_size], k)\n', (15663, 15699), True, 'import numpy as np\n'), ((16912, 16928), 'numpy.zeros', 'np.zeros', (['(h, w)'], 
{}), '((h, w))\n', (16920, 16928), True, 'import numpy as np\n'), ((17574, 17600), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (17580, 17600), True, 'import numpy as np\n'), ((18160, 18186), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (18166, 18186), True, 'import numpy as np\n'), ((18691, 18717), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (18697, 18717), True, 'import numpy as np\n'), ((19411, 19437), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (19417, 19437), True, 'import numpy as np\n'), ((20270, 20284), 'numpy.mean', 'np.mean', (['patch'], {}), '(patch)\n', (20277, 20284), True, 'import numpy as np\n'), ((1485, 1503), 'numpy.array', 'np.array', (['[INF, i]'], {}), '([INF, i])\n', (1493, 1503), True, 'import numpy as np\n'), ((1593, 1611), 'numpy.array', 'np.array', (['[j, INF]'], {}), '([j, INF])\n', (1601, 1611), True, 'import numpy as np\n'), ((1705, 1729), 'numpy.array', 'np.array', (['[j - i, i - j]'], {}), '([j - i, i - j])\n', (1713, 1729), True, 'import numpy as np\n'), ((1821, 1845), 'numpy.array', 'np.array', (['[i + j, i + j]'], {}), '([i + j, i + j])\n', (1829, 1845), True, 'import numpy as np\n'), ((2884, 2908), 'numpy.identity', 'np.identity', (['window_size'], {}), '(window_size)\n', (2895, 2908), True, 'import numpy as np\n'), ((3264, 3280), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (3272, 3280), True, 'import numpy as np\n'), ((3655, 3689), 'numpy.exp', 'np.exp', (['(-img_nbr_dir - pos_nbr_dir)'], {}), '(-img_nbr_dir - pos_nbr_dir)\n', (3661, 3689), True, 'import numpy as np\n'), ((3729, 3738), 'numpy.sum', 'np.sum', (['k'], {}), '(k)\n', (3735, 3738), True, 'import numpy as np\n'), ((3764, 3788), 'numpy.argmax', 'np.argmax', (['dir_intensity'], {}), '(dir_intensity)\n', (3773, 3788), True, 'import numpy as np\n'), ((5664, 5694), 'os.path.join', 'os.path.join', (['image_path', 
'file'], {}), '(image_path, file)\n', (5676, 5694), False, 'import os\n'), ((13193, 13253), 'numpy.maximum', 'np.maximum', (['padded_prob[i:i + prop_size, j:j + prop_size]', 'k'], {}), '(padded_prob[i:i + prop_size, j:j + prop_size], k)\n', (13203, 13253), True, 'import numpy as np\n'), ((14789, 14817), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (14795, 14817), True, 'import numpy as np\n'), ((14853, 14881), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (14859, 14881), True, 'import numpy as np\n'), ((14911, 14937), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (14917, 14937), True, 'import numpy as np\n'), ((15416, 15444), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (15422, 15444), True, 'import numpy as np\n'), ((15480, 15508), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (15486, 15508), True, 'import numpy as np\n'), ((15538, 15564), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (15544, 15564), True, 'import numpy as np\n'), ((17452, 17480), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (17458, 17480), True, 'import numpy as np\n'), ((17516, 17544), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (17522, 17544), True, 'import numpy as np\n'), ((17659, 17676), 'numpy.sum', 'np.sum', (['k'], {'axis': '(0)'}), '(k, axis=0)\n', (17665, 17676), True, 'import numpy as np\n'), ((18038, 18066), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (18044, 18066), True, 'import numpy as np\n'), ((18102, 18130), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (18108, 18130), True, 'import numpy as np\n'), ((18245, 18262), 'numpy.sum', 'np.sum', (['k'], {'axis': 
'(1)'}), '(k, axis=1)\n', (18251, 18262), True, 'import numpy as np\n'), ((18569, 18597), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (18575, 18597), True, 'import numpy as np\n'), ((18633, 18661), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (18639, 18661), True, 'import numpy as np\n'), ((19289, 19317), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (19295, 19317), True, 'import numpy as np\n'), ((19353, 19381), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (19359, 19381), True, 'import numpy as np\n'), ((3513, 3545), 'numpy.sum', 'np.sum', (['(img_nbr_dir ** 2)'], {'axis': '(1)'}), '(img_nbr_dir ** 2, axis=1)\n', (3519, 3545), True, 'import numpy as np\n'), ((3589, 3621), 'numpy.sum', 'np.sum', (['(pos_nbr_dir ** 2)'], {'axis': '(1)'}), '(pos_nbr_dir ** 2, axis=1)\n', (3595, 3621), True, 'import numpy as np\n'), ((12812, 12828), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (12820, 12828), True, 'import numpy as np\n'), ((12932, 12960), 'numpy.sum', 'np.sum', (['(img_nbr ** 2)'], {'axis': '(2)'}), '(img_nbr ** 2, axis=2)\n', (12938, 12960), True, 'import numpy as np\n'), ((13000, 13028), 'numpy.sum', 'np.sum', (['(pos_nbr ** 2)'], {'axis': '(2)'}), '(pos_nbr ** 2, axis=2)\n', (13006, 13028), True, 'import numpy as np\n'), ((13062, 13088), 'numpy.exp', 'np.exp', (['(-img_nbr - pos_nbr)'], {}), '(-img_nbr - pos_nbr)\n', (13068, 13088), True, 'import numpy as np\n')] |
import random
from django.core.management.base import BaseCommand
from django.contrib.admin.utils import flatten
from django_seed import Seed
from lists import models as list_models
from users import models as user_models
from rooms import models as room_models
NAME = "lists"  # model noun used in the help text and success message
class Command(BaseCommand):
    """Management command that seeds one favourites list per user."""

    help = f"This command creates {NAME}"

    def handle(self, *args, **options):
        """Create a "Favs." list for every user and add random rooms to it.

        For each user, creates a List named "Favs." and attaches a random
        contiguous slice of rooms (start index 0-5, end index 6-30).
        """
        users = user_models.User.objects.all()
        rooms = room_models.Room.objects.all()
        for user in users:
            list_model = list_models.List.objects.create(user=user, name="Favs.")
            # Random contiguous slice of rooms to attach to this list.
            to_add = rooms[random.randint(0, 5) : random.randint(6, 30)]
            list_model.rooms.add(*to_add)
        # BUG FIX: the success message used f"{0}", which always printed the
        # literal 0; report the actual number of lists created instead.
        self.stdout.write(self.style.SUCCESS(f"{len(users)} {NAME} created!"))
| [
"users.models.User.objects.all",
"rooms.models.Room.objects.all",
"lists.models.List.objects.create",
"random.randint"
] | [((409, 439), 'users.models.User.objects.all', 'user_models.User.objects.all', ([], {}), '()\n', (437, 439), True, 'from users import models as user_models\n'), ((456, 486), 'rooms.models.Room.objects.all', 'room_models.Room.objects.all', ([], {}), '()\n', (484, 486), True, 'from rooms import models as room_models\n'), ((540, 596), 'lists.models.List.objects.create', 'list_models.List.objects.create', ([], {'user': 'user', 'name': '"""Favs."""'}), "(user=user, name='Favs.')\n", (571, 596), True, 'from lists import models as list_models\n'), ((624, 644), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (638, 644), False, 'import random\n'), ((647, 668), 'random.randint', 'random.randint', (['(6)', '(30)'], {}), '(6, 30)\n', (661, 668), False, 'import random\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add evaluation/scoring tracking fields to the Assignment model.

    Adds evaluation_date, is_evaluated, is_scored and score_date, and
    rewords the help text of the existing is_published flag.  Verbose
    names and help texts are user-facing and intentionally in Spanish.
    """

    dependencies = [
        ('classroom', '0002_assignment_description'),
    ]

    operations = [
        # Optional timestamp of when the evaluation is held.
        migrations.AddField(
            model_name='assignment',
            name='evaluation_date',
            field=models.DateTimeField(null=True, verbose_name='Fecha de evaluaci\xf3n', blank=True),
            preserve_default=True,
        ),
        # Flag: the evaluation has been taken and is available.
        migrations.AddField(
            model_name='assignment',
            name='is_evaluated',
            field=models.BooleanField(default=False, help_text='Tildar para indicar que la evaluaci\xf3n ya fue tomada y est\xe1 disponible.', verbose_name='Evaluado'),
            preserve_default=True,
        ),
        # Flag: the evaluation has been graded and marks are available.
        migrations.AddField(
            model_name='assignment',
            name='is_scored',
            field=models.BooleanField(default=False, help_text='Tildar para indicar que la evaluaci\xf3n ya fue corregida y las notas est\xe1n disponibles.', verbose_name='Corregido'),
            preserve_default=True,
        ),
        # Optional timestamp of when marks were published.
        migrations.AddField(
            model_name='assignment',
            name='score_date',
            field=models.DateTimeField(null=True, verbose_name='Fecha de Notas', blank=True),
            preserve_default=True,
        ),
        # Existing field: only the help text changes.
        migrations.AlterField(
            model_name='assignment',
            name='is_published',
            field=models.BooleanField(default=False, help_text='Tildar para mostrar la asignaci\xf3n a los inscriptos.', verbose_name='Publicado'),
            preserve_default=True,
        ),
    ]
| [
"django.db.models.DateTimeField",
"django.db.models.BooleanField"
] | [((369, 448), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'verbose_name': '"""Fecha de evaluación"""', 'blank': '(True)'}), "(null=True, verbose_name='Fecha de evaluación', blank=True)\n", (389, 448), False, 'from django.db import models, migrations\n'), ((616, 768), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Tildar para indicar que la evaluación ya fue tomada y está disponible."""', 'verbose_name': '"""Evaluado"""'}), "(default=False, help_text=\n 'Tildar para indicar que la evaluación ya fue tomada y está disponible.',\n verbose_name='Evaluado')\n", (635, 768), False, 'from django.db import models, migrations\n'), ((927, 1096), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Tildar para indicar que la evaluación ya fue corregida y las notas están disponibles."""', 'verbose_name': '"""Corregido"""'}), "(default=False, help_text=\n 'Tildar para indicar que la evaluación ya fue corregida y las notas están disponibles.'\n , verbose_name='Corregido')\n", (946, 1096), False, 'from django.db import models, migrations\n'), ((1255, 1329), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'verbose_name': '"""Fecha de Notas"""', 'blank': '(True)'}), "(null=True, verbose_name='Fecha de Notas', blank=True)\n", (1275, 1329), False, 'from django.db import models, migrations\n'), ((1496, 1631), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Tildar para mostrar la asignación a los inscriptos."""', 'verbose_name': '"""Publicado"""'}), "(default=False, help_text=\n 'Tildar para mostrar la asignación a los inscriptos.', verbose_name=\n 'Publicado')\n", (1515, 1631), False, 'from django.db import models, migrations\n')] |
import numpy as np
import matplotlib.pyplot as plt
import os
from pyburst.grids import grid_analyser, grid_strings, grid_tools
# resolution tests
# Scale factors: burst properties are divided by these before plotting
# (dt converted to hours; fluence/peak to the powers of ten in y_units).
y_factors = {'dt': 3600,
             'fluence': 1e39,
             'peak': 1e38,
             }
# Axis labels for each burst property (matplotlib math-mode strings).
y_labels = {'dt': '$\Delta t$',
            'rate': 'Burst rate',
            'fluence': '$E_b$',
            'peak': '$L_{peak}$',
            'length': 'Burst length',
            }
# Units shown alongside each axis label.
y_units = {'dt': 'hr',
           'rate': 'day$^{-1}$',
           'fluence': '$10^39$ erg',
           'peak': '$10^38$ erg s$^{-1}$',
           'length': 's',
           }
# Reference (fiducial) values of the numerical resolution parameters.
reference_params = {'accmass': 1e16,
                    'accdepth': 1e20}
# Counterpart of each resolution parameter: while one is varied, the
# other is held fixed at its reference value.
other_param = {'accmass': 'accdepth',
               'accdepth': 'accmass'}
# x-axis plotting limits for each resolution parameter.
x_bounds = {'accmass': [1e15, 1e17],
            'accdepth': [1e19, 1e21]}
# Plot colours keyed by "is this the reference source?".
colors = {True: 'C1',
          False: 'C0'}
# TODO add save plot, iterate over params
def save_all_plots(sources, ref_source, grid_version,
                   params=('x', 'z', 'mass', 'accrate'), **kwargs):
    """Save a resolution-comparison plot for every parameter combination.

    parameters
    ----------
    sources : set(str)
        source(s) to get models from
    ref_source : str
        source from which the reference model comes
    grid_version : int
        version of the model grid to load
    params : [str]
        grid parameters to enumerate over
    **kwargs
        passed through to plot()
    """
    kgrids = get_multigrids(sources, grid_version=grid_version)
    source = get_not(sources, ref_source)
    unique_all = kgrids[source].unique_params

    # Restrict the enumeration to the requested parameters only.
    unique_subset = {}
    for p in params:
        unique_subset[p] = unique_all[p]
    params_full = grid_tools.enumerate_params(unique_subset)

    n = len(params_full[params[0]])
    for i in range(n):
        params_sub = {}
        for p in params:
            params_sub[p] = params_full[p][i]
        # BUG FIX: plot() has no default for grid_version, so it must be
        # forwarded explicitly; previously this call raised a TypeError.
        plot(params=params_sub, sources=sources, ref_source=ref_source,
             grid_version=grid_version, kgrids=kgrids, save=True,
             display=False, title=False, **kwargs)
def plot(params, sources, ref_source, grid_version,
         bprops=('rate', 'fluence', 'peak', 'length'), figsize=(9, 10), shaded=False,
         display=True, save=False, kgrids=None, title=True, show_nbursts=True):
    """Plot burst properties for given resolution parameter

    One column of panels per resolution parameter (accmass, accdepth); one
    row per burst property in `bprops`.

    parameters
    ----------
    params : dict
        fixed grid parameters selecting the models to plot
    sources: set(str)
        list of source(s) to get models from
    ref_source : str
        source from which the reference model comes
    grid_version : int
        grid version to load (only used when kgrids is None)
    bprops : [str]
        burst properties to plot, one panel row each
    figsize : [int, int]
    shaded : bool
        shade between y_values of reference model
    display : bool
        NOTE(review): currently unused — plt.show() is only reached when
        save=False; confirm intended behaviour.
    save : bool
        save the figure to the source's plots directory instead of showing it
    kgrids : {source: Kgrid}
        dict of grid_analyser.Kgrid objects for each source (loaded on
        demand if None)
    title : bool
        print `params` as the figure title
    show_nbursts : bool
        annotate each point with its number of bursts used
    """
    check_params(params)
    n = len(bprops)
    fig, ax = plt.subplots(n, 2, sharex=False, figsize=figsize)
    if kgrids is None:
        kgrids = get_multigrids(sources, grid_version=grid_version)

    # One column per resolution parameter; the other resolution parameter is
    # pinned to its reference value.
    for i, res_param in enumerate(reference_params):
        ref_value = reference_params[res_param]
        other_res_param = other_param[res_param]
        full_params = dict(params)
        full_params[other_res_param] = reference_params[other_res_param]
        sub_summ, sub_params = get_subgrids(kgrids, params=full_params)

        for j, bprop in enumerate(bprops):
            u_bprop = f'u_{bprop}'  # uncertainty column for this property
            y_label = f'{y_labels[bprop]} ({y_units[bprop]})'
            y_factor = y_factors.get(bprop, 1)
            # Only label the leftmost column / bottom row.
            set_axes(ax[j, i], xscale='log',
                     ylabel=y_label if i == 0 else '',
                     xlabel=res_param if j == n-1 else '',
                     yticks=True if i == 0 else False)

            for source in sources:
                ref = source == ref_source
                x = sub_params[source][res_param]
                y = sub_summ[source][bprop] / y_factor
                yerr = sub_summ[source][u_bprop] / y_factor

                if show_nbursts:
                    # Annotate each point with the number of bursts used,
                    # offset slightly to the right of the marker.
                    n_bursts = sub_summ[source]['n_used']
                    for k in range(len(n_bursts)):
                        x_offset = 1.15
                        nb = n_bursts.iloc[k]
                        ax[j, i].text(x.iloc[k] * x_offset, y.iloc[k], f'{nb:.0f}',
                                      verticalalignment='center')

                if shaded and ref:
                    # Shade the reference model's 1-sigma band across the
                    # full x-range of the panel.
                    idx = np.where(x == ref_value)[0]
                    y_ref = y.iloc[idx]
                    yerr_ref = yerr.iloc[idx]
                    ax[j, i].fill_between(x_bounds[res_param],
                                          np.full(2, y_ref + yerr_ref),
                                          np.full(2, y_ref - yerr_ref), color='0.85')

                ax[j, i].errorbar(x=x, y=y, yerr=yerr, ls='none',
                                  marker='o', capsize=3, color=colors[ref])

    if title:
        ax[0, 0].set_title(params, fontsize=11)
    plt.tight_layout()

    if save:
        # Build a filename encoding the fixed parameter values, with a
        # per-parameter decimal precision.
        source = get_not(sources, ref_source)
        precisions = {'z': 4, 'x': 2, 'qb': 3, 'mass': 1, 'accrate': 2}
        fixed_str = ''
        for p, v in params.items():
            precision = precisions.get(p, 3)
            fixed_str += f'_{p}={v:.{precision}f}'
        filename = f'resolution_{source}{fixed_str}.png'
        path = os.path.join(grid_strings.plots_path(source), 'resolution')
        filepath = os.path.join(path, filename)
        print(f'Saving {filepath}')
        plt.savefig(filepath)
        plt.close(fig)
    else:
        plt.show(block=False)
def get_not(array, var):
    """Return the element of the length-2 iterable `array` that is not `var`.

    Raises ValueError if `var` is not present.
    """
    leftovers = list(array)
    leftovers.remove(var)
    return leftovers[0]
def get_multigrids(sources, grid_version):
    """Load a Kgrid for each source, keyed by source name."""
    return {src: grid_analyser.Kgrid(src, grid_version=grid_version)
            for src in sources}
def get_subgrids(kgrids, params):
    """Extract matching sub-tables from every grid in `kgrids`.

    Returns (sub_summ, sub_params): two dicts keyed by source name holding
    the summary and parameter sub-tables respectively.
    """
    sub_summ = {}
    sub_params = {}
    for source, grid in kgrids.items():
        sub_params[source] = grid.get_params(params=params)
        sub_summ[source] = grid.get_summ(params=params)
    return sub_summ, sub_params
def set_axes(ax, title='', xlabel='', ylabel='', yscale='linear', xscale='linear',
             fontsize=14, yticks=True, xticks=True):
    """Apply common labelling and scaling options to a matplotlib Axes.

    parameters
    ----------
    ax : matplotlib Axes to configure
    title, xlabel, ylabel : str
        title and axis-label text (empty string leaves them blank)
    yscale, xscale : str
        axis scale names, e.g. 'linear' or 'log'
    fontsize : int
        font size for the title and axis labels
    yticks, xticks : bool
        set False to hide the ticks and tick labels on that axis
    """
    # BUG FIX: tick_params visibility arguments take booleans; the legacy
    # strings 'on'/'off' previously passed here are rejected by
    # matplotlib >= 3. Booleans work on all versions.
    if not yticks:
        ax.tick_params(axis='both', left=False, labelleft=False)
    if not xticks:
        ax.tick_params(axis='both', bottom=False, labelbottom=False)
    ax.set_title(title, fontsize=fontsize)
    ax.set_xlabel(xlabel, fontsize=fontsize)
    ax.set_ylabel(ylabel, fontsize=fontsize)
    ax.set_xscale(xscale)
    ax.set_yscale(yscale)
def check_params(params, must_specify=('x', 'z', 'accrate', 'mass')):
    """Verify that every required grid parameter is present in `params`."""
    missing = [p for p in must_specify if p not in params]
    if missing:
        raise ValueError(f'{missing[0]} not specified in params')
| [
"matplotlib.pyplot.savefig",
"pyburst.grids.grid_tools.enumerate_params",
"numpy.where",
"os.path.join",
"matplotlib.pyplot.close",
"pyburst.grids.grid_strings.plots_path",
"matplotlib.pyplot.tight_layout",
"pyburst.grids.grid_analyser.Kgrid",
"numpy.full",
"matplotlib.pyplot.subplots",
"matplot... | [((1303, 1345), 'pyburst.grids.grid_tools.enumerate_params', 'grid_tools.enumerate_params', (['unique_subset'], {}), '(unique_subset)\n', (1330, 1345), False, 'from pyburst.grids import grid_analyser, grid_strings, grid_tools\n'), ((2387, 2436), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n', '(2)'], {'sharex': '(False)', 'figsize': 'figsize'}), '(n, 2, sharex=False, figsize=figsize)\n', (2399, 2436), True, 'import matplotlib.pyplot as plt\n'), ((4491, 4509), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4507, 4509), True, 'import matplotlib.pyplot as plt\n'), ((4949, 4977), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (4961, 4977), False, 'import os\n'), ((5022, 5043), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filepath'], {}), '(filepath)\n', (5033, 5043), True, 'import matplotlib.pyplot as plt\n'), ((5052, 5066), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5061, 5066), True, 'import matplotlib.pyplot as plt\n'), ((5085, 5106), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (5093, 5106), True, 'import matplotlib.pyplot as plt\n'), ((5376, 5430), 'pyburst.grids.grid_analyser.Kgrid', 'grid_analyser.Kgrid', (['source'], {'grid_version': 'grid_version'}), '(source, grid_version=grid_version)\n', (5395, 5430), False, 'from pyburst.grids import grid_analyser, grid_strings, grid_tools\n'), ((4883, 4914), 'pyburst.grids.grid_strings.plots_path', 'grid_strings.plots_path', (['source'], {}), '(source)\n', (4906, 4914), False, 'from pyburst.grids import grid_analyser, grid_strings, grid_tools\n'), ((3947, 3971), 'numpy.where', 'np.where', (['(x == ref_value)'], {}), '(x == ref_value)\n', (3955, 3971), True, 'import numpy as np\n'), ((4166, 4194), 'numpy.full', 'np.full', (['(2)', '(y_ref + yerr_ref)'], {}), '(2, y_ref + yerr_ref)\n', (4173, 4194), True, 'import numpy as np\n'), ((4238, 4266), 'numpy.full', 'np.full', (['(2)', 
'(y_ref - yerr_ref)'], {}), '(2, y_ref - yerr_ref)\n', (4245, 4266), True, 'import numpy as np\n')] |
# Copyright (c) 2020-present, Assistive Robotics Lab
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from transformers.training_utils import fit
from transformers.transformers import (
InferenceTransformerEncoder,
InferenceTransformer
)
from common.data_utils import load_dataloader
from common.logging import logger
from common.losses import QuatDistance
import torch
from torch import nn, optim
import numpy as np
import argparse
# Fix RNG seeds so runs are repeatable.
torch.manual_seed(42)
np.random.seed(42)
# NOTE(review): cudnn determinism is explicitly disabled here, so GPU runs
# may still differ between executions despite the fixed seeds — presumably a
# deliberate speed/reproducibility trade-off; confirm.
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
def parse_args():
    """Parse arguments for module.

    Returns:
        argparse.Namespace: contains accessible arguments passed in to module
    """
    parser = argparse.ArgumentParser()
    # Each entry is (flag, add_argument keyword options); registered in order
    # so the --help output matches the original layout.
    option_specs = [
        ("--task",
         {"help": ("task for neural network to train on; "
                   "either prediction or conversion")}),
        ("--data-path",
         {"help": ("path to h5 files containing data "
                   "(must contain training.h5 and validation.h5)")}),
        ("--representation",
         {"help": ("will normalize if quaternions, will use expmap "
                   "to quat validation loss if expmap"),
          "default": "quaternion"}),
        ("--full-transformer",
         {"help": ("will use Transformer with both encoder and "
                   "decoder if true, will only use encoder "
                   "if false"),
          "default": False,
          "action": "store_true"}),
        ("--model-file-path",
         {"help": "path to model file for saving it after training"}),
        ("--batch-size",
         {"help": "batch size for training", "default": 32}),
        ("--learning-rate",
         {"help": "initial learning rate for training", "default": 0.001}),
        ("--beta-one",
         {"help": "beta1 for adam optimizer (momentum)", "default": 0.9}),
        ("--beta-two",
         {"help": "beta2 for adam optimizer", "default": 0.999}),
        ("--seq-length",
         {"help": ("sequence length for model, will be divided "
                   "by downsample if downsample is provided"),
          "default": 20}),
        ("--downsample",
         {"help": ("reduce sampling frequency of recorded data; "
                   "default sampling frequency is 240 Hz"),
          "default": 1}),
        ("--in-out-ratio",
         {"help": ("ratio of input/output; "
                   "seq_length / downsample = input length = 10, "
                   "output length = input length / in_out_ratio"),
          "default": 1}),
        ("--stride",
         {"help": ("stride used when reading data in "
                   "for running prediction tasks"),
          "default": 3}),
        ("--num-epochs",
         {"help": "number of epochs for training", "default": 1}),
        ("--num-heads",
         {"help": "number of heads in Transformer"}),
        ("--dim-feedforward",
         {"help": ("number of dimensions in feedforward layer "
                   "in Transformer")}),
        ("--dropout",
         {"help": "dropout percentage in Transformer"}),
        ("--num-layers",
         {"help": "number of layers in Transformer"}),
    ]
    for flag, options in option_specs:
        parser.add_argument(flag, **options)
    args = parser.parse_args()
    if args.data_path is None:
        parser.print_help()
    return args
if __name__ == "__main__":
args = parse_args()
for arg in vars(args):
logger.info(f"{arg} - {getattr(args, arg)}")
logger.info("Starting Transformer training...")
logger.info(f"Device count: {torch.cuda.device_count()}")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info(f"Training on {device}...")
seq_length = int(args.seq_length)//int(args.downsample)
assert seq_length % int(args.in_out_ratio) == 0
lr = float(args.learning_rate)
normalize = True
train_dataloader, norm_data = load_dataloader(args, "training", normalize)
val_dataloader, _ = load_dataloader(args, "validation", normalize,
norm_data=norm_data)
encoder_feature_size = train_dataloader.dataset[0][0].shape[1]
decoder_feature_size = train_dataloader.dataset[0][1].shape[1]
num_heads = int(args.num_heads)
dim_feedforward = int(args.dim_feedforward)
dropout = float(args.dropout)
num_layers = int(args.num_layers)
quaternions = (args.representation == "quaternions")
if args.full_transformer:
model = InferenceTransformer(decoder_feature_size, num_heads,
dim_feedforward, dropout,
num_layers, quaternions=quaternions)
else:
model = InferenceTransformerEncoder(encoder_feature_size, num_heads,
dim_feedforward, dropout,
num_layers, decoder_feature_size,
quaternions=quaternions)
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model = model.to(device).double()
epochs = int(args.num_epochs)
beta1 = float(args.beta_one)
beta2 = float(args.beta_two)
optimizer = optim.AdamW(model.parameters(),
lr=lr,
betas=(beta1, beta2),
weight_decay=0.03)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[1, 3],
gamma=0.1)
dataloaders = (train_dataloader, val_dataloader)
training_criterion = nn.L1Loss()
validation_criteria = [nn.L1Loss(), QuatDistance()]
logger.info(f"Model for training: {model}")
logger.info(f"Number of parameters: {num_params}")
logger.info(f"Optimizer for training: {optimizer}")
logger.info(f"Criterion for training: {training_criterion}")
fit(model, optimizer, scheduler, epochs, dataloaders, training_criterion,
validation_criteria, device, args.model_file_path,
full_transformer=args.full_transformer)
logger.info("Completed Training...")
logger.info("\n")
| [
"torch.manual_seed",
"torch.optim.lr_scheduler.MultiStepLR",
"argparse.ArgumentParser",
"common.logging.logger.info",
"torch.nn.L1Loss",
"torch.cuda.device_count",
"transformers.training_utils.fit",
"torch.nn.DataParallel",
"torch.cuda.is_available",
"common.losses.QuatDistance",
"numpy.random.s... | [((541, 562), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (558, 562), False, 'import torch\n'), ((563, 581), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (577, 581), True, 'import numpy as np\n'), ((833, 858), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (856, 858), False, 'import argparse\n'), ((4185, 4232), 'common.logging.logger.info', 'logger.info', (['"""Starting Transformer training..."""'], {}), "('Starting Transformer training...')\n", (4196, 4232), False, 'from common.logging import logger\n'), ((4375, 4414), 'common.logging.logger.info', 'logger.info', (['f"""Training on {device}..."""'], {}), "(f'Training on {device}...')\n", (4386, 4414), False, 'from common.logging import logger\n'), ((4620, 4664), 'common.data_utils.load_dataloader', 'load_dataloader', (['args', '"""training"""', 'normalize'], {}), "(args, 'training', normalize)\n", (4635, 4664), False, 'from common.data_utils import load_dataloader\n'), ((4689, 4756), 'common.data_utils.load_dataloader', 'load_dataloader', (['args', '"""validation"""', 'normalize'], {'norm_data': 'norm_data'}), "(args, 'validation', normalize, norm_data=norm_data)\n", (4704, 4756), False, 'from common.data_utils import load_dataloader\n'), ((6183, 6254), 'torch.optim.lr_scheduler.MultiStepLR', 'optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': '[1, 3]', 'gamma': '(0.1)'}), '(optimizer, milestones=[1, 3], gamma=0.1)\n', (6213, 6254), False, 'from torch import nn, optim\n'), ((6428, 6439), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (6437, 6439), False, 'from torch import nn, optim\n'), ((6501, 6544), 'common.logging.logger.info', 'logger.info', (['f"""Model for training: {model}"""'], {}), "(f'Model for training: {model}')\n", (6512, 6544), False, 'from common.logging import logger\n'), ((6549, 6599), 'common.logging.logger.info', 'logger.info', (['f"""Number of parameters: {num_params}"""'], {}), "(f'Number 
of parameters: {num_params}')\n", (6560, 6599), False, 'from common.logging import logger\n'), ((6604, 6655), 'common.logging.logger.info', 'logger.info', (['f"""Optimizer for training: {optimizer}"""'], {}), "(f'Optimizer for training: {optimizer}')\n", (6615, 6655), False, 'from common.logging import logger\n'), ((6660, 6720), 'common.logging.logger.info', 'logger.info', (['f"""Criterion for training: {training_criterion}"""'], {}), "(f'Criterion for training: {training_criterion}')\n", (6671, 6720), False, 'from common.logging import logger\n'), ((6726, 6899), 'transformers.training_utils.fit', 'fit', (['model', 'optimizer', 'scheduler', 'epochs', 'dataloaders', 'training_criterion', 'validation_criteria', 'device', 'args.model_file_path'], {'full_transformer': 'args.full_transformer'}), '(model, optimizer, scheduler, epochs, dataloaders, training_criterion,\n validation_criteria, device, args.model_file_path, full_transformer=\n args.full_transformer)\n', (6729, 6899), False, 'from transformers.training_utils import fit\n'), ((6912, 6948), 'common.logging.logger.info', 'logger.info', (['"""Completed Training..."""'], {}), "('Completed Training...')\n", (6923, 6948), False, 'from common.logging import logger\n'), ((6953, 6970), 'common.logging.logger.info', 'logger.info', (['"""\n"""'], {}), "('\\n')\n", (6964, 6970), False, 'from common.logging import logger\n'), ((5193, 5313), 'transformers.transformers.InferenceTransformer', 'InferenceTransformer', (['decoder_feature_size', 'num_heads', 'dim_feedforward', 'dropout', 'num_layers'], {'quaternions': 'quaternions'}), '(decoder_feature_size, num_heads, dim_feedforward,\n dropout, num_layers, quaternions=quaternions)\n', (5213, 5313), False, 'from transformers.transformers import InferenceTransformerEncoder, InferenceTransformer\n'), ((5410, 5564), 'transformers.transformers.InferenceTransformerEncoder', 'InferenceTransformerEncoder', (['encoder_feature_size', 'num_heads', 'dim_feedforward', 'dropout', 
'num_layers', 'decoder_feature_size'], {'quaternions': 'quaternions'}), '(encoder_feature_size, num_heads,\n dim_feedforward, dropout, num_layers, decoder_feature_size, quaternions\n =quaternions)\n', (5437, 5564), False, 'from transformers.transformers import InferenceTransformerEncoder, InferenceTransformer\n'), ((5776, 5801), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (5799, 5801), False, 'import torch\n'), ((5823, 5845), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (5838, 5845), False, 'from torch import nn, optim\n'), ((6467, 6478), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (6476, 6478), False, 'from torch import nn, optim\n'), ((6480, 6494), 'common.losses.QuatDistance', 'QuatDistance', ([], {}), '()\n', (6492, 6494), False, 'from common.losses import QuatDistance\n'), ((4333, 4358), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4356, 4358), False, 'import torch\n'), ((4267, 4292), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4290, 4292), False, 'import torch\n')] |
from typing import Tuple
import torch
import torch.nn as nn
from pyro.distributions.util import broadcast_shape
from pyro_util.modules.weight_scaling import GammaReLU, WSLinear
# Short alias for torch.Tensor, used in annotations throughout this module.
T = torch.Tensor
def make_ws_fc(*dims: int) -> nn.Module:
    """Helper function for creating a fully connected neural network.

    This version uses weight-scaled linear layers and gamma-scaled ReLU

    :param dims: The size of the layers in the network (at least 2)
    :return: nn.Sequential containing all the layers
    """
    modules = [WSLinear(dims[0], dims[1])]
    for fan_in, fan_out in zip(dims[1:], dims[2:]):
        modules.extend([GammaReLU(), WSLinear(fan_in, fan_out)])
    return nn.Sequential(*modules)
def make_bn_fc(*dims: int) -> nn.Module:
    """Helper function for creating a fully connected neural network.

    This version uses BatchNorm between linear layers.

    :param dims: The size of the layers in the network (at least 2)
    :return: nn.Sequential containing all the layers
    """
    modules = [nn.Linear(dims[0], dims[1])]
    for fan_in, fan_out in zip(dims[1:], dims[2:]):
        modules.extend([nn.BatchNorm1d(fan_in), nn.ReLU(), nn.Linear(fan_in, fan_out)])
    return nn.Sequential(*modules)
def split_in_half(t: T) -> Tuple[T, T]:
    """Splits a tensor in half along the final dimension"""
    paired = t.reshape(t.shape[:-1] + (2, -1))
    first, second = paired.unbind(dim=-2)
    return first, second
def broadcast_inputs(input_args):
    """Helper for broadcasting inputs to neural net"""
    batch_shape = broadcast_shape(*(arg.shape[:-1] for arg in input_args))
    target_shape = batch_shape + (-1,)
    return [arg.expand(target_shape) for arg in input_args]
| [
"torch.nn.ReLU",
"pyro_util.modules.weight_scaling.WSLinear",
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"pyro_util.modules.weight_scaling.GammaReLU",
"pyro.distributions.util.broadcast_shape",
"torch.nn.Linear"
] | [((704, 726), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (717, 726), True, 'import torch.nn as nn\n'), ((1263, 1285), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (1276, 1285), True, 'import torch.nn as nn\n'), ((527, 553), 'pyro_util.modules.weight_scaling.WSLinear', 'WSLinear', (['dims[0]', 'dims[1]'], {}), '(dims[0], dims[1])\n', (535, 553), False, 'from pyro_util.modules.weight_scaling import GammaReLU, WSLinear\n'), ((1040, 1067), 'torch.nn.Linear', 'nn.Linear', (['dims[0]', 'dims[1]'], {}), '(dims[0], dims[1])\n', (1049, 1067), True, 'import torch.nn as nn\n'), ((1547, 1599), 'pyro.distributions.util.broadcast_shape', 'broadcast_shape', (['*[s.shape[:-1] for s in input_args]'], {}), '(*[s.shape[:-1] for s in input_args])\n', (1562, 1599), False, 'from pyro.distributions.util import broadcast_shape\n'), ((630, 641), 'pyro_util.modules.weight_scaling.GammaReLU', 'GammaReLU', ([], {}), '()\n', (639, 641), False, 'from pyro_util.modules.weight_scaling import GammaReLU, WSLinear\n'), ((665, 690), 'pyro_util.modules.weight_scaling.WSLinear', 'WSLinear', (['in_dim', 'out_dim'], {}), '(in_dim, out_dim)\n', (673, 690), False, 'from pyro_util.modules.weight_scaling import GammaReLU, WSLinear\n'), ((1144, 1166), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['in_dim'], {}), '(in_dim)\n', (1158, 1166), True, 'import torch.nn as nn\n'), ((1190, 1199), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1197, 1199), True, 'import torch.nn as nn\n'), ((1223, 1249), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'out_dim'], {}), '(in_dim, out_dim)\n', (1232, 1249), True, 'import torch.nn as nn\n')] |
#!/usr/bin/env python
'''
catalog_harvesting/util.py
General utilities for the project
'''
import random
def unique_id():
    '''
    Return a random 17-character string that works well for mongo IDs
    '''
    charmap = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    return ''.join(random.choices(charmap, k=17))
| [
"random.choice"
] | [((310, 332), 'random.choice', 'random.choice', (['charmap'], {}), '(charmap)\n', (323, 332), False, 'import random\n')] |
from WeatherScreens.RingScreen import RingScreen
from WeatherScreens.QuadrantScreen import QuadrantScreen
from WeatherScreens.ImageScreen import ImageScreen
from WeatherScreens.ScreenBase import ScreenBase
from datetime import datetime, timedelta
from suntime import Sun, SunTimeException
from dateutil import tz
import pyowm
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="WeatherFrame CLI Utility")
parser.add_argument("--lat", type=float,
help="Latitude in decimal form")
parser.add_argument("--long", type=float,
help="Longitude in decimal form")
parser.add_argument("--owm", type=str,
help="OpenWeatherMap API Token")
parser.add_argument("--type", type=str,
help="Screen type")
parser.add_argument("--image", type=str,
help="Image path")
args = parser.parse_args()
latitude = args.lat
longitude = args.long
owm_token = args.owm
screen_type = args.type
image_path = args.image
# MOCK data
weather_data = {
'wind': {'speed': 33.5, 'deg': 190, 'gust': 42.12},
'humidity': 100,
'humidity_indoor': 47,
'temp': {'temp': -33.77, 'temp_max': 0.56, 'temp_min': -2.0},
'temp_indoor': 24.12,
'status': 'Mist',
'clouds': 90,
'pressure': {'press': 1009, 'sea_level': 1038.381},
'observation_time': "2020-01-25 09:04:34+00",
'forecast': [
{'status': 'Clouds', 'temp': {'temp': -0.52, 'temp_max': 0.83, 'temp_min': -0.52, 'temp_kf': -1.35}, 'wind': {'speed': 2.21, 'deg': 88}, 'date': "2020-01-26 15:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': -1.69, 'temp_max': -0.68, 'temp_min': -1.69, 'temp_kf': -1.01}, 'wind': {'speed': 1.73, 'deg': 80}, 'date': "2020-01-26 18:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': -1.75, 'temp_max': -1.07, 'temp_min': -1.75, 'temp_kf': -0.68}, 'wind': {'speed': 1.42, 'deg': 45}, 'date': "2020-01-26 21:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': -1.66, 'temp_max': -1.32, 'temp_min': -1.66, 'temp_kf': -0.34}, 'wind': {'speed': 1.32, 'deg': 8}, 'date': "2020-01-27 00:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': -1.56, 'temp_kf': -273.15, 'temp_max': -1.56, 'temp_min': -1.56}, 'wind': {'speed': 0.83, 'deg': 17}, 'date': "2020-01-27 03:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': -1.48, 'temp_kf': -273.15, 'temp_max': -1.48, 'temp_min': -1.48}, 'wind': {'speed': 1.09, 'deg': 317}, 'date': "2020-01-27 06:00:00+00"},
{'status': 'Clear', 'temp': {'temp': 1.78, 'temp_kf': -273.15, 'temp_max': 1.78, 'temp_min': 1.78}, 'wind': {'speed': 1.53, 'deg': 302}, 'date': "2020-01-27 09:00:00+00"},
{'status': 'Clear', 'temp': {'temp': 4.87, 'temp_kf': -273.15, 'temp_max': 4.87, 'temp_min': 4.87}, 'wind': {'speed': 1.39, 'deg': 267}, 'date': "2020-01-27 12:00:00+00"},
{'status': 'Clear', 'temp': {'temp': 3.01, 'temp_kf': -273.15, 'temp_max': 3.01, 'temp_min': 3.01}, 'wind': {'speed': 1.96, 'deg': 187}, 'date': "2020-01-27 15:00:00+00"},
{'status': 'Clear', 'temp': {'temp': 1.33, 'temp_kf': -273.15, 'temp_max': 1.33, 'temp_min': 1.33}, 'wind': {'speed': 3.08, 'deg': 141}, 'date': "2020-01-27 18:00:00+00"},
{'status': 'Clear', 'temp': {'temp': 1.25, 'temp_kf': -273.15, 'temp_max': 1.25, 'temp_min': 1.25}, 'wind': {'speed': 3.64, 'deg': 140}, 'date': "2020-01-27 21:00:00+00"},
{'status': 'Clear', 'temp': {'temp': 1.46, 'temp_kf': -273.15, 'temp_max': 1.46, 'temp_min': 1.46}, 'wind': {'speed': 5.11, 'deg': 138}, 'date': "2020-01-28 00:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 2.65, 'temp_kf': -273.15, 'temp_max': 2.65, 'temp_min': 2.65}, 'wind': {'speed': 6.79, 'deg': 142}, 'date': "2020-01-28 03:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 3.88, 'temp_kf': -273.15, 'temp_max': 3.88, 'temp_min': 3.88}, 'wind': {'speed': 5.3, 'deg': 164}, 'date': "2020-01-28 06:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 5.47, 'temp_kf': -273.15, 'temp_max': 5.47, 'temp_min': 5.47}, 'wind': {'speed': 5.01, 'deg': 143}, 'date': "2020-01-28 09:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 6.44, 'temp_kf': -273.15, 'temp_max': 6.44, 'temp_min': 6.44}, 'wind': {'speed': 3.59, 'deg': 335}, 'date': "2020-01-28 12:00:00+00"},
{'status': 'Rain', 'temp': {'temp': 5.16, 'temp_kf': -273.15, 'temp_max': 5.16, 'temp_min': 5.16}, 'wind': {'speed': 3.21, 'deg': 264}, 'date': "2020-01-28 15:00:00+00"},
{'status': 'Rain', 'temp': {'temp': 3.55, 'temp_kf': -273.15, 'temp_max': 3.55, 'temp_min': 3.55}, 'wind': {'speed': 3.59, 'deg': 321}, 'date': "2020-01-28 18:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 3.97, 'temp_kf': -273.15, 'temp_max': 3.97, 'temp_min': 3.97}, 'wind': {'speed': 7.12, 'deg': 301}, 'date': "2020-01-28 21:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 2.98, 'temp_kf': -273.15, 'temp_max': 2.98, 'temp_min': 2.98}, 'wind': {'speed': 6.25, 'deg': 277}, 'date': "2020-01-29 00:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 1.37, 'temp_kf': -273.15, 'temp_max': 1.37, 'temp_min': 1.37}, 'wind': {'speed': 3.69, 'deg': 263}, 'date': "2020-01-29 03:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 2.09, 'temp_kf': -273.15, 'temp_max': 2.09, 'temp_min': 2.09}, 'wind': {'speed': 5.82, 'deg': 213}, 'date': "2020-01-29 06:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 4.53, 'temp_kf': -273.15, 'temp_max': 4.53, 'temp_min': 4.53}, 'wind': {'speed': 3.18, 'deg': 260}, 'date': "2020-01-29 09:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 5.56, 'temp_kf': -273.15, 'temp_max': 5.56, 'temp_min': 5.56}, 'wind': {'speed': 11.16, 'deg': 291}, 'date': "2020-01-29 12:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 4.4, 'temp_kf': -273.15, 'temp_max': 4.4, 'temp_min': 4.4}, 'wind': {'speed': 9.39, 'deg': 296}, 'date': "2020-01-29 15:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 3.49, 'temp_kf': -273.15, 'temp_max': 3.49, 'temp_min': 3.49}, 'wind': {'speed': 12.78, 'deg': 298}, 'date': "2020-01-29 18:00:00+00"},
{'status': 'Clear', 'temp': {'temp': 2.37, 'temp_kf': -273.15, 'temp_max': 2.37, 'temp_min': 2.37}, 'wind': {'speed': 6.79, 'deg': 288}, 'date': "2020-01-29 21:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 2.59, 'temp_kf': -273.15, 'temp_max': 2.59, 'temp_min': 2.59}, 'wind': {'speed': 8.32, 'deg': 292}, 'date': "2020-01-30 00:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 1.8, 'temp_kf': -273.15, 'temp_max': 1.8, 'temp_min': 1.8}, 'wind': {'speed': 7.83, 'deg': 294}, 'date': "2020-01-30 03:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 1.06, 'temp_kf': -273.15, 'temp_max': 1.06, 'temp_min': 1.06}, 'wind': {'speed': 5.74, 'deg': 303}, 'date': "2020-01-30 06:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 3.67, 'temp_kf': -273.15, 'temp_max': 3.67, 'temp_min': 3.67}, 'wind': {'speed': 9.05, 'deg': 305}, 'date': "2020-01-30 09:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 5.38, 'temp_kf': -273.15, 'temp_max': 5.38, 'temp_min': 5.38}, 'wind': {'speed': 9.72, 'deg': 299}, 'date': "2020-01-30 12:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 4.55, 'temp_kf': -273.15, 'temp_max': 4.55, 'temp_min': 4.55}, 'wind': {'speed': 4.51, 'deg': 294}, 'date': "2020-01-30 15:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 3.21, 'temp_kf': -273.15, 'temp_max': 3.21, 'temp_min': 3.21}, 'wind': {'speed': 4.77, 'deg': 298}, 'date': "2020-01-30 18:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 1.39, 'temp_kf': -273.15, 'temp_max': 1.39, 'temp_min': 1.39}, 'wind': {'speed': 1.37, 'deg': 269}, 'date': "2020-01-30 21:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 0.23, 'temp_kf': -273.15, 'temp_max': 0.23, 'temp_min': 0.23}, 'wind': {'speed': 1.08, 'deg': 155}, 'date': "2020-01-31 00:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': -0.07, 'temp_kf': -273.15, 'temp_max': -0.07, 'temp_min': -0.07}, 'wind': {'speed': 0.35, 'deg': 28}, 'date': "2020-01-31 03:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': -0.09, 'temp_kf': -273.15, 'temp_max': -0.09, 'temp_min': -0.09}, 'wind': {'speed': 0.47, 'deg': 342}, 'date': "2020-01-31 06:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 3.67, 'temp_kf': -273.15, 'temp_max': 3.67, 'temp_min': 3.67}, 'wind': {'speed': 1.49, 'deg': 286}, 'date': "2020-01-31 09:00:00+00"},
{'status': 'Clouds', 'temp': {'temp': 6.95, 'temp_kf': -273.15, 'temp_max': 6.95, 'temp_min': 6.95}, 'wind': {'speed': 1.9, 'deg': 258}, 'date': "2020-01-31 12:00:00+00"}
]
}
    # correct weather data forecast dates: the mock forecast was captured in
    # January 2020, so shift every datapoint forward to the present.
    fixed_forecast = []
    now = datetime.now()
    datapoint_datetime = datetime.strptime(weather_data["forecast"][0]["date"], "%Y-%m-%d %H:%M:%S+00")
    diff = now - datapoint_datetime
    for x in weather_data["forecast"]:
        x_date = datapoint_datetime = datetime.strptime(x["date"], "%Y-%m-%d %H:%M:%S+00")
        # Advance by the whole-day offset plus one day of margin, keeping the
        # original time of day, then re-serialise in the same string format.
        x["date"] = x_date + timedelta(days=diff.days+1)
        x["date"] = x["date"].strftime("%Y-%m-%d %H:%M:%S+00")
        fixed_forecast.append(x)
    weather_data["forecast"] = fixed_forecast
    # Fetch the live observation from OpenWeatherMap.
    # NOTE(review): weather_data is rebuilt here from the live observation,
    # discarding the mock dict above (including its 'forecast',
    # 'humidity_indoor' and 'temp_indoor' entries) — confirm that the screens
    # do not require those keys.
    owm = pyowm.OWM(owm_token)
    observation = owm.weather_at_coords(latitude, longitude)
    w = observation.get_weather()
    weather_data = {
        'wind': w.get_wind(),
        'humidity': w.get_humidity(),
        'temp': w.get_temperature('celsius'),
        'clouds': w.get_clouds(),
        'pressure': w.get_pressure(),
        'status': w.get_status(),
        'observation_time': observation.get_reception_time(timeformat="iso")
    }

    # Instantiate the requested screen type; unknown types fall back to the
    # base screen.
    screen = None
    if screen_type == "ring":
        screen = RingScreen(coordinates=(latitude, longitude),
                            weather_data=weather_data)
    elif screen_type == "quadrant":
        screen = QuadrantScreen(coordinates=(latitude, longitude),
                                weather_data=weather_data)
    elif screen_type == "image":
        screen = ImageScreen(path=image_path)
    else:
        screen = ScreenBase()

    # Render the composed frame and display it.
    image = screen.render()
    image.show()
| [
"WeatherScreens.ScreenBase.ScreenBase",
"argparse.ArgumentParser",
"datetime.datetime.strptime",
"pyowm.OWM",
"WeatherScreens.ImageScreen.ImageScreen",
"datetime.datetime.now",
"WeatherScreens.QuadrantScreen.QuadrantScreen",
"datetime.timedelta",
"WeatherScreens.RingScreen.RingScreen"
] | [((385, 448), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""WeatherFrame CLI Utility"""'}), "(description='WeatherFrame CLI Utility')\n", (408, 448), False, 'import argparse\n'), ((9014, 9028), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9026, 9028), False, 'from datetime import datetime, timedelta\n'), ((9054, 9132), 'datetime.datetime.strptime', 'datetime.strptime', (["weather_data['forecast'][0]['date']", '"""%Y-%m-%d %H:%M:%S+00"""'], {}), "(weather_data['forecast'][0]['date'], '%Y-%m-%d %H:%M:%S+00')\n", (9071, 9132), False, 'from datetime import datetime, timedelta\n'), ((9509, 9529), 'pyowm.OWM', 'pyowm.OWM', (['owm_token'], {}), '(owm_token)\n', (9518, 9529), False, 'import pyowm\n'), ((9246, 9298), 'datetime.datetime.strptime', 'datetime.strptime', (["x['date']", '"""%Y-%m-%d %H:%M:%S+00"""'], {}), "(x['date'], '%Y-%m-%d %H:%M:%S+00')\n", (9263, 9298), False, 'from datetime import datetime, timedelta\n'), ((10014, 10086), 'WeatherScreens.RingScreen.RingScreen', 'RingScreen', ([], {'coordinates': '(latitude, longitude)', 'weather_data': 'weather_data'}), '(coordinates=(latitude, longitude), weather_data=weather_data)\n', (10024, 10086), False, 'from WeatherScreens.RingScreen import RingScreen\n'), ((9328, 9357), 'datetime.timedelta', 'timedelta', ([], {'days': '(diff.days + 1)'}), '(days=diff.days + 1)\n', (9337, 9357), False, 'from datetime import datetime, timedelta\n'), ((10168, 10244), 'WeatherScreens.QuadrantScreen.QuadrantScreen', 'QuadrantScreen', ([], {'coordinates': '(latitude, longitude)', 'weather_data': 'weather_data'}), '(coordinates=(latitude, longitude), weather_data=weather_data)\n', (10182, 10244), False, 'from WeatherScreens.QuadrantScreen import QuadrantScreen\n'), ((10327, 10355), 'WeatherScreens.ImageScreen.ImageScreen', 'ImageScreen', ([], {'path': 'image_path'}), '(path=image_path)\n', (10338, 10355), False, 'from WeatherScreens.ImageScreen import ImageScreen\n'), ((10383, 10395), 
'WeatherScreens.ScreenBase.ScreenBase', 'ScreenBase', ([], {}), '()\n', (10393, 10395), False, 'from WeatherScreens.ScreenBase import ScreenBase\n')] |
from django.conf.urls import url, include
from django.conf import settings
from . import views
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
# NOTE(review): the pattern r'manage/' is unanchored, so it matches the
# substring anywhere in the requested path; use r'^manage/' if only the
# exact prefix should match -- confirm the intended routing.
urlpatterns = [
    url(r'manage/', views.index),
]
| [
"django.conf.urls.url"
] | [((222, 249), 'django.conf.urls.url', 'url', (['"""manage/"""', 'views.index'], {}), "('manage/', views.index)\n", (225, 249), False, 'from django.conf.urls import url, include\n')] |
# This code is generated automatically by ClointFusion BOT Builder Tool.
# Recorded GUI-automation macro: each step first tries to locate a screenshot
# snippet on screen (image matching) and clicks it; if the image is not found
# it falls back to clicking the absolute coordinates captured at record time.
# NOTE(review): the snip paths are absolute per-machine temp paths and the
# fallback coordinates are resolution-dependent -- this script only replays
# reliably on the machine/screen layout it was recorded on.
# NOTE(review): the bare ``except:`` clauses silently swallow every error,
# including KeyboardInterrupt; this is how the generator emits fallbacks.
import ClointFusion as cf
import time
# Minimize all windows, then click the screen center to focus the desktop.
cf.window_show_desktop()
cf.mouse_click(int(cf.pg.size()[0]/2),int(cf.pg.size()[1]/2))
# Step 1: click the first recorded target (fallback: absolute (1788, 368)).
try:
    cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\mrmay\AppData\Local\Temp\cf_log_5fa2gg4s_generator\Images\Snips\1--1788_368.png',conf=0.7, wait=12),left_or_right='left', single_double_triple = 'single')
except:
    cf.mouse_click(1788,368,left_or_right='left', single_double_triple = 'single')
time.sleep(2)
# Step 2: single-click the second target (fallback: (246, 938)).
try:
    cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\mrmay\AppData\Local\Temp\cf_log_5fa2gg4s_generator\Images\Snips\2--246_938.png',conf=0.7, wait=10),left_or_right='left', single_double_triple = 'single')
except:
    cf.mouse_click(246,938,left_or_right='left', single_double_triple = 'single')
time.sleep(0)
# Step 3: double-click the same screen region (e.g. to open an item).
try:
    cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\mrmay\AppData\Local\Temp\cf_log_5fa2gg4s_generator\Images\Snips\3--246_938.png',conf=0.7, wait=13),left_or_right='left', single_double_triple = 'double')
except:
    cf.mouse_click(246,938,left_or_right='left', single_double_triple = 'double')
time.sleep(3)
# Step 4: click into the Chrome "New Tab" address bar (fallback: (385, 77)).
try:
    cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\mrmay\AppData\Local\Temp\cf_log_5fa2gg4s_generator\Images\Snips\4-NewTabGoogleChrome-385_77.png',conf=0.7, wait=11),left_or_right='left', single_double_triple = 'single')
except:
    cf.mouse_click(385,77,left_or_right='left', single_double_triple = 'single')
time.sleep(1)
# Type the search query and submit it.
cf.key_write_enter('modi')
time.sleep(0)
cf.key_press('enter')
time.sleep(3)
# Step 5: click a target on the search-results page (fallback: (1905, 57)).
try:
    cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\mrmay\AppData\Local\Temp\cf_log_5fa2gg4s_generator\Images\Snips\5-modiGoogleSearchGoogleChrome-1905_57.png',conf=0.7, wait=10),left_or_right='left', single_double_triple = 'single')
except:
    cf.mouse_click(1905,57,left_or_right='left', single_double_triple = 'single')
time.sleep(0)
| [
"ClointFusion.key_write_enter",
"ClointFusion.window_show_desktop",
"ClointFusion.key_press",
"time.sleep",
"ClointFusion.mouse_search_snip_return_coordinates_x_y",
"ClointFusion.pg.size",
"ClointFusion.mouse_click"
] | [((114, 138), 'ClointFusion.window_show_desktop', 'cf.window_show_desktop', ([], {}), '()\n', (136, 138), True, 'import ClointFusion as cf\n'), ((530, 543), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (540, 543), False, 'import time\n'), ((869, 882), 'time.sleep', 'time.sleep', (['(0)'], {}), '(0)\n', (879, 882), False, 'import time\n'), ((1208, 1221), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1218, 1221), False, 'import time\n'), ((1563, 1576), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1573, 1576), False, 'import time\n'), ((1577, 1603), 'ClointFusion.key_write_enter', 'cf.key_write_enter', (['"""modi"""'], {}), "('modi')\n", (1595, 1603), True, 'import ClointFusion as cf\n'), ((1604, 1617), 'time.sleep', 'time.sleep', (['(0)'], {}), '(0)\n', (1614, 1617), False, 'import time\n'), ((1618, 1639), 'ClointFusion.key_press', 'cf.key_press', (['"""enter"""'], {}), "('enter')\n", (1630, 1639), True, 'import ClointFusion as cf\n'), ((1640, 1653), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1650, 1653), False, 'import time\n'), ((2007, 2020), 'time.sleep', 'time.sleep', (['(0)'], {}), '(0)\n', (2017, 2020), False, 'import time\n'), ((451, 529), 'ClointFusion.mouse_click', 'cf.mouse_click', (['(1788)', '(368)'], {'left_or_right': '"""left"""', 'single_double_triple': '"""single"""'}), "(1788, 368, left_or_right='left', single_double_triple='single')\n", (465, 529), True, 'import ClointFusion as cf\n'), ((791, 868), 'ClointFusion.mouse_click', 'cf.mouse_click', (['(246)', '(938)'], {'left_or_right': '"""left"""', 'single_double_triple': '"""single"""'}), "(246, 938, left_or_right='left', single_double_triple='single')\n", (805, 868), True, 'import ClointFusion as cf\n'), ((1130, 1207), 'ClointFusion.mouse_click', 'cf.mouse_click', (['(246)', '(938)'], {'left_or_right': '"""left"""', 'single_double_triple': '"""double"""'}), "(246, 938, left_or_right='left', single_double_triple='double')\n", (1144, 1207), True, 'import ClointFusion 
as cf\n'), ((1486, 1562), 'ClointFusion.mouse_click', 'cf.mouse_click', (['(385)', '(77)'], {'left_or_right': '"""left"""', 'single_double_triple': '"""single"""'}), "(385, 77, left_or_right='left', single_double_triple='single')\n", (1500, 1562), True, 'import ClointFusion as cf\n'), ((1929, 2006), 'ClointFusion.mouse_click', 'cf.mouse_click', (['(1905)', '(57)'], {'left_or_right': '"""left"""', 'single_double_triple': '"""single"""'}), "(1905, 57, left_or_right='left', single_double_triple='single')\n", (1943, 2006), True, 'import ClointFusion as cf\n'), ((229, 402), 'ClointFusion.mouse_search_snip_return_coordinates_x_y', 'cf.mouse_search_snip_return_coordinates_x_y', (['"""C:\\\\Users\\\\mrmay\\\\AppData\\\\Local\\\\Temp\\\\cf_log_5fa2gg4s_generator\\\\Images\\\\Snips\\\\1--1788_368.png"""'], {'conf': '(0.7)', 'wait': '(12)'}), "(\n 'C:\\\\Users\\\\mrmay\\\\AppData\\\\Local\\\\Temp\\\\cf_log_5fa2gg4s_generator\\\\Images\\\\Snips\\\\1--1788_368.png'\n , conf=0.7, wait=12)\n", (272, 402), True, 'import ClointFusion as cf\n'), ((570, 742), 'ClointFusion.mouse_search_snip_return_coordinates_x_y', 'cf.mouse_search_snip_return_coordinates_x_y', (['"""C:\\\\Users\\\\mrmay\\\\AppData\\\\Local\\\\Temp\\\\cf_log_5fa2gg4s_generator\\\\Images\\\\Snips\\\\2--246_938.png"""'], {'conf': '(0.7)', 'wait': '(10)'}), "(\n 'C:\\\\Users\\\\mrmay\\\\AppData\\\\Local\\\\Temp\\\\cf_log_5fa2gg4s_generator\\\\Images\\\\Snips\\\\2--246_938.png'\n , conf=0.7, wait=10)\n", (613, 742), True, 'import ClointFusion as cf\n'), ((909, 1081), 'ClointFusion.mouse_search_snip_return_coordinates_x_y', 'cf.mouse_search_snip_return_coordinates_x_y', (['"""C:\\\\Users\\\\mrmay\\\\AppData\\\\Local\\\\Temp\\\\cf_log_5fa2gg4s_generator\\\\Images\\\\Snips\\\\3--246_938.png"""'], {'conf': '(0.7)', 'wait': '(13)'}), "(\n 'C:\\\\Users\\\\mrmay\\\\AppData\\\\Local\\\\Temp\\\\cf_log_5fa2gg4s_generator\\\\Images\\\\Snips\\\\3--246_938.png'\n , conf=0.7, wait=13)\n", (952, 1081), True, 'import ClointFusion as 
cf\n'), ((1248, 1437), 'ClointFusion.mouse_search_snip_return_coordinates_x_y', 'cf.mouse_search_snip_return_coordinates_x_y', (['"""C:\\\\Users\\\\mrmay\\\\AppData\\\\Local\\\\Temp\\\\cf_log_5fa2gg4s_generator\\\\Images\\\\Snips\\\\4-NewTabGoogleChrome-385_77.png"""'], {'conf': '(0.7)', 'wait': '(11)'}), "(\n 'C:\\\\Users\\\\mrmay\\\\AppData\\\\Local\\\\Temp\\\\cf_log_5fa2gg4s_generator\\\\Images\\\\Snips\\\\4-NewTabGoogleChrome-385_77.png'\n , conf=0.7, wait=11)\n", (1291, 1437), True, 'import ClointFusion as cf\n'), ((1680, 1880), 'ClointFusion.mouse_search_snip_return_coordinates_x_y', 'cf.mouse_search_snip_return_coordinates_x_y', (['"""C:\\\\Users\\\\mrmay\\\\AppData\\\\Local\\\\Temp\\\\cf_log_5fa2gg4s_generator\\\\Images\\\\Snips\\\\5-modiGoogleSearchGoogleChrome-1905_57.png"""'], {'conf': '(0.7)', 'wait': '(10)'}), "(\n 'C:\\\\Users\\\\mrmay\\\\AppData\\\\Local\\\\Temp\\\\cf_log_5fa2gg4s_generator\\\\Images\\\\Snips\\\\5-modiGoogleSearchGoogleChrome-1905_57.png'\n , conf=0.7, wait=10)\n", (1723, 1880), True, 'import ClointFusion as cf\n'), ((159, 171), 'ClointFusion.pg.size', 'cf.pg.size', ([], {}), '()\n', (169, 171), True, 'import ClointFusion as cf\n'), ((182, 194), 'ClointFusion.pg.size', 'cf.pg.size', ([], {}), '()\n', (192, 194), True, 'import ClointFusion as cf\n')] |
#!/usr/bin/env nemesis
"""
This script creates a spatial database for the initial stress and state
variables for a Maxwell plane strain material.
"""
sim = "gravity_vardensity"
materials = ["crust","mantle"]
import numpy
import h5py
from spatialdata.spatialdb.SimpleIOAscii import SimpleIOAscii
from spatialdata.geocoords.CSCart import CSCart
# 2-D Cartesian coordinate system used for the spatial-database points.
cs = CSCart()
cs._configure()  # NOTE(review): private Pyre hook; required to initialize the component.
cs.setSpaceDim(2)
# Basis functions for quad4 cell evaluated at quadrature points. Use
# to compute coordinate of quadrature points in each cell from
# coordinates of vertices. Note the order must correspond to the order
# of the data at the quadrature points in the output.
# Each row holds the 4 bilinear shape-function values at one quadrature point.
qpts = numpy.array([[ 0.62200847, 0.16666667, 0.0446582, 0.16666667],
                    [ 0.16666667, 0.62200847, 0.16666667, 0.0446582 ],
                    [ 0.16666667, 0.0446582, 0.16666667, 0.62200847],
                    [ 0.0446582, 0.16666667, 0.62200847, 0.16666667]], dtype=numpy.float64)
def calcQuadCoords(vertices, cells, qpts):
    """Map cell vertex coordinates onto quadrature-point coordinates.

    Args:
        vertices: (nvertices, spaceDim) array of vertex coordinates.
        cells: (ncells, ncorners) array of vertex indices per cell.
        qpts: (nqpts, ncorners) basis-function values at the quadrature points.

    Returns:
        (ncells * nqpts, spaceDim) array with one row per quadrature point.
    """
    num_qpts = qpts.shape[0]
    num_cells = cells.shape[0]
    space_dim = vertices.shape[1]
    # Gather the corner coordinates of every cell: (ncells, ncorners, spaceDim).
    corner_coords = vertices[cells, :]
    coords = numpy.zeros((num_cells, num_qpts, space_dim), dtype=numpy.float64)
    # Interpolate each spatial dimension separately with the basis functions.
    for dim in range(space_dim):
        coords[:, :, dim] = numpy.dot(corner_coords[:, :, dim], qpts.T)
    return coords.reshape((num_cells * num_qpts, space_dim))
for material in materials:
    # One HDF5 simulation output in, one spatial database out, per material.
    filenameH5 = "output/%s-%s.h5" % (sim, material)
    filenameDB = "%s_statevars-%s.spatialdb" % (sim, material)

    # Open HDF5 file and get coordinates, cells, and stress.  A context
    # manager guarantees the file is closed even if a read raises.
    with h5py.File(filenameH5, "r") as h5:
        vertices = h5['geometry/vertices'][:]
        tindex = -1
        # numpy.int was a deprecated (now removed) alias of the builtin int;
        # use an explicit fixed-width integer dtype instead.
        cells = numpy.array(h5['topology/cells'][:], dtype=numpy.int64)
        stress = h5['cell_fields/stress'][tindex,:,:]
        if "mantle" in material:
            vstrain = h5['cell_fields/viscous_strain'][tindex,:,:]

    # Compute coordinates of quadrature points.
    quadCoords = calcQuadCoords(vertices, cells, qpts)
    nqpts = qpts.shape[0]
    ncells = cells.shape[0]
    # Floor division: a plain '/' yields a float under Python 3, which would
    # make the reshape calls below fail.
    nvalues = stress.shape[1] // nqpts

    # Check to make sure output included all quadrature points (CellFilterAvg was not used).
    if stress.shape[1] == 3:
        raise ValueError("Found %d stress values for each cell. Expected 12 stress values (stress_xx, stress_yy, and stress_xy at 4 quadrature points) for each cell. Turn off CellFilterAvg in pylithapp.cfg." % stress.shape[1])
    if stress.shape[1] != nqpts*3:
        raise ValueError("Found %d stress values for each cell. Expected 12 stress values (stress_xx, stress_yy, and stress_xy at 4 quadrature points) for each cell. Did you turn off CellFilterAvg in pylithapp.cfg?" % stress.shape[1])
    stress = stress.reshape((ncells*nqpts, nvalues))

    # Create writer for spatial database file
    writer = SimpleIOAscii()
    writer.inventory.filename = filenameDB
    writer._configure()
    values = [{'name': "stress-xx",
               'units': "Pa",
               'data': stress[:,0]},
              {'name': "stress-yy",
               'units': "Pa",
               'data': stress[:,1]},
              {'name': "stress-xy",
               'units': "Pa",
               'data': stress[:,2]},
              ]
    if "mantle" in material:
        # Viscoelastic material: additionally store the initial out-of-plane
        # stress, zero total strain, and the viscous strain components.
        nvalues = vstrain.shape[1] // nqpts
        vstrain = vstrain.reshape((ncells*nqpts, nvalues))
        # Plane strain: sigma_zz is the mean of the in-plane normal stresses.
        stressZZ = 0.5*(stress[:,0]+stress[:,1])
        zeros = numpy.zeros(stressZZ.shape)
        values += [{'name': "stress-zz-initial",
                    'units': "Pa",
                    'data': stressZZ},
                   {'name': "total-strain-xx",
                    'units': "None",
                    'data': zeros},
                   {'name': "total-strain-yy",
                    'units': "None",
                    'data': zeros},
                   {'name': "total-strain-xy",
                    'units': "None",
                    'data': zeros},
                   {'name': "viscous-strain-xx",
                    'units': "None",
                    'data': vstrain[:,0]},
                   {'name': "viscous-strain-yy",
                    'units': "None",
                    'data': vstrain[:,1]},
                   {'name': "viscous-strain-zz",
                    'units': "None",
                    'data': vstrain[:,2]},
                   {'name': "viscous-strain-xy",
                    'units': "None",
                    'data': vstrain[:,3]},
                   ]
    writer.write({'points': quadCoords,
                  'coordsys': cs,
                  'data_dim': 2,
                  'values': values})

# End of file
| [
"spatialdata.spatialdb.SimpleIOAscii.SimpleIOAscii",
"h5py.File",
"numpy.array",
"numpy.zeros",
"spatialdata.geocoords.CSCart.CSCart"
] | [((351, 359), 'spatialdata.geocoords.CSCart.CSCart', 'CSCart', ([], {}), '()\n', (357, 359), False, 'from spatialdata.geocoords.CSCart import CSCart\n'), ((659, 903), 'numpy.array', 'numpy.array', (['[[0.62200847, 0.16666667, 0.0446582, 0.16666667], [0.16666667, 0.62200847, \n 0.16666667, 0.0446582], [0.16666667, 0.0446582, 0.16666667, 0.62200847],\n [0.0446582, 0.16666667, 0.62200847, 0.16666667]]'], {'dtype': 'numpy.float64'}), '([[0.62200847, 0.16666667, 0.0446582, 0.16666667], [0.16666667, \n 0.62200847, 0.16666667, 0.0446582], [0.16666667, 0.0446582, 0.16666667,\n 0.62200847], [0.0446582, 0.16666667, 0.62200847, 0.16666667]], dtype=\n numpy.float64)\n', (670, 903), False, 'import numpy\n'), ((1162, 1221), 'numpy.zeros', 'numpy.zeros', (['(ncells, nqpts, spaceDim)'], {'dtype': 'numpy.float64'}), '((ncells, nqpts, spaceDim), dtype=numpy.float64)\n', (1173, 1221), False, 'import numpy\n'), ((1652, 1678), 'h5py.File', 'h5py.File', (['filenameH5', '"""r"""'], {}), "(filenameH5, 'r')\n", (1661, 1678), False, 'import h5py\n'), ((1743, 1796), 'numpy.array', 'numpy.array', (["h5['topology/cells'][:]"], {'dtype': 'numpy.int'}), "(h5['topology/cells'][:], dtype=numpy.int)\n", (1754, 1796), False, 'import numpy\n'), ((2850, 2865), 'spatialdata.spatialdb.SimpleIOAscii.SimpleIOAscii', 'SimpleIOAscii', ([], {}), '()\n', (2863, 2865), False, 'from spatialdata.spatialdb.SimpleIOAscii import SimpleIOAscii\n'), ((3414, 3441), 'numpy.zeros', 'numpy.zeros', (['stressZZ.shape'], {}), '(stressZZ.shape)\n', (3425, 3441), False, 'import numpy\n')] |
import datetime
import typing
from . import enums, tools
class CatalogueAPIWrapper:
"""Methods for listing objects"""
def __init__(
self, username: str, password: str, language: enums.Language = enums.Language.GERMAN
):
"""Create a new Wrapper containing functions for listing different object types
:param username: The username which will be used for authenticating at the database. Due
to constraints of the database the username needs to be exactly 10 characters long and
may not contain any whitespaces
:type username: str
:param password: The password which will be used for authenticating at the database. Due
to constraints of the database the password needs to be at least 10 characters long,
may not exceed 20 characters and may not contain any whitespaces
:type password: str
:param language: The language in which the responses are returned by the database.
:py:enum:mem:`~genesis_api_wrapper.enums.Language.GERMAN` has the most compatibility
with the database
since most of the tables are on German. Therefore, this parameter defaults to
:py:enum:mem:`~genesis_api_wrapper.enums.Language.GERMAN`
:type language: enums.Language
:raise ValueError: The username or the password did not match the constraints stated in
their description.
"""
if " " in username:
raise ValueError("The username may not contain any whitespaces")
if len(username) != 10:
raise ValueError("The username may only be 10 characters long")
if " " in password:
raise ValueError("The password may not contain any whitespaces")
if len(password) < 10:
raise ValueError(
f"The password may not be shorter than 10 characters. Current "
f"length: {len(password)}"
)
if len(password) > 20:
raise ValueError(
f"The password may not be longer that 20 characters. Current "
f"length: {len(password)}"
)
self._username = username
self._password = password
self._language = language
self._service_url = "/catalogue"
self._base_parameter = {
"username": self._username,
"password": self._password,
"language": self._language.value,
}
async def cubes(
self,
object_name: str,
storage_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
result_count: int = 100,
) -> dict:
"""
**PREMIUM ACCESS REQUIRED**
List the datacubes matching the ``object_name``
:param object_name: The identifier code of the data cubes. The usage of an asterisk
(``*``) is permitted as wildcard
:type object_name: str
:param storage_location: The storage location of the object, defaults to
:py:enum:mem:`~genesis_api_wrapper.enums.ObjectStorage.ALL`
:type storage_location: enums.ObjectStorage, optional
:param result_count: The maximal amount of results which are returned by the database,
defaults to 100
:type result_count: int, optional
:return: The response from the database parsed into a dict. If the ``Content-Type``
header indicated a non-JSON response the response is stored in a temporary file and
the file path will be returned
:rtype: dict, os.PathLike
:raises exceptions.GENESISPermissionError: The supplied account does not have the
permissions to access data cubes.
:raises ValueError: One of the parameters does not contain a valid value. Please check
the message of the exception for further information
"""
if " " in object_name:
raise ValueError("The object_name parameter may not contain whitespaces")
if len(object_name) == 0:
raise ValueError("The object_name parameter may not be empty")
if len(object_name) > 10:
raise ValueError("The object_name parameter may not exceed 10 characters")
if type(storage_location) is not enums.ObjectStorage:
raise ValueError(
f"The storage_location parameter only accepts "
f"{repr(enums.ObjectStorage)} values"
)
if result_count < 1:
raise ValueError("The result_count parameter value may not be below 0")
if result_count > 2500:
raise ValueError("The result_count parameter value may not exceed 2500")
query_parameters = self._base_parameter | {
"selection": object_name,
"area": storage_location.value,
"pagelength": result_count,
}
query_path = self._service_url + "/cubes"
return await tools.get_database_response(query_path, query_parameters)
async def cubes2statistic(
self,
object_name: str,
cube_code: typing.Optional[str] = None,
storage_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
result_count: int = 100,
) -> dict:
"""
**PREMIUM ACCESS REQUIRED**
List the datacubes matching the ``object_name``
:param object_name: The identifier code of the statistic
:type object_name: str
:param cube_code: The identifier code of the cube. The usage of an asterisk
(``*``) is permitted as wildcard. This value acts as filter, only showing the data
cubes matching this code
:type cube_code: str, optional
:param storage_location: The storage location of the object, defaults to
:py:enum:mem:`~genesis_api_wrapper.enums.ObjectStorage.ALL`
:type storage_location: enums.ObjectStorage
:param result_count: The maximal amount of results which are returned by the database,
defaults to 100
:type result_count: int
:return: The response from the database parsed into a dict. If the ``Content-Type``
header indicated a non-JSON response the response is stored in a temporary file and
the file path will be returned
:rtype: dict, os.PathLike
:raises exceptions.GENESISPermissionError: The supplied account does not have the
permissions to access data cubes.
:raises ValueError: One of the parameters does not contain a valid value. Please check
the message of the exception for further information
"""
if " " in object_name:
raise ValueError("The object_name parameter may not contain whitespaces")
if "*" in object_name:
raise ValueError(
"The object_name parameter may not contain asterisks. Wildcards are "
"not permitted"
)
if len(object_name) == 0:
raise ValueError("The object_name parameter may not be empty")
if len(object_name) > 6:
raise ValueError("The object_name parameter may not exceed 6 characters")
if cube_code is not None and " " in cube_code:
raise ValueError("The cube_code parameter may not contain whitespaces")
if cube_code is not None and len(cube_code) == 0:
raise ValueError("The cube_code parameter may not be empty")
if cube_code is not None and len(cube_code) > 10:
raise ValueError("The cube_code parameter may not exceed 10 characters")
if type(storage_location) is not enums.ObjectStorage:
raise ValueError(
f"The storage_location parameter only accepts "
f"{repr(enums.ObjectStorage)} values"
)
if result_count < 1:
raise ValueError("The result_count parameter value may not be below 0")
if result_count > 2500:
raise ValueError("The result_count parameter value may not exceed 2500")
query_parameters = self._base_parameter | {
"name": object_name,
"selection": "" if cube_code is None else cube_code,
"area": storage_location.value,
"pagelength": result_count,
}
query_path = self._service_url + "/cubes2statistic"
return await tools.get_database_response(query_path, query_parameters)
async def cubes2variable(
self,
object_name: str,
cube_code: str,
storage_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
result_count: int = 100,
) -> dict:
"""
**PREMIUM ACCESS REQUIRED**
List the datacubes matching the ``object_name``
:param object_name: The identifier code of the variable
:type object_name: str
:param cube_code: The identifier code of the cube. The usage of an asterisk
(``*``) is permitted as wildcard. This value acts as filter, only showing the
data cubes matching this code
:type cube_code: str, optional
:param storage_location: The storage location of the object, defaults to
:py:enum:mem:`~genesis_api_wrapper.enums.ObjectStorage.ALL`
:type storage_location: enums.ObjectStorage
:param result_count: The maximal amount of results which are returned by the
database,
defaults to 100
:type result_count: int
:return: The response from the database parsed into a dict. If the ``Content-Type``
header indicated a non-JSON response the response is stored in a temporary
file and
the file path will be returned
:rtype: dict, os.PathLike
:raises exceptions.GENESISPermissionError: The supplied account does not have the
permissions to access data cubes.
:raises ValueError: One of the parameters does not contain a valid value. Please check
the message of the exception for further information
"""
if " " in object_name:
raise ValueError("The object_name parameter may not contain whitespaces")
if "*" in object_name:
raise ValueError(
"The object_name parameter may not contain asterisks. Wildcards are "
"not permitted"
)
if len(object_name) == 0:
raise ValueError("The object_name parameter may not be empty")
if len(object_name) > 6:
raise ValueError("The object_name parameter may not exceed 6 characters")
if cube_code is not None and " " in cube_code:
raise ValueError("The cube_code parameter may not contain whitespaces")
if cube_code is not None and len(cube_code) == 0:
raise ValueError("The cube_code parameter may not be empty")
if cube_code is not None and len(cube_code) > 10:
raise ValueError("The cube_code parameter may not exceed 10 characters")
if type(storage_location) is not enums.ObjectStorage:
raise ValueError(
f"The storage_location parameter only accepts "
f"{repr(enums.ObjectStorage)} values"
)
if result_count < 1:
raise ValueError("The result_count parameter value may not be below 0")
if result_count > 2500:
raise ValueError("The result_count parameter value may not exceed 2500")
query_parameters = self._base_parameter | {
"name": object_name,
"selection": "" if cube_code is None else cube_code,
"area": storage_location.value,
"pagelength": result_count,
}
query_path = self._service_url + "/cubes2variable"
return await tools.get_database_response(query_path, query_parameters)
    async def jobs(
        self,
        object_name: str,
        search_by: enums.JobCriteria,
        sort_by: enums.JobCriteria,
        job_type: enums.JobType = enums.JobType.ALL,
        result_count: int = 100,
    ) -> dict:
        """
        Get a list of the jobs that match the parameters

        :param object_name: The identifier code of the job. This value acts as
            filter, only showing the jobs matching this code. Contrary to the
            other listing methods, wildcards (asterisks, ``*``) are NOT
            permitted here and are rejected with a :exc:`ValueError`
        :type object_name: str
        :param search_by: Criteria which shall be applied to the object_name
        :type search_by: enums.JobCriteria
        :param sort_by: Criteria by which the output shall be sorted
        :type sort_by: enums.JobCriteria
        :param job_type: The type of jobs which shall be returned, defaults to
            :py:enum:mem:`~genesis_api_wrapper.enums.JobType.ALL`
        :type job_type: enums.JobType
        :param result_count: The maximal amount of results which are returned by the
            database, defaults to 100
        :type result_count: int
        :return: The response from the database parsed into a dict. If the
            ``Content-Type`` header indicated a non-JSON response the response is
            stored in a temporary file and the file path will be returned
        :rtype: dict, os.PathLike
        :raises exceptions.GENESISPermissionError: The supplied account does not have the
            permissions to this resource.
        :raises ValueError: One of the parameters does not contain a valid value. Please check
            the message of the exception for further information
        """
        # Validate the selection filter before any network traffic happens.
        if " " in object_name:
            raise ValueError("The object_name parameter may not contain whitespaces")
        if "*" in object_name:
            raise ValueError(
                "The object_name parameter may not contain asterisks. Wildcards are "
                "not permitted"
            )
        if len(object_name) == 0:
            raise ValueError("The object_name parameter may not be empty")
        if len(object_name) > 50:
            raise ValueError("The object_name parameter may not exceed 50 characters")
        # The search/sort criteria and the job type must be proper enum members.
        if type(search_by) is not enums.JobCriteria:
            raise ValueError(
                f"The search_by parameter only accepts values from the following enumeration: "
                f"{repr(enums.JobCriteria)}"
            )
        if type(sort_by) is not enums.JobCriteria:
            raise ValueError(
                f"The sort_by parameter only accepts values from the following enumeration: "
                f"{repr(enums.JobCriteria)}"
            )
        if type(job_type) is not enums.JobType:
            raise ValueError(
                f"The job_type parameter only accepts values from the following enumeration: "
                f"{repr(enums.JobType)}"
            )
        if result_count < 1:
            raise ValueError("The result_count parameter value may not be below 0")
        if result_count > 2500:
            raise ValueError("The result_count parameter value may not exceed 2500")
        # Merge the authentication data with the request-specific parameters.
        query_parameter = self._base_parameter | {
            'selection': object_name,
            'searchcriterion': search_by.value,
            'sortcriterion': sort_by.value,
            'type': job_type.value,
            'pagelength': result_count
        }
        query_path = self._service_url + '/jobs'
        return await tools.get_database_response(query_path, query_parameter)
async def modified_data(
self,
object_filter: str,
object_type: enums.ObjectType = enums.ObjectType.ALL,
updated_after: datetime.date = datetime.date.today() - datetime.timedelta(days=-7),
result_count: int = 100
) -> dict:
"""
**Due to an error in the database the parameter** ``result_count`` **is ignored by the
database**
Get a list of modified objects which were modified or uploaded after ``updated_after``.
The following objects are returned by this query:
- Tables
- Statistics
- Statistic updates
:param object_filter: The identifier code of the object. The usage of an asterisk
(``*``) is permitted as wildcard. This value acts as filter, only showing the
jobs matching this code
:type object_filter: str
:param object_type: The type of object that shall be listed
Allowed types (enums):
- :py:enum:mem:`~genesis_api_wrapper.enums.ObjectType.ALL`
- :py:enum:mem:`~genesis_api_wrapper.enums.ObjectType.TABLES`
- :py:enum:mem:`~genesis_api_wrapper.enums.ObjectType.STATISTICS`
- :py:enum:mem:`~genesis_api_wrapper.enums.ObjectType.STATISTIC_UPDATE`
:type object_type: enums.ObjectType
:param updated_after: The date after which the object needs to be modified or uploaded to
be returned by the database, defaults to 7 days before today
:type updated_after: datetime.date
:param result_count: The number of results that will be returned
:type result_count: int
"""
if " " in object_filter:
raise ValueError("The object_filter parameter may not contain whitespaces")
if len(object_filter) == 0:
raise ValueError("The object_filter parameter may not be empty")
if len(object_filter) > 50:
raise ValueError("The object_filter parameter may not exceed 50 characters")
if type(object_type) is not enums.ObjectType:
raise ValueError(
f"The object_type parameter only accepts values from the following enumeration: "
f"{repr(enums.ObjectType)}"
)
if object_type not in [enums.ObjectType.ALL, enums.ObjectType.TABLES,
enums.ObjectType.STATISTICS, enums.ObjectType.STATISTICS_UPDATE]:
raise ValueError(
f"The supplied object_type ({object_type}) is not allowed at this resource"
)
if updated_after > datetime.date.today():
raise ValueError(
f'The updated_after parameter is in the future'
)
# ==== Build the query data ====
query_path = self._service_url + '/modifieddata'
query_parameters = self._base_parameter | {
'selection': object_filter,
'type': object_type.value,
'date': tools.convert_date_to_string(updated_after),
'pagelength': result_count
}
# ==== Return the query data ====
return await tools.get_database_response(query_path, query_parameters)
async def quality_signs(self) -> dict:
"""
Get the list of quality signs from the database
:return: The Response containing the quality signs present in the database
:rtype: dict
"""
query_path = self._service_url + '/qualitysigns'
query_parameters = self._base_parameter
return await tools.get_database_response(query_path, query_parameters)
async def results(
self,
object_name: str,
storage_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
result_count: int = 100
) -> dict:
"""
Get a list of result tables matching the ``object_name``
:param object_name: The identifier code of the result tables. The usage of an asterisk
(``*``) is permitted as wildcard
:type object_name: str
:param storage_location: The storage location of the object, defaults to
:py:enum:mem:`~genesis_api_wrapper.enums.ObjectStorage.ALL`
:type storage_location: enums.ObjectStorage, optional
:param result_count: The maximal amount of results which are returned by the database,
defaults to 100
:type result_count: int, optional
:return: The response from the database parsed into a dict. If the ``Content-Type``
header indicated a non-JSON response the response is stored in a temporary file and
the file path will be returned
:rtype: dict, os.PathLike
:raises exceptions.GENESISPermissionError: The supplied account does not have the
permissions to access data cubes.
:raises ValueError: One of the parameters does not contain a valid value. Please check
the message of the exception for further information
"""
if " " in object_name:
raise ValueError("The object_name parameter may not contain whitespaces")
if len(object_name) == 0:
raise ValueError("The object_name parameter may not be empty")
if len(object_name) > 10:
raise ValueError("The object_name parameter may not exceed 10 characters")
if type(storage_location) is not enums.ObjectStorage:
raise ValueError(
f"The storage_location parameter only accepts "
f"{repr(enums.ObjectStorage)} values"
)
if result_count < 1:
raise ValueError("The result_count parameter value may not be below 0")
if result_count > 2500:
raise ValueError("The result_count parameter value may not exceed 2500")
# ==== Build the query path and parameters ====
query_path = self._service_url + '/results'
query_parameters = self._base_parameter | {
'selection': object_name,
'area': storage_location.value,
'pagelength': result_count
}
# ==== Get the response ====
return await tools.get_database_response(query_path, query_parameters)
async def statistics(
        self,
        object_name: str,
        storage_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
        search_by: enums.GenericCriteria = enums.GenericCriteria.CODE,
        sort_by: enums.GenericCriteria = enums.GenericCriteria.CODE,
        result_count: int = 100
) -> dict:
    """
    Get a list of statistics matching the supplied code

    :param object_name: The identifier code of the statistics. The usage of an asterisk
        (``*``) is permitted as wildcard
    :type object_name: str
    :param storage_location: The storage location of the object, defaults to
        :py:enum:mem:`~genesis_api_wrapper.enums.ObjectStorage.ALL`
    :type storage_location: enums.ObjectStorage, optional
    :param search_by: Criteria which shall be applied to the ``object_name``, defaults to
        :py:enum:mem:`~genesis_api_wrapper.enums.GenericCriteria.CODE`
    :type search_by: enums.GenericCriteria, optional
    :param sort_by: Criteria by which the result shall be sorted, defaults to
        :py:enum:mem:`~genesis_api_wrapper.enums.GenericCriteria.CODE`
    :type sort_by: enums.GenericCriteria, optional
    :param result_count: The number of results that the response shall contain at it's
        maximum, defaults to 100
    :type result_count: int
    :return: The response from the database parsed into a dict. If the ``Content-Type``
        header indicated a non-JSON response the response is stored in a temporary file and
        the file path will be returned
    :rtype: dict, os.PathLike
    :raises exceptions.GENESISPermissionError: The supplied account does not have the
        permissions to access data cubes.
    :raises ValueError: One of the parameters does not contain a valid value. Please check
        the message of the exception for further information
    """
    # ==== Validate the parameters ====
    if " " in object_name:
        raise ValueError("The object_name parameter may not contain whitespaces")
    if len(object_name) == 0:
        raise ValueError("The object_name parameter may not be empty")
    if len(object_name) > 15:
        raise ValueError("The object_name parameter may not exceed 15 characters")
    if type(storage_location) is not enums.ObjectStorage:
        raise ValueError(
            f"The storage_location parameter only accepts "
            f"{repr(enums.ObjectStorage)} values"
        )
    if type(search_by) is not enums.GenericCriteria:
        raise ValueError(
            f"The search_by parameter only accepts "
            f"{repr(enums.GenericCriteria)} values"
        )
    if type(sort_by) is not enums.GenericCriteria:
        raise ValueError(
            f"The sort_by parameter only accepts "
            f"{repr(enums.GenericCriteria)} values"
        )
    if result_count < 1:
        # BUG FIX: the check rejects values below 1, but the old message
        # claimed the limit was 0.
        raise ValueError("The result_count parameter value may not be below 1")
    if result_count > 2500:
        raise ValueError("The result_count parameter value may not exceed 2500")
    # ==== Build query path and parameters ====
    query_path = self._service_url + '/statistics'
    query_parameters = self._base_parameter | {
        'selection': object_name,
        'searchcriterion': search_by.value,
        'sortcriterion': sort_by.value,
        'pagelength': result_count
    }
    return await tools.get_database_response(query_path, query_parameters)
async def statistics2variable(
        self,
        variable_name: str,
        statistic_selector: str = None,
        search_by: enums.StatisticCriteria = enums.StatisticCriteria.CODE,
        sort_by: enums.StatisticCriteria = enums.StatisticCriteria.CODE,
        object_area: enums.ObjectStorage = enums.ObjectStorage.ALL,
        result_count: int = 100,
):
    """List the statistics which are referenced by the selected variable.

    :param variable_name: The name of the variable [required, 1-15 characters]
    :param statistic_selector: Optional filter applied to the statistic codes
        (wildcards allowed, max. length 15)
    :param search_by: The field on which the selector shall be applied
    :param sort_by: The field by which the results are to be sorted
    :param object_area: The storage area in which the object is searched
    :param result_count: The maximum number of results returned by the request
    :return: The parsed response returned by the server
    """
    # Guard clauses: the variable name is mandatory, both names are limited
    # to 15 characters after trimming surrounding whitespace.
    if variable_name is None:
        raise ValueError("The variable name needs to be set to run a successful query")
    if not 1 <= len(variable_name.strip()) <= 15:
        raise ValueError("The variable names length needs to be between 1 and 15 signs")
    if statistic_selector and not (1 <= len(statistic_selector.strip()) <= 15):
        raise ValueError("The selectors length may not exceed 15 characters")
    # An omitted selector is transmitted as an empty string.
    selection = statistic_selector if statistic_selector is not None else ""
    query_parameters = {
        **self._base_parameter,
        "name": variable_name,
        "selection": selection,
        "searchcriterion": search_by.value,
        "sortcriterion": sort_by.value,
        "pagelength": result_count,
        "area": object_area.value,
    }
    query_url = self._service_url + "/statistics2variable"
    return await tools.get_database_response(query_url, query_parameters)
async def tables(
        self,
        table_selector: str,
        object_area: enums.ObjectStorage = enums.ObjectStorage.ALL,
        sort_by: enums.TableCriteria = enums.TableCriteria.CODE,
        result_count: int = 100,
) -> dict:
    """List the tables matching the selector in the selected object area.

    :param table_selector: The code of the table [required, stars (*) allowed for wildcards]
    :param object_area: The area in which the table is stored [defaults to ALL]
    :param sort_by: The criteria by which the results shall be sorted [defaults to CODE]
    :param result_count: The number of results that shall be returned
    :return: A list of tables matching the request
    """
    # The trimmed selector, when given, must be between 1 and 15 characters.
    if table_selector and not (1 <= len(table_selector.strip()) <= 15):
        raise ValueError(
            "The table selector needs to be at least 1 character and max 15 characters"
        )
    request_parameters = {
        **self._base_parameter,
        "selection": table_selector,
        "area": object_area.value,
        # The selector is always matched against the table code.
        "searchcriterion": "Code",
        "sortcriterion": sort_by.value,
        "pagelength": result_count,
    }
    request_url = self._service_url + "/tables"
    return await tools.get_database_response(request_url, request_parameters)
async def tables2statistics(
        self,
        statistics_name: str,
        table_selector: str = None,
        object_area: enums.ObjectStorage = enums.ObjectStorage.ALL,
        result_count: int = 100,
) -> dict:
    """List the tables assigned to a statistic, optionally filtered by code.

    :param statistics_name: Name of the statistic [required, 1-15 characters]
    :param table_selector: Filter for the tables code [optional, wildcards allowed]
    :param object_area: The location of the statistic/tables
    :param result_count: The number of tables in the response
    :return: The parsed database response
    """
    # Validate the mandatory statistic name and the optional table filter.
    if statistics_name is None:
        raise ValueError("The name of the statistic is required to get the tables")
    if not 1 <= len(statistics_name.strip()) <= 15:
        raise ValueError("The length of the statistics name needs to be between 1 and 15")
    if table_selector and not (1 <= len(table_selector.strip()) <= 15):
        raise ValueError(
            "The table selector needs to be at least 1 character and max 15 characters"
        )
    request_parameters = {
        **self._base_parameter,
        "name": statistics_name,
        "selection": table_selector,
        "area": object_area.value,
        "pagelength": result_count,
    }
    # NOTE: the remote endpoint name is singular ("tables2statistic").
    request_url = self._service_url + "/tables2statistic"
    return await tools.get_database_response(request_url, request_parameters)
async def tables2variable(
        self,
        variable_name: str,
        table_selector: str = None,
        object_area: enums.ObjectStorage = enums.ObjectStorage.ALL,
        result_count: int = 100,
) -> dict:
    """Get a list of tables matching the table selector which are assigned to the variable

    :param variable_name: Name of the variable [required, 1-15 characters]
    :param table_selector: Filter for the tables code [optional, wildcards allowed]
    :param object_area: The location of the variable/tables
    :param result_count: The number of tables in the response
    :return: The parsed database response
    """
    if variable_name is None:
        raise ValueError("The name of the statistic is required to get the tables")
    # CONSISTENCY FIX: measure the trimmed name, as every sibling method
    # (tables2statistics, statistics2variable, ...) does via .strip().
    if not 1 <= len(variable_name.strip()) <= 15:
        raise ValueError("The length of the statistics name needs to be between 1 and 15")
    if table_selector and not (1 <= len(table_selector.strip()) <= 15):
        raise ValueError(
            "The table selector needs to be at least 1 character and max 15 " "characters"
        )
    _param = self._base_parameter | {
        "name": variable_name,
        "selection": table_selector,
        "area": object_area.value,
        "pagelength": result_count,
    }
    _url = self._service_url + "/tables2variable"
    return await tools.get_database_response(_url, _param)
async def terms(self, term_selector: str, result_count: int = 100):
    """List the terms matching the supplied selector.

    :param term_selector: The selector for the terms [required, wildcards allowed]
    :param result_count: The number of terms which shall be returned
    :return: The parsed response from the server
    """
    if term_selector is None:
        raise ValueError("The selector for the terms is a required parameter")
    if not 1 <= len(term_selector.strip()) <= 15:
        raise ValueError("The length of the selector needs to be between 1 and 15")
    query = {**self._base_parameter, "selection": term_selector, "pagelength": result_count}
    return await tools.get_database_response(self._service_url + "/terms", query)
async def timeseries(
        self,
        timeseries_selector: str,
        object_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
        result_count: int = 100,
) -> dict:
    """List the timeseries matching the selector within an object location.

    :param timeseries_selector: The selector for the timeseries [required,
        wildcards allowed]
    :param object_location: The area in which the object is stored [default:
        ``enums.ObjectStorage.ALL``]
    :param result_count: The number of results that shall be returned
    :return: The list of found timeseries
    """
    # The selector is mandatory and limited to 15 trimmed characters.
    if timeseries_selector is None:
        raise ValueError("The selector is required for a successful database request")
    if not 1 <= len(timeseries_selector.strip()) <= 15:
        raise ValueError(
            "The length of the selector needs to be between 1 and 15 characters"
        )
    query = {
        **self._base_parameter,
        "selection": timeseries_selector,
        "area": object_location.value,
        "pagelength": result_count,
    }
    return await tools.get_database_response(self._service_url + "/timeseries", query)
async def timeseries2statistic(
        self,
        statistic_name: str,
        timeseries_selector: typing.Optional[str] = None,
        object_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
        result_count: int = 100,
):
    """List the timeseries which are related to the selected statistic.

    :param statistic_name: Code of the statistic [required, length: 1-15 characters]
    :param timeseries_selector: Filter for the timeseries by their code [optional,
        wildcards allowed]
    :param object_location: The storage location of the object
    :param result_count: The number of results that shall be returned
    :return: A response containing the list of timeseries which match the supplied
        parameters
    """
    if statistic_name is None:
        raise ValueError("The name of the statistic is a required parameter")
    if timeseries_selector and not (1 <= len(timeseries_selector.strip()) <= 15):
        raise ValueError(
            "If a timeseries_selector is supplied its length may not exceed 15 characters"
        )
    # An omitted selector is transmitted as an empty string.
    selection = timeseries_selector if timeseries_selector is not None else ""
    query = {
        **self._base_parameter,
        "name": statistic_name,
        "selection": selection,
        "area": object_location.value,
        "pagelength": result_count,
    }
    return await tools.get_database_response(
        self._service_url + "/timeseries2statistic", query)
async def timeseries2variable(
        self,
        variable_name: str,
        timeseries_selector: typing.Optional[str] = None,
        object_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
        result_count: int = 100,
) -> dict:
    """List the timeseries which are related to the specified variable.

    :param variable_name: The code of the variable [required, 1-15 characters]
    :param timeseries_selector: A filter for the returned timeseries [optional,
        wildcards allowed]
    :param object_location: The storage location in which the search shall be
        executed [optional, defaults to ``enums.ObjectStorage.ALL``]
    :param result_count: The number of results that shall be returned
    :return: A parsed response containing the list of timeseries, if any were found
    """
    # Guard clauses for the mandatory name and the optional selector.
    if variable_name is None:
        raise ValueError("The variable_name is a required parameter")
    if not (1 <= len(variable_name.strip()) <= 15):
        raise ValueError("The length of the variable name may not exceed 15 characters")
    if timeseries_selector and not (1 <= len(timeseries_selector.strip()) <= 15):
        raise ValueError(
            "If a timeseries_selector is supplied its length may not exceed 15 characters"
        )
    selection = timeseries_selector if timeseries_selector is not None else ""
    request_parameters = {
        **self._base_parameter,
        "name": variable_name,
        "selection": selection,
        "area": object_location.value,
        "pagelength": result_count,
    }
    request_url = self._service_url + "/timeseries2variable"
    return await tools.get_database_response(request_url, request_parameters)
async def values(
        self,
        value_filter: str,
        object_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
        search_by: enums.GenericCriteria = enums.GenericCriteria.CODE,
        sort_by: enums.GenericCriteria = enums.GenericCriteria.CODE,
        result_count: int = 100,
) -> dict:
    """List the values specified by the filter.

    :param value_filter: The filter for the value identifications [required,
        wildcards allowed]
    :param object_location: The storage location which shall be used during the
        search [optional, defaults to ``ObjectStorage.ALL``]
    :param search_by: The criteria which is used in combination to the
        value_filter [optional, defaults to ``GenericCriteria.CODE``]
    :param sort_by: The criteria by which the results are sorted [optional,
        defaults to ``GenericCriteria.CODE``]
    :param result_count: The number of results returned
    :return: A parsed response containing the list of values
    """
    # Validate the filter and the requested page length.
    if value_filter is None:
        raise ValueError("The value_filter is a required parameter")
    if not 1 <= len(value_filter.strip()) <= 15:
        raise ValueError(
            "The length of the value_filter needs to be at least 1 character "
            "and may not exceed 15 characters"
        )
    if not 1 <= result_count <= 2500:
        raise ValueError(
            "The number of results returned needs to be greater than 1, "
            "but may not exceed 2500"
        )
    query = {
        **self._base_parameter,
        "selection": value_filter,
        "area": object_location.value,
        "searchcriterion": search_by.value,
        "sortcriterion": sort_by.value,
        "pagelength": result_count,
    }
    return await tools.get_database_response(self._service_url + "/values", query)
async def values2variable(
        self,
        variable_name: str,
        value_filter: typing.Optional[str] = None,
        object_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
        search_by: enums.GenericCriteria = enums.GenericCriteria.CODE,
        sort_by: enums.GenericCriteria = enums.GenericCriteria.CODE,
        result_count: int = 100,
) -> dict:
    """List the characteristic values of the supplied variable.

    :param variable_name: The code of the variable [required, 1-15 characters,
        no wildcards]
    :param value_filter: A filter for the returned values [optional, wildcards allowed]
    :param object_location: The storage location of the variable
    :param search_by: Criteria which is applied to the ``value_filter``
    :param sort_by: Criteria which is used to sort the results
    :param result_count: The number of characteristic values which may be returned
    :return: A parsed response from the server containing the list of characteristic values
    """
    # --- variable_name: present, 1-15 characters, no wildcards ---
    if not variable_name or len(variable_name.strip()) == 0:
        raise ValueError("The variable_name is a required parameter and may not be empty")
    if not (1 <= len(variable_name.strip()) <= 15):
        raise ValueError(
            "The length of the variable_name may not exceed 15 characters "
            "and may not be below 1 character"
        )
    if "*" in variable_name:
        raise ValueError("The variable_name may not contain any wildcards (*)")
    # --- value_filter: optional, 1-15 characters when supplied ---
    if value_filter and not (1 <= len(value_filter.strip()) <= 15):
        raise ValueError(
            "The length of the value_filter may not exceed 15 characters and "
            "may not be below 1"
        )
    # --- result_count: 1..2500 ---
    if not 1 <= result_count <= 2500:
        raise ValueError(
            "The number of results returned needs to be greater than 1, "
            "but may not exceed 2500"
        )
    query = {
        **self._base_parameter,
        "name": variable_name,
        "selection": value_filter,
        "area": object_location.value,
        "searchcriterion": search_by.value,
        "sortcriterion": sort_by.value,
        "pagelength": result_count,
    }
    return await tools.get_database_response(self._service_url + "/values2variable", query)
async def variables(
        self,
        variable_filter: str,
        object_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
        search_by: enums.GenericCriteria = enums.GenericCriteria.CODE,
        sort_by: enums.GenericCriteria = enums.GenericCriteria.CODE,
        variable_type: enums.VariableType = enums.VariableType.ALL,
        result_count: int = 100,
) -> dict:
    """Get a list of variables matching the filter and object location

    :param variable_filter: Identification Code of the variable [required, wildcards
        allowed, max. 6 characters]
    :param object_location: The storage location of the object [optional]
    :param search_by: Criteria which is applied to the variable filter [optional]
    :param sort_by: Criteria by which the result is sorted [optional]
    :param variable_type: The type of variable [optional]
    :param result_count: The number of results that may be returned [optional]
    :return: A parsed response from the server containing the variables
    """
    # Check if the filter is supplied correctly
    if not variable_filter or len(variable_filter.strip()) == 0:
        # BUG FIX: the message previously read "parameter any may not be
        # empty" (typo for "and").
        raise ValueError("The variable_filter is a required parameter and may not be empty")
    if not (1 <= len(variable_filter.strip()) <= 6):
        raise ValueError("The variable_filter may only contain up to 6 characters")
    # Check if the result count is set properly
    if not (1 <= result_count <= 2500):
        raise ValueError("The number of possible results needs to be between 1 and 2500")
    # Build the query parameters
    _param = self._base_parameter | {
        "selection": variable_filter,
        "area": object_location.value,
        "searchcriterion": search_by.value,
        "sortcriterion": sort_by.value,
        "type": variable_type.value,
        "pagelength": result_count,
    }
    # Build the url
    _url = self._service_url + "/variables"
    # Return the parsed result
    return await tools.get_database_response(_url, _param)
async def variables2statistic(
        self,
        statistic_name: str,
        variable_filter: typing.Optional[str] = None,
        object_location: enums.ObjectStorage = enums.ObjectStorage.ALL,
        search_by: enums.GenericCriteria = enums.GenericCriteria.CODE,
        sort_by: enums.GenericCriteria = enums.GenericCriteria.CODE,
        variable_type: enums.VariableType = enums.VariableType.ALL,
        result_count: int = 100,
) -> dict:
    """List the variables related to the supplied statistic.

    :param statistic_name: The identification of the statistic [required,
        1-15 characters, no wildcards]
    :param variable_filter: Filter for the returned variables [optional, wildcards
        allowed, max. 6 characters]
    :param object_location: Storage location which is used for the search [optional]
    :param search_by: Criteria which is applied to the variable_filter [optional]
    :param sort_by: Criteria specifying how the results are to be sorted [optional]
    :param variable_type: The type of variables that shall be returned [optional]
    :param result_count: Max. amount of results returned by the server [optional]
    :return: A parsed response containing a list of variables
    """
    # --- statistic_name: present, 1-15 characters, no wildcards ---
    if not statistic_name or len(statistic_name.strip()) == 0:
        raise ValueError("The statistic_name is a required parameter")
    if not (1 <= len(statistic_name.strip()) <= 15):
        raise ValueError("The length of statistic_name may not exceed 15 characters")
    if "*" in statistic_name:
        raise ValueError("The statistic_name may not contain wildcards (*)")
    # --- variable_filter: optional, at most 6 trimmed characters ---
    if variable_filter and not (1 <= len(variable_filter.strip()) <= 6):
        raise ValueError(
            "The variable_filter may not exceed the length of 6 characters, "
            "if it is supplied"
        )
    request_parameters = {
        **self._base_parameter,
        "name": statistic_name,
        "selection": variable_filter,
        "area": object_location.value,
        "searchcriterion": search_by.value,
        "sortcriterion": sort_by.value,
        "type": variable_type.value,
        "pagelength": result_count,
    }
    request_path = self._service_url + "/variables2statistic"
    return await tools.get_database_response(request_path, request_parameters)
| [
"datetime.date.today",
"datetime.timedelta"
] | [((15395, 15416), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (15414, 15416), False, 'import datetime\n'), ((15419, 15446), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(-7)'}), '(days=-7)\n', (15437, 15446), False, 'import datetime\n'), ((17879, 17900), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (17898, 17900), False, 'import datetime\n')] |
# coding: utf-8
"""加密算法:公钥(私钥)加密,私钥解密"""
from Crypto.PublicKey import RSA
from Crypto import Random
DATA = 'Hello, word!'  # sample plaintext round-tripped in the demo below (sic: "word")
PRIVATE_KEY_PEM = """-----<KEY>"""  # placeholder -- real PEM key material redacted
PUBLIC_KEY_PEM = """-----<KEY>"""  # placeholder -- real PEM key material redacted
def _encrypt_by_public():
    """Encrypt ``DATA`` with the RSA public key."""
    key = RSA.importKey(PUBLIC_KEY_PEM)
    return key.encrypt(DATA, Random.new().read)
def _encrypt_by_private():
    """Encrypt ``DATA`` with the RSA private key."""
    key = RSA.importKey(PRIVATE_KEY_PEM)
    return key.encrypt(DATA, Random.new().read)
def _decrypt_by_private(msg_encrypt):
    """Decrypt ``msg_encrypt`` with the RSA private key."""
    return RSA.importKey(PRIVATE_KEY_PEM).decrypt(msg_encrypt)
def _decrypt_by_public_err(msg_encrypt):
    """Attempt decryption with the public key (ineffective; kept to demonstrate the failure)."""
    return RSA.importKey(PUBLIC_KEY_PEM).decrypt(msg_encrypt)
if __name__ == '__main__':
    # Round-trips that work: private-key decryption of either encryption.
    print(DATA, _decrypt_by_private(_encrypt_by_public()))
    print(DATA, _decrypt_by_private(_encrypt_by_private()))
    # Round-trips that fail: public-key "decryption" raises TypeError.
    for encryptor in (_encrypt_by_public, _encrypt_by_private):
        try:
            print(DATA, _decrypt_by_public_err(encryptor()))
        except TypeError as err:
            print(DATA, err)
| [
"Crypto.Random.new",
"Crypto.PublicKey.RSA.importKey"
] | [((275, 304), 'Crypto.PublicKey.RSA.importKey', 'RSA.importKey', (['PUBLIC_KEY_PEM'], {}), '(PUBLIC_KEY_PEM)\n', (288, 304), False, 'from Crypto.PublicKey import RSA\n'), ((463, 493), 'Crypto.PublicKey.RSA.importKey', 'RSA.importKey', (['PRIVATE_KEY_PEM'], {}), '(PRIVATE_KEY_PEM)\n', (476, 493), False, 'from Crypto.PublicKey import RSA\n'), ((628, 658), 'Crypto.PublicKey.RSA.importKey', 'RSA.importKey', (['PRIVATE_KEY_PEM'], {}), '(PRIVATE_KEY_PEM)\n', (641, 658), False, 'from Crypto.PublicKey import RSA\n'), ((802, 831), 'Crypto.PublicKey.RSA.importKey', 'RSA.importKey', (['PUBLIC_KEY_PEM'], {}), '(PUBLIC_KEY_PEM)\n', (815, 831), False, 'from Crypto.PublicKey import RSA\n'), ((240, 252), 'Crypto.Random.new', 'Random.new', ([], {}), '()\n', (250, 252), False, 'from Crypto import Random\n'), ((427, 439), 'Crypto.Random.new', 'Random.new', ([], {}), '()\n', (437, 439), False, 'from Crypto import Random\n')] |
from django.contrib.auth import get_user_model
from rest_framework import mixins
from rest_framework.viewsets import GenericViewSet
from users.serializers import UserSerializer
class UserViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, GenericViewSet):
    """Read-only user endpoint: list users and retrieve a single user by UUID."""

    serializer_class = UserSerializer
    lookup_field = "uuid"
    queryset = get_user_model().objects.all()
| [
"django.contrib.auth.get_user_model"
] | [((280, 296), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (294, 296), False, 'from django.contrib.auth import get_user_model\n')] |
import numpy
import chainer
from chainer.backends import cuda
from chainer.functions.activation import sigmoid
from chainer.functions.activation import tanh
from chainer.functions.array import concat
from chainer.functions.math import linear_interpolate
from chainer import link
from chainer.links.connection import linear
class MGUBase(link.Chain):
    """Shared implementation of a Minimal Gated Unit (MGU) cell.

    Holds the two linear projections and the single-step update rule;
    concrete subclasses decide how the hidden state is carried.
    """

    def __init__(self, n_inputs, n_units):
        super(MGUBase, self).__init__()
        in_size = n_inputs + n_units
        with self.init_scope():
            # Forget-gate and candidate-state projections, both fed [h; x].
            self.W_f = linear.Linear(in_size, n_units)
            self.W_h = linear.Linear(in_size, n_units)

    def _call_mgu(self, h, x):
        # Forget gate: f = sigmoid(W_f [h; x])
        gate = sigmoid.sigmoid(self.W_f(concat.concat([h, x])))
        # Candidate state computed from the gated previous state.
        candidate = tanh.tanh(self.W_h(concat.concat([gate * h, x])))
        # New state interpolates between candidate and previous state.
        return linear_interpolate.linear_interpolate(gate, candidate, h)
class StatelessMGU(MGUBase):
    # Stateless variant: the caller supplies the previous hidden state on
    # every call, so forward(h, x) is simply the shared MGU step.
    forward = MGUBase._call_mgu
class StatefulMGU(MGUBase):
    """Minimal Gated Unit link that keeps its hidden state between calls."""

    def __init__(self, in_size, out_size):
        super(StatefulMGU, self).__init__(in_size, out_size)
        # Width of the hidden state vector ``h``.
        self._state_size = out_size
        self.reset_state()

    def _to_device(self, device, skip_between_cupy_devices=False):
        # Overrides Link._to_device
        # TODO(niboshi): Avoid forcing concrete links to override _to_device
        device = chainer.get_device(device)
        super(StatefulMGU, self)._to_device(
            device, skip_between_cupy_devices=skip_between_cupy_devices)
        if self.h is not None:
            # Move the stored state along with the parameters, unless we were
            # asked to skip a transfer between two distinct CuPy devices.
            if not (skip_between_cupy_devices
                    and device.xp is cuda.cupy
                    and isinstance(self.h, cuda.ndarray)):
                self.h.to_device(device)
        return self

    def set_state(self, h):
        # Adopt an externally supplied state, moving it to this link's device
        # (CPU when self.xp is numpy, otherwise GPU).
        assert isinstance(h, chainer.Variable)
        h_ = h
        if self.xp is numpy:
            h_.to_cpu()
        else:
            h_.to_gpu()
        self.h = h_

    def reset_state(self):
        # Drop the state; the next forward() re-initializes it with zeros.
        self.h = None

    def forward(self, x):
        # Lazily create a zero state matching the batch size of the first
        # input after a reset.
        if self.h is None:
            n_batch = x.shape[0]
            dtype = chainer.get_dtype()
            h_data = self.xp.zeros(
                (n_batch, self._state_size), dtype=dtype)
            h = chainer.Variable(h_data)
        else:
            h = self.h
        self.h = self._call_mgu(h, x)
        return self.h
| [
"chainer.functions.array.concat.concat",
"chainer.Variable",
"chainer.links.connection.linear.Linear",
"chainer.get_device",
"chainer.functions.math.linear_interpolate.linear_interpolate",
"chainer.get_dtype"
] | [((773, 823), 'chainer.functions.math.linear_interpolate.linear_interpolate', 'linear_interpolate.linear_interpolate', (['f', 'h_bar', 'h'], {}), '(f, h_bar, h)\n', (810, 823), False, 'from chainer.functions.math import linear_interpolate\n'), ((1305, 1331), 'chainer.get_device', 'chainer.get_device', (['device'], {}), '(device)\n', (1323, 1331), False, 'import chainer\n'), ((492, 534), 'chainer.links.connection.linear.Linear', 'linear.Linear', (['(n_inputs + n_units)', 'n_units'], {}), '(n_inputs + n_units, n_units)\n', (505, 534), False, 'from chainer.links.connection import linear\n'), ((558, 600), 'chainer.links.connection.linear.Linear', 'linear.Linear', (['(n_inputs + n_units)', 'n_units'], {}), '(n_inputs + n_units, n_units)\n', (571, 600), False, 'from chainer.links.connection import linear\n'), ((2053, 2072), 'chainer.get_dtype', 'chainer.get_dtype', ([], {}), '()\n', (2070, 2072), False, 'import chainer\n'), ((2183, 2207), 'chainer.Variable', 'chainer.Variable', (['h_data'], {}), '(h_data)\n', (2199, 2207), False, 'import chainer\n'), ((670, 691), 'chainer.functions.array.concat.concat', 'concat.concat', (['[h, x]'], {}), '([h, x])\n', (683, 691), False, 'from chainer.functions.array import concat\n'), ((729, 754), 'chainer.functions.array.concat.concat', 'concat.concat', (['[f * h, x]'], {}), '([f * h, x])\n', (742, 754), False, 'from chainer.functions.array import concat\n')] |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Member, Band
from rockband.serializers import MemberSerializer
MEMBERS_URL = reverse('rockband:member-list')
class PublicMembersApiTests(TestCase):
    """Tests for the members API as an unauthenticated client."""

    def setUp(self):
        # Plain API client without any authentication.
        self.client = APIClient()

    def test_login_required(self):
        """The endpoint must reject anonymous requests with 401."""
        response = self.client.get(MEMBERS_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateMemberApiTests(TestCase):
    """
    Test the private member API
    """

    def setUp(self):
        # Authenticated client shared by every test in this class.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            '<PASSWORD>'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_member_list(self):
        """
        Test retrieving a list of members
        :return:
        """
        Member.objects.create(user=self.user, name='Hendrix')
        Member.objects.create(user=self.user, name='Satriani')
        res = self.client.get(MEMBERS_URL)
        # The API is expected to return the members ordered by name, desc.
        members = Member.objects.all().order_by('-name')
        serializer = MemberSerializer(members, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_members_limited_to_user(self):
        """
        Test that only members for the authenticated user are returned
        :return:
        """
        user2 = get_user_model().objects.create_user(
            '<EMAIL>',
            '<PASSWORD>'
        )
        Member.objects.create(user=user2, name='Lemmy')
        member = Member.objects.create(user=self.user, name='Elvis')
        res = self.client.get(MEMBERS_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], member.name)

    def test_create_member_successful(self):
        """
        Test creating a new member
        :return:
        """
        payload = {'name': 'Petrucci'}
        self.client.post(MEMBERS_URL, payload)
        exists = Member.objects.filter(
            user=self.user,
            name=payload['name'],
        ).exists()
        self.assertTrue(exists)

    def test_create_member_invalid(self):
        """
        Test creating invalid member fails
        :return:
        """
        payload = {'name': ''}
        res = self.client.post(MEMBERS_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_members_assigned_to_bands(self):
        """
        Test filtering members by those assigned to bands
        :return:
        """
        member1 = Member.objects.create(
            user=self.user, name='Joakim'
        )
        member2 = Member.objects.create(
            user=self.user, name='Tony'
        )
        band = Band.objects.create(
            title='Sabaton',
            band_members=5,
            tickets=55.5,
            user=self.user
        )
        band.members.add(member1)
        res = self.client.get(MEMBERS_URL, {'assigned_only': 1})
        serializer1 = MemberSerializer(member1)
        serializer2 = MemberSerializer(member2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)

    def test_retrieve_members_assigned_unique(self):
        """
        Test filtering members by assigned returns unique items
        :return:
        """
        # BUG FIX: this method was named 'Test_...' (capital T), so the
        # unittest/pytest discovery never executed it.
        member = Member.objects.create(
            user=self.user, name='Joakim'
        )
        Member.objects.create(
            user=self.user, name='Tony'
        )
        band1 = Band.objects.create(
            title='Sabaton',
            band_members=5,
            tickets=55.5,
            user=self.user
        )
        band1.members.add(member)
        band2 = Band.objects.create(
            title='Sonata',
            band_members=5,
            tickets=45.5,
            user=self.user
        )
        band2.members.add(member)
        res = self.client.get(MEMBERS_URL, {'assigned_only': 1})
        self.assertEqual(len(res.data), 1)
| [
"django.contrib.auth.get_user_model",
"django.urls.reverse",
"core.models.Member.objects.all",
"rest_framework.test.APIClient",
"core.models.Member.objects.filter",
"core.models.Band.objects.create",
"core.models.Member.objects.create",
"rockband.serializers.MemberSerializer"
] | [((294, 325), 'django.urls.reverse', 'reverse', (['"""rockband:member-list"""'], {}), "('rockband:member-list')\n", (301, 325), False, 'from django.urls import reverse\n'), ((475, 486), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (484, 486), False, 'from rest_framework.test import APIClient\n'), ((872, 883), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (881, 883), False, 'from rest_framework.test import APIClient\n'), ((1183, 1236), 'core.models.Member.objects.create', 'Member.objects.create', ([], {'user': 'self.user', 'name': '"""Hendrix"""'}), "(user=self.user, name='Hendrix')\n", (1204, 1236), False, 'from core.models import Member, Band\n'), ((1245, 1299), 'core.models.Member.objects.create', 'Member.objects.create', ([], {'user': 'self.user', 'name': '"""Satriani"""'}), "(user=self.user, name='Satriani')\n", (1266, 1299), False, 'from core.models import Member, Band\n'), ((1423, 1459), 'rockband.serializers.MemberSerializer', 'MemberSerializer', (['members'], {'many': '(True)'}), '(members, many=True)\n', (1439, 1459), False, 'from rockband.serializers import MemberSerializer\n'), ((1846, 1893), 'core.models.Member.objects.create', 'Member.objects.create', ([], {'user': 'user2', 'name': '"""Lemmy"""'}), "(user=user2, name='Lemmy')\n", (1867, 1893), False, 'from core.models import Member, Band\n'), ((1911, 1962), 'core.models.Member.objects.create', 'Member.objects.create', ([], {'user': 'self.user', 'name': '"""Elvis"""'}), "(user=self.user, name='Elvis')\n", (1932, 1962), False, 'from core.models import Member, Band\n'), ((2992, 3044), 'core.models.Member.objects.create', 'Member.objects.create', ([], {'user': 'self.user', 'name': '"""Joakim"""'}), "(user=self.user, name='Joakim')\n", (3013, 3044), False, 'from core.models import Member, Band\n'), ((3085, 3135), 'core.models.Member.objects.create', 'Member.objects.create', ([], {'user': 'self.user', 'name': '"""Tony"""'}), "(user=self.user, name='Tony')\n", (3106, 
3135), False, 'from core.models import Member, Band\n'), ((3173, 3260), 'core.models.Band.objects.create', 'Band.objects.create', ([], {'title': '"""Sabaton"""', 'band_members': '(5)', 'tickets': '(55.5)', 'user': 'self.user'}), "(title='Sabaton', band_members=5, tickets=55.5, user=\n self.user)\n", (3192, 3260), False, 'from core.models import Member, Band\n'), ((3437, 3462), 'rockband.serializers.MemberSerializer', 'MemberSerializer', (['member1'], {}), '(member1)\n', (3453, 3462), False, 'from rockband.serializers import MemberSerializer\n'), ((3485, 3510), 'rockband.serializers.MemberSerializer', 'MemberSerializer', (['member2'], {}), '(member2)\n', (3501, 3510), False, 'from rockband.serializers import MemberSerializer\n'), ((3790, 3842), 'core.models.Member.objects.create', 'Member.objects.create', ([], {'user': 'self.user', 'name': '"""Joakim"""'}), "(user=self.user, name='Joakim')\n", (3811, 3842), False, 'from core.models import Member, Band\n'), ((3873, 3923), 'core.models.Member.objects.create', 'Member.objects.create', ([], {'user': 'self.user', 'name': '"""Tony"""'}), "(user=self.user, name='Tony')\n", (3894, 3923), False, 'from core.models import Member, Band\n'), ((3962, 4049), 'core.models.Band.objects.create', 'Band.objects.create', ([], {'title': '"""Sabaton"""', 'band_members': '(5)', 'tickets': '(55.5)', 'user': 'self.user'}), "(title='Sabaton', band_members=5, tickets=55.5, user=\n self.user)\n", (3981, 4049), False, 'from core.models import Member, Band\n'), ((4153, 4239), 'core.models.Band.objects.create', 'Band.objects.create', ([], {'title': '"""Sonata"""', 'band_members': '(5)', 'tickets': '(45.5)', 'user': 'self.user'}), "(title='Sonata', band_members=5, tickets=45.5, user=self\n .user)\n", (4172, 4239), False, 'from core.models import Member, Band\n'), ((1363, 1383), 'core.models.Member.objects.all', 'Member.objects.all', ([], {}), '()\n', (1381, 1383), False, 'from core.models import Member, Band\n'), ((2400, 2459), 
'core.models.Member.objects.filter', 'Member.objects.filter', ([], {'user': 'self.user', 'name': "payload['name']"}), "(user=self.user, name=payload['name'])\n", (2421, 2459), False, 'from core.models import Member, Band\n'), ((904, 920), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (918, 920), False, 'from django.contrib.auth import get_user_model\n'), ((1742, 1758), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1756, 1758), False, 'from django.contrib.auth import get_user_model\n')] |
# Minimal launcher script: configure and start an IRC slave bot.
from slave.playground.bots import BotInformation
# NOTE(review): BotBasic appears unused in this script — confirm before removing.
from slave.lib.bots import BotBasic, BotV2
# IRC connection settings for the bot pool.
config = {
    'host': 'chat.freenode.net',
    'port': 6667,
    'channel': "#slavebotpool666",
    'boss_name': 'boss666',
    'bot_prefix': "SLAVEBOT"
}
# Load the settings, switch the command set to BotV2, then run the bot.
BotInformation.read_config_from_dict(config)
BotInformation.use_other_bot_commands(BotV2)
# safe=True — semantics defined by the slave framework; presumably guards the
# main loop against crashes. Confirm in BotInformation.start.
BotInformation.start(safe=True)
"slave.playground.bots.BotInformation.start",
"slave.playground.bots.BotInformation.read_config_from_dict",
"slave.playground.bots.BotInformation.use_other_bot_commands"
] | [((249, 293), 'slave.playground.bots.BotInformation.read_config_from_dict', 'BotInformation.read_config_from_dict', (['config'], {}), '(config)\n', (285, 293), False, 'from slave.playground.bots import BotInformation\n'), ((296, 340), 'slave.playground.bots.BotInformation.use_other_bot_commands', 'BotInformation.use_other_bot_commands', (['BotV2'], {}), '(BotV2)\n', (333, 340), False, 'from slave.playground.bots import BotInformation\n'), ((341, 372), 'slave.playground.bots.BotInformation.start', 'BotInformation.start', ([], {'safe': '(True)'}), '(safe=True)\n', (361, 372), False, 'from slave.playground.bots import BotInformation\n')] |
##############################################################################
# Copyright (c) 2017 <NAME> <<EMAIL>>, Red Hat
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Walk a project tree and emit a YAML listing of the SHA-256 digest of every
# binary file found.
#
# Usage:  python generate-sha256.py --project /home/user/opnfv/infra
# Output is written to `output.yaml` in the working directory.
import os
import sys
import hashlib
import argparse
from binaryornot.check import is_binary

parser = argparse.ArgumentParser()
parser.add_argument('--project', help="Full path to project folder",
                    required=True)
args = parser.parse_args()

# Directories that must never be scanned.
ignore_dirs = ['.git']

# Redirect stdout so every print() below lands in the report file.
sys.stdout = open('output.yaml', 'w')

print("binaries:")
for root, dirs, files in os.walk(args.project):
    # Prune ignored directories in place so os.walk never descends into them.
    dirs[:] = [d for d in dirs if d not in ignore_dirs]
    for file in files:
        full_path = os.path.join(root, file)
        if is_binary(full_path):
            # Bug fix: create a fresh hasher per file. Reusing one
            # module-level hasher made every reported value the cumulative
            # hash of all files seen so far, not the file's own SHA-256.
            hasher = hashlib.sha256()
            with open(full_path, 'rb') as afile:
                hasher.update(afile.read())
            # Path relative to the project root.
            split_path = full_path.split(args.project + '/', 1)[-1]
            print("  {}:".format(split_path))
            # 'digest' instead of 'sum' — avoid shadowing the builtin.
            digest = hasher.hexdigest()
            print("  - {}".format(digest))
| [
"hashlib.sha256",
"argparse.ArgumentParser",
"os.path.join",
"binaryornot.check.is_binary",
"os.walk"
] | [((670, 686), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (684, 686), False, 'import hashlib\n'), ((696, 721), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (719, 721), False, 'import argparse\n'), ((960, 981), 'os.walk', 'os.walk', (['args.project'], {}), '(args.project)\n', (967, 981), False, 'import os\n'), ((1082, 1106), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (1094, 1106), False, 'import os\n'), ((1118, 1138), 'binaryornot.check.is_binary', 'is_binary', (['full_path'], {}), '(full_path)\n', (1127, 1138), False, 'from binaryornot.check import is_binary\n')] |
import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class MetamodelRegression(PostHocUQ):
    """ Extracts confidence scores from black-box regression models using a meta-model [2]_ .

    References:
        .. [2] Chen, Tongfei, et al. Confidence scoring using whitebox meta-models with linear classifier probes.
        The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.

    """

    def _create_named_model(self, mdltype, config):
        """
        Instantiates a model by name passed in 'mdltype'

        :param mdltype: string with name (must be supported)
        :param config: dict with args passed in the instantiation call
        :return: mdl instance
        """
        assert (isinstance(mdltype, str))
        if mdltype == 'gbr':
            mdl = GradientBoostingRegressor(**config)
        else:
            raise NotImplementedError("ERROR: Requested model type unknown: \"%s\"" % mdltype)
        return mdl

    def _get_model_instance(self, model, config):
        """
        Returns an instance of a model based on (a) a desired name or (b) passed in class, or
        (c) passed in instance

        :param model: string, class, or instance. Class and instance must have certain methods callable.
        :param config: dict with args passed in during the instantiation
        :return: model instance
        """
        assert (model is not None and config is not None)
        if isinstance(model, str):  # 'model' is a name, create it
            mdl = self._create_named_model(model, config)
        elif inspect.isclass(model):  # 'model' is a class, instantiate it
            mdl = model(**config)
        else:  # 'model' is an instance, register it
            mdl = model

        # Duck-type check: the object must expose every required method.
        if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
            raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
                             ','.join(self.callable_keys))
        return mdl

    def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
        """
        :param base_model: Base model. Can be:
            (1) None (default mdl will be set up),
            (2) Named model (e.g., 'gbr'),
            (3) Base model class declaration (e.g., sklearn.linear_model.LinearRegressor). Will instantiate.
            (4) Model instance (instantiated outside). Will be re-used. Must have required callable methods.
            Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
            and be capable of raising NotFittedError.
        :param meta_model: Meta model. Same values possible as with 'base_model'
        :param base_config: None or a params dict to be passed to 'base_model' at instantiation
        :param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
        :param random_seed: seed used in the various pipeline steps
        """
        # Bug fix: the original called `super(MetamodelRegression).__init__()`,
        # which initializes the unbound super object itself and never runs
        # PostHocUQ.__init__. Use the zero-argument form instead.
        super().__init__()
        self.random_seed = random_seed
        self.callable_keys = ['predict', 'fit']  # required methods - must be present in models passed in
        self.base_model_default = 'gbr'
        self.meta_model_default = 'gbr'
        self.base_config_default = {'loss': 'ls', 'n_estimators': 300, 'max_depth': 10, 'learning_rate': 0.001,
                                    'min_samples_leaf': 10, 'min_samples_split': 10, 'random_state': self.random_seed}
        # Meta model predicts an upper quantile of the absolute residual.
        self.meta_config_default = {'loss': 'quantile', 'alpha': 0.95, 'n_estimators': 300, 'max_depth': 10,
                                    'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
                                    'random_state': self.random_seed}
        self.base_config = base_config if base_config is not None else self.base_config_default
        self.meta_config = meta_config if meta_config is not None else self.meta_config_default
        self.base_model = None
        self.meta_model = None
        self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
                                                   self.base_config)
        self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
                                                   self.meta_config)

    def get_params(self, deep=True):
        """Return constructor parameters (sklearn-style accessor)."""
        return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
                "meta_config": self.meta_config, "random_seed": self.random_seed}

    def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
            meta_train_data=(None, None)):
        """
        Fit base and meta models.

        :param X: input to the base model
        :param y: ground truth for the base model
        :param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
            (complement will be used to train the base model)
        :param randomize_samples: use shuffling when creating partitions
        :param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
            instantiated outside/by the user and are already fitted.
        :param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
            with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
            Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
        :return: self
        """
        X = np.asarray(X)
        y = np.asarray(y)
        assert(len(meta_train_data)==2)
        if meta_train_data[0] is None:
            # Carve out a held-out partition to train the meta model on.
            X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
                                                              random_state=self.random_seed)
        else:
            if not base_is_prefitted:
                raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
            X_base = y_base = None
            X_meta = meta_train_data[0]
            y_meta = meta_train_data[1]

        # fit the base model
        if not base_is_prefitted:
            self.base_model.fit(X_base, y_base)

        # get input for the meta model from the base
        try:
            y_hat_meta = self.base_model.predict(X_meta)
        except NotFittedError as e:
            raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))

        # used base input and output as meta input
        X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta)

        # train meta model to predict abs diff (i.e. the magnitude of the base model's error)
        self.meta_model.fit(X_meta_in, np.abs(y_hat_meta - y_meta))
        return self

    def _process_pretrained_model(self, X, y_hat):
        """
        Given the original input features and the base output probabilities, generate input features
        to train a meta model. Current implementation copies all input features and appends.

        :param X: numpy [nsamples, dim]
        :param y_hat: [nsamples,]
        :return: array with new features [nsamples, newdim]
        """
        # Promote 1-D predictions to a column so hstack lines up.
        y_hat_meta_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
        X_meta_in = np.hstack([X, y_hat_meta_prime])
        return X_meta_in

    def predict(self, X):
        """
        Generate prediction and uncertainty bounds for data X.

        :param X: input features
        :return: namedtuple: A namedtuple that holds

            y_mean: ndarray of shape (n_samples, [n_output_dims])
                Mean of predictive distribution of the test points.
            y_lower: ndarray of shape (n_samples, [n_output_dims])
                Lower quantile of predictive distribution of the test points.
            y_upper: ndarray of shape (n_samples, [n_output_dims])
                Upper quantile of predictive distribution of the test points.
        """
        y_hat = self.base_model.predict(X)
        y_hat_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
        X_meta_in = np.hstack([X, y_hat_prime])
        # Meta model estimates the residual magnitude; use it as a symmetric band.
        z_hat = self.meta_model.predict(X_meta_in)

        Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
        res = Result(y_hat, y_hat - z_hat, y_hat + z_hat)

        return res
| [
"numpy.abs",
"collections.namedtuple",
"numpy.hstack",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"numpy.expand_dims",
"inspect.isclass",
"sklearn.ensemble.GradientBoostingRegressor"
] | [((6132, 6145), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (6142, 6145), True, 'import numpy as np\n'), ((6158, 6171), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (6168, 6171), True, 'import numpy as np\n'), ((7884, 7916), 'numpy.hstack', 'np.hstack', (['[X, y_hat_meta_prime]'], {}), '([X, y_hat_meta_prime])\n', (7893, 7916), True, 'import numpy as np\n'), ((8715, 8742), 'numpy.hstack', 'np.hstack', (['[X, y_hat_prime]'], {}), '([X, y_hat_prime])\n', (8724, 8742), True, 'import numpy as np\n'), ((8812, 8863), 'collections.namedtuple', 'namedtuple', (['"""res"""', "['y_mean', 'y_lower', 'y_upper']"], {}), "('res', ['y_mean', 'y_lower', 'y_upper'])\n", (8822, 8863), False, 'from collections import namedtuple\n'), ((1030, 1065), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {}), '(**config)\n', (1055, 1065), False, 'from sklearn.ensemble import GradientBoostingRegressor\n'), ((1801, 1823), 'inspect.isclass', 'inspect.isclass', (['model'], {}), '(model)\n', (1816, 1823), False, 'import inspect\n'), ((6296, 6405), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'shuffle': 'randomize_samples', 'test_size': 'meta_fraction', 'random_state': 'self.random_seed'}), '(X, y, shuffle=randomize_samples, test_size=meta_fraction,\n random_state=self.random_seed)\n', (6312, 6405), False, 'from sklearn.model_selection import train_test_split\n'), ((7322, 7349), 'numpy.abs', 'np.abs', (['(y_hat_meta - y_meta)'], {}), '(y_hat_meta - y_meta)\n', (7328, 7349), True, 'import numpy as np\n'), ((7803, 7828), 'numpy.expand_dims', 'np.expand_dims', (['y_hat', '(-1)'], {}), '(y_hat, -1)\n', (7817, 7828), True, 'import numpy as np\n'), ((8634, 8659), 'numpy.expand_dims', 'np.expand_dims', (['y_hat', '(-1)'], {}), '(y_hat, -1)\n', (8648, 8659), True, 'import numpy as np\n')] |
import logging
import subprocess
from threading import Thread
from ulauncher.api.client.Extension import Extension
from ulauncher.api.shared.event import KeywordQueryEvent, ItemEnterEvent, \
PreferencesEvent, PreferencesUpdateEvent
from ulauncher.api.shared.action.ExtensionCustomAction import \
ExtensionCustomAction
from ulauncher.api.shared.action.RenderResultListAction import \
RenderResultListAction
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from dendron.preferences import PreferencesEventListener, PreferencesUpdateEventListener
from dendron.query_listener import KeywordQueryEventListener
from dendron.item_listener import ItemEnterEventListener
logger = logging.getLogger(__name__)
class DendronExtension(Extension):
    """ Main Extension Class: bridges Ulauncher events to Dendron note search. """

    def __init__(self):
        """ Initializes the extension and wires each Ulauncher event to its listener. """
        super(DendronExtension, self).__init__()
        self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
        self.subscribe(ItemEnterEvent, ItemEnterEventListener())
        self.subscribe(PreferencesEvent, PreferencesEventListener())
        self.subscribe(PreferencesUpdateEvent,
                       PreferencesUpdateEventListener())

    def load_notes(self):
        """ Load Dendron notes into memory on a background thread. """
        # NOTE(review): `self.dendron` is not assigned in this class —
        # presumably one of the preference listeners attaches it; confirm.
        th = Thread(target=self.dendron.load_notes)
        th.daemon = True  # don't block Ulauncher shutdown on this thread
        th.start()

    def search_notes(self, query):
        """ Search notes and render up to 8 matches as result items.

        :param query: search string typed after the keyword.
        :return: RenderResultListAction with the matching items.
        """
        notes = self.dendron.search(query)
        items = []
        if len(notes) == 0:
            return RenderResultListAction([
                ExtensionResultItem(icon='images/icon.png',
                                    name='No notes found',
                                    highlightable=False)
            ])
        # Cap the rendered list at 8 entries.
        for item in notes[:8]:
            items.append(
                ExtensionResultItem(icon='images/icon.png',
                                    name=item['title'],
                                    description=item['file'],
                                    on_enter=ExtensionCustomAction({
                                        'action':
                                        'open_note',
                                        'path':
                                        item['path']
                                    })))
        return RenderResultListAction(items)

    def open_note(self, path):
        """ Open the selected note on the configured Dendron workspace.

        :param path: filesystem path of the note, substituted for %f% in the
            user-configured command.
        """
        cmd = self.preferences["dendron_cmd"]
        cmd = cmd.replace("%f%", path)
        # NOTE(review): shell=True with an unquoted substituted path — paths
        # containing spaces or shell metacharacters will break or be
        # interpreted by the shell. Consider quoting or shell=False.
        subprocess.run(cmd, shell=True)

    def reload_action(self):
        """ Shows the single 'Reload notes' result item. """
        return RenderResultListAction([
            ExtensionResultItem(icon='images/icon.png',
                                name='Reload notes',
                                highlightable=False,
                                on_enter=ExtensionCustomAction(
                                    {'action': 'reload'}))
        ])
| [
"logging.getLogger",
"ulauncher.api.shared.item.ExtensionResultItem.ExtensionResultItem",
"dendron.query_listener.KeywordQueryEventListener",
"subprocess.run",
"ulauncher.api.shared.action.RenderResultListAction.RenderResultListAction",
"ulauncher.api.shared.action.ExtensionCustomAction.ExtensionCustomAct... | [((713, 740), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (730, 740), False, 'import logging\n'), ((1323, 1361), 'threading.Thread', 'Thread', ([], {'target': 'self.dendron.load_notes'}), '(target=self.dendron.load_notes)\n', (1329, 1361), False, 'from threading import Thread\n'), ((2361, 2390), 'ulauncher.api.shared.action.RenderResultListAction.RenderResultListAction', 'RenderResultListAction', (['items'], {}), '(items)\n', (2383, 2390), False, 'from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction\n'), ((2592, 2623), 'subprocess.run', 'subprocess.run', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (2606, 2623), False, 'import subprocess\n'), ((969, 996), 'dendron.query_listener.KeywordQueryEventListener', 'KeywordQueryEventListener', ([], {}), '()\n', (994, 996), False, 'from dendron.query_listener import KeywordQueryEventListener\n'), ((1037, 1061), 'dendron.item_listener.ItemEnterEventListener', 'ItemEnterEventListener', ([], {}), '()\n', (1059, 1061), False, 'from dendron.item_listener import ItemEnterEventListener\n'), ((1104, 1130), 'dendron.preferences.PreferencesEventListener', 'PreferencesEventListener', ([], {}), '()\n', (1128, 1130), False, 'from dendron.preferences import PreferencesEventListener, PreferencesUpdateEventListener\n'), ((1202, 1234), 'dendron.preferences.PreferencesUpdateEventListener', 'PreferencesUpdateEventListener', ([], {}), '()\n', (1232, 1234), False, 'from dendron.preferences import PreferencesEventListener, PreferencesUpdateEventListener\n'), ((1622, 1713), 'ulauncher.api.shared.item.ExtensionResultItem.ExtensionResultItem', 'ExtensionResultItem', ([], {'icon': '"""images/icon.png"""', 'name': '"""No notes found"""', 'highlightable': '(False)'}), "(icon='images/icon.png', name='No notes found',\n highlightable=False)\n", (1641, 1713), False, 'from 
ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem\n'), ((2077, 2145), 'ulauncher.api.shared.action.ExtensionCustomAction.ExtensionCustomAction', 'ExtensionCustomAction', (["{'action': 'open_note', 'path': item['path']}"], {}), "({'action': 'open_note', 'path': item['path']})\n", (2098, 2145), False, 'from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction\n'), ((2933, 2976), 'ulauncher.api.shared.action.ExtensionCustomAction.ExtensionCustomAction', 'ExtensionCustomAction', (["{'action': 'reload'}"], {}), "({'action': 'reload'})\n", (2954, 2976), False, 'from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction\n')] |
# pylint: disable=redefined-outer-name
# pylint: disable=too-many-lines
import itertools
import pytest
from buzzard.test.tools import assert_tiles_eq
from buzzard.test import make_tile_set
# Wildcard marker: the truth tables below compare tuples that contain ANY in
# the same slot on BOTH sides, so that slot always matches (ANY == ANY).
ANY = 42

# All boundary_effect values accepted by Footprint.tile.
PARAMS1 = {
    'extend',
    'overlap',
    'exclude',
    'exception',
    'shrink',
}
# All boundary_effect_locus values (corners).
PARAMS2 = {'br', 'tr', 'tl', 'bl'}

# Every (tile_w, tile_h, overlap_x, overlap_y) combination in [0, 5).
COMBOS = { # len = 625
    (w, h, ow, oh)
    for w, h, ow, oh in itertools.product(range(5), range(5), range(5), range(5))
}
# Combinations that must raise: empty tile or overlap >= tile size.
FAIL_COMBOS = { # len = 525
    (w, h, ow, oh)
    for w, h, ow, oh in COMBOS
    if w == 0 or h == 0
    or ow >= w or oh >= h
}
VALID_COMBOS = COMBOS - FAIL_COMBOS # len = 100
# Combinations whose tiles fit the 3x3 origin exactly (no boundary effect).
FIT_XY_COMBOS = { # len = 25
    (w, h, ow, oh)
    for w, h, ow, oh in VALID_COMBOS
    if ((w == 3) or (w == 2 and ow == 1) or (w == 1)) and
    ((h == 3) or (h == 2 and oh == 1) or (h == 1))
}
NOFIT_XY_COMBOS = VALID_COMBOS - FIT_XY_COMBOS # len = 75

# One fixed combo crossed with every non-default effect/locus pair.
EXTRA_COMBO = [
    list(coords) + [be, bel]
    for (coords, be, bel) in itertools.product(
        [(2, 2, 0, 1)],
        PARAMS1 - {'exception'},
        PARAMS2 - {'br'},
    )
]
# *************************************************************************** **
# FIXTURES ****************************************************************** **
# *************************************************************************** **
@pytest.fixture(scope='module')
def fps():
    """
    A 5x5 grid of unit footprints, addressed by letter (see make_tile_set);
    e.g. fps.GS is the 3x3 footprint spanning G..S.
    A B C D E
    F G H I J
    K L M N O
    P Q R S T
    U V W X Y
    """
    return make_tile_set.make_tile_set(5, [1, -1], [1, -1])
def pytest_generate_tests(metafunc):
    """
    Testing all 625 combinations of parameters for a 3x3 footprint and up to 4x4 tile
    - Assert that exceptions are raised
    - Assert that return values are valid
    """
    # Tests that consume the "tiles don't fit" combinations.
    nofit_consumers = [
        test_nofit_xy_br_extend,
        test_nofit_xy_br_overlap,
        test_nofit_xy_br_exclude,
        test_nofit_xy_br_shrink,
        test_nofit_xy_exception,
    ]
    # Pick the combo set for the test being collected; other tests are
    # parametrized by their own fixtures/marks and are left alone.
    if metafunc.function == test_fail:
        combos = FAIL_COMBOS
    elif metafunc.function == test_fit_xy:
        combos = FIT_XY_COMBOS
    elif metafunc.function in nofit_consumers:
        combos = NOFIT_XY_COMBOS
    else:
        return
    metafunc.parametrize(argnames='w, h, ow, oh', argvalues=combos)
@pytest.fixture(params=PARAMS2)
def boundary_effect_locus(request):
    # Parametrizes over every corner ('br', 'tr', 'tl', 'bl').
    return request.param
@pytest.fixture(params=PARAMS1)
def boundary_effect(request):
    # Parametrizes over every boundary effect mode.
    return request.param
# *************************************************************************** **
# TESTS ******************************************************************** **
# *************************************************************************** **
def test_fail(fps, w, h, ow, oh):
    # Degenerate combos (zero-size tile or overlap >= size) must raise.
    with pytest.raises(ValueError):
        fps.GS.tile((w, h), ow, oh, boundary_effect='extend')
def test_nofit_xy_exception(fps, w, h, ow, oh, boundary_effect_locus):
    # When tiles cannot cover the origin exactly, 'exception' mode must raise
    # regardless of which corner the boundary effect is anchored at.
    with pytest.raises(ValueError, match='There is a gap'): # TODO MOVE!!
        fps.GS.tile(
            (w, h), ow, oh,
            boundary_effect='exception', boundary_effect_locus=boundary_effect_locus
        )
def test_fit_xy(fps, w, h, ow, oh, boundary_effect, boundary_effect_locus):
    """
    Compares tiling versus truth that is manually inputed
    Handles combinations of parameters where all tiles fit inside origin
    (so effect/locus must not change the result).
    Tuple comparisons with ANY on both sides act as wildcards for that slot.
    """
    if (1, 1, 0, 0) == (w, h, ow, oh):
        truth = [
            [fps.G, fps.H, fps.I, ],
            [fps.L, fps.M, fps.N, ],
            [fps.Q, fps.R, fps.S, ],
        ]
    elif (1, 2, 0, 1) == (w, h, ow, oh):
        truth = [
            [fps.GL, fps.HM, fps.IN],
            [fps.LQ, fps.MR, fps.NS],
        ]
    elif (1, 3, 0, ANY) == (w, h, ow, ANY):
        truth = [
            [fps.GQ, fps.HR, fps.IS, ],
        ]
    elif (2, 1, 1, 0) == (w, h, ow, oh):
        truth = [
            [fps.GH, fps.HI],
            [fps.LM, fps.MN],
            [fps.QR, fps.RS],
        ]
    elif (2, 2, 1, 1) == (w, h, ow, oh):
        truth = [
            [fps.GM, fps.HN],
            [fps.LR, fps.MS],
        ]
    elif (2, 3, 1, ANY) == (w, h, ow, ANY):
        truth = [
            [fps.GR, fps.HS],
        ]
    elif (3, 1, ANY, 0) == (w, h, ANY, oh):
        truth = [
            [fps.GI, ],
            [fps.LN, ],
            [fps.QS, ],
        ]
    elif (3, 2, ANY, 1) == (w, h, ANY, oh):
        truth = [
            [fps.GN],
            [fps.LS],
        ]
    elif (3, 3, ANY, ANY) == (w, h, ANY, ANY):
        truth = [
            [fps.GS, ],
        ]
    else:
        raise Exception('Test %s not implemented' % str((w, h, ow, oh)))

    tiles = fps.GS.tile(
        (w, h), ow, oh, boundary_effect=boundary_effect, boundary_effect_locus=boundary_effect_locus
    )
    assert_tiles_eq(tiles, truth)
def test_nofit_xy_br_extend(fps, w, h, ow, oh):
    """
    Compares tiling versus truth that is manually inputed
    Handles combinations of parameters where all tiles DO NOT fit inside origin
    for 'extend' parameter (edge tiles grow past the origin, bottom-right locus)
    """
    if (1, 2, 0, 0) == (w, h, ow, oh):
        truth = [
            [fps.GL, fps.HM, fps.IN, ],
            [fps.QV, fps.RW, fps.SX, ],
        ]
    elif (2, 1, 0, 0) == (w, h, ow, oh):
        truth = [
            [fps.GH, fps.IJ, ],
            [fps.LM, fps.NO, ],
            [fps.QR, fps.ST, ],
        ]
    elif (2, 2, 0, 0) == (w, h, ow, oh):
        truth = [
            [fps.GM, fps.IO, ],
            [fps.QW, fps.SY, ],
        ]
    elif (2, 2, 0, 1) == (w, h, ow, oh):
        truth = [
            [fps.GM, fps.IO],
            [fps.LR, fps.NT],
        ]
    elif (2, 2, 1, 0) == (w, h, ow, oh):
        truth = [
            [fps.GM, fps.HN],
            [fps.QW, fps.RX],
        ]
    elif (2, 3, 0, ANY) == (w, h, ow, ANY):
        truth = [
            [fps.GR, fps.IT, ],
        ]
    elif (3, 2, ANY, 0) == (w, h, ANY, oh):
        truth = [
            [fps.GN],
            [fps.QX],
        ]
    elif (4, 1, ANY, 0) == (w, h, ANY, oh):
        truth = [
            [fps.GJ],
            [fps.LO],
            [fps.QT],
        ]
    elif (4, 2, ANY, 0) == (w, h, ANY, oh):
        truth = [
            [fps.GO],
            [fps.QY],
        ]
    elif (4, 2, ANY, 1) == (w, h, ANY, oh):
        truth = [
            [fps.GO],
            [fps.LT],
        ]
    elif (4, 3, ANY, ANY) == (w, h, ANY, ANY):
        truth = [
            [fps.GT],
        ]
    elif (4, 4, ANY, ANY) == (w, h, ANY, ANY):
        truth = [
            [fps.GY],
        ]
    elif (1, 4, 0, ANY) == (w, h, ow, ANY):
        truth = [
            [fps.GV, fps.HW, fps.IX],
        ]
    elif (2, 4, 0, ANY) == (w, h, ow, ANY):
        truth = [
            [fps.GW, fps.IY],
        ]
    elif (2, 4, 1, ANY) == (w, h, ow, ANY):
        truth = [
            [fps.GW, fps.HX],
        ]
    elif (3, 4, ANY, ANY) == (w, h, ANY, ANY):
        truth = [
            [fps.GX],
        ]
    else:
        raise Exception('Test %s not implemented' % str((w, h, ow, oh)))

    tiles = fps.GS.tile((w, h), ow, oh, boundary_effect='extend')
    assert_tiles_eq(tiles, truth)
def test_nofit_xy_br_overlap(fps, w, h, ow, oh):
    """
    Compares tiling versus truth that is manually inputed
    Handles combinations of parameters where all tiles DO NOT fit inside origin
    for 'overlap' parameter (edge tiles slide inward; impossible when the tile
    is larger than the origin itself, which must raise)
    """
    if (1, 2, 0, 0) == (w, h, ow, oh):
        truth = [
            [fps.GL, fps.HM, fps.IN, ],
            [fps.LQ, fps.MR, fps.NS, ],
        ]
    elif (2, 1, 0, 0) == (w, h, ow, oh):
        truth = [
            [fps.GH, fps.HI, ],
            [fps.LM, fps.MN, ],
            [fps.QR, fps.RS, ],
        ]
    elif (2, 2, ANY, ANY) == (w, h, ANY, ANY):
        truth = [
            [fps.GM, fps.HN, ],
            [fps.LR, fps.MS, ],
        ]
    elif (2, 3, 0, ANY) == (w, h, ow, ANY):
        truth = [
            [fps.GR, fps.HS, ],
        ]
    elif (3, 2, ANY, 0) == (w, h, ANY, oh):
        truth = [
            [fps.GN],
            [fps.LS],
        ]
    elif ((4, ANY, ANY, ANY) == (w, ANY, ANY, ANY) or
          (ANY, 4, ANY, ANY) == (ANY, h, ANY, ANY)):
        # A 4-wide/high tile cannot overlap back inside a 3x3 origin.
        with pytest.raises(ValueError, match='overlap'):
            _ = fps.GS.tile((w, h), ow, oh, boundary_effect='overlap')
        return
    else:
        raise Exception('Test %s not implemented' % str((w, h, ow, oh)))

    tiles = fps.GS.tile((w, h), ow, oh, boundary_effect='overlap')
    assert_tiles_eq(tiles, truth)
def test_nofit_xy_br_exclude(fps, w, h, ow, oh):
    """
    Compares tiling versus truth that is manually inputed
    Handles combinations of parameters where all tiles DO NOT fit inside origin
    for 'exclude' parameter (tiles that would spill past the origin are
    dropped; an oversized tile yields an empty tiling)
    """
    if (1, 2, 0, 0) == (w, h, ow, oh):
        truth = [
            [fps.GL, fps.HM, fps.IN],
        ]
    elif (2, 1, 0, 0) == (w, h, ow, oh):
        truth = [
            [fps.GH, ],
            [fps.LM, ],
            [fps.QR, ],
        ]
    elif (2, 2, 0, 0) == (w, h, ow, oh):
        truth = [
            [fps.GM, ],
        ]
    elif (2, 2, 0, 1) == (w, h, ow, oh):
        truth = [
            [fps.GM, ],
            [fps.LR, ],
        ]
    elif (2, 2, 1, 0) == (w, h, ow, oh):
        truth = [
            [fps.GM, fps.HN],
        ]
    elif (2, 3, 0, ANY) == (w, h, ow, ANY):
        truth = [
            [fps.GR, ],
        ]
    elif (3, 2, ANY, 0) == (w, h, ANY, oh):
        truth = [
            [fps.GN],
        ]
    elif (4, ANY, ANY, ANY) == (w, ANY, ANY, ANY):
        truth = []
    elif (ANY, 4, ANY, ANY) == (ANY, h, ANY, ANY):
        truth = []
    else:
        raise Exception('Test %s not implemented' % str((w, h, ow, oh)))

    tiles = fps.GS.tile((w, h), ow, oh, boundary_effect='exclude')
    assert_tiles_eq(tiles, truth)
def test_nofit_xy_br_shrink(fps, w, h, ow, oh):
    """
    Compares tiling versus truth that is manually inputed
    Handles combinations of parameters where all tiles DO NOT fit inside origin
    for 'shrink' parameter (edge tiles are clipped to the origin's border)
    """
    if (1, 2, 0, 0) == (w, h, ow, oh):
        truth = [
            [fps.GL, fps.HM, fps.IN, ],
            [fps.Q, fps.R, fps.S, ],
        ]
    elif (2, 1, 0, 0) == (w, h, ow, oh):
        truth = [
            [fps.GH, fps.I, ],
            [fps.LM, fps.N, ],
            [fps.QR, fps.S, ],
        ]
    elif (2, 2, 0, 0) == (w, h, ow, oh):
        truth = [
            [fps.GM, fps.IN, ],
            [fps.QR, fps.S, ],
        ]
    elif (2, 2, 0, 1) == (w, h, ow, oh):
        truth = [
            [fps.GM, fps.IN],
            [fps.LR, fps.NS],
        ]
    elif (2, 2, 1, 0) == (w, h, ow, oh):
        truth = [
            [fps.GM, fps.HN],
            [fps.QR, fps.RS],
        ]
    elif ((2, 3, 0, ANY) == (w, h, ow, ANY) or
          (2, 4, 0, ANY) == (w, h, ow, ANY)):
        truth = [
            [fps.GR, fps.IS, ],
        ]
    elif ((3, 2, ANY, 0) == (w, h, ANY, oh) or
          (4, 2, ANY, 0) == (w, h, ANY, oh)):
        truth = [
            [fps.GN],
            [fps.QS],
        ]
    elif ((3, 4, ANY, ANY) == (w, h, ANY, ANY) or
          (4, 3, ANY, ANY) == (w, h, ANY, ANY) or
          (4, 4, ANY, ANY) == (w, h, ANY, ANY)):
        # Oversized tiles all shrink down to the whole origin.
        truth = [
            [fps.GS],
        ]
    elif (1, 4, 0, ANY) == (w, h, ow, ANY):
        truth = [
            [fps.GQ, fps.HR, fps.IS],
        ]
    elif (4, 1, ANY, 0) == (w, h, ANY, oh):
        truth = [
            [fps.GI],
            [fps.LN],
            [fps.QS],
        ]
    elif (4, 2, ANY, 1) == (w, h, ANY, oh):
        truth = [
            [fps.GN],
            [fps.LS],
        ]
    elif (2, 4, 1, ANY) == (w, h, ow, ANY):
        truth = [
            [fps.GR, fps.HS],
        ]
    else:
        raise Exception('Test %s not implemented' % str((w, h, ow, oh)))

    tiles = fps.GS.tile((w, h), ow, oh, boundary_effect='shrink')
    assert_tiles_eq(tiles, truth)
@pytest.mark.parametrize(
    "w, h, ow, oh, boundary_effect, boundary_effect_locus", EXTRA_COMBO
)
def test_extra(fps, w, h, ow, oh, boundary_effect, boundary_effect_locus):
    # One fixed combo (2x2 tile, 1px y-overlap) checked against every
    # non-default effect/locus pair ('tr' vs 'tl'/'bl' anchor the boundary
    # effect at different corners, hence different truth tables).
    if (2, 2, 0, 1) == (w, h, ow, oh):
        if boundary_effect_locus == 'tr':
            if boundary_effect == 'extend':
                truth = [
                    [fps.GM, fps.IO],
                    [fps.LR, fps.NT],
                ]
            elif boundary_effect == 'overlap':
                truth = [
                    [fps.GM, fps.HN],
                    [fps.LR, fps.MS],
                ]
            elif boundary_effect == 'exclude':
                truth = [
                    [fps.GM],
                    [fps.LR],
                ]
            elif boundary_effect == 'shrink':
                truth = [
                    [fps.GM, fps.IN],
                    [fps.LR, fps.NS],
                ]
            else:
                assert False
        elif boundary_effect_locus == 'tl' or boundary_effect_locus == 'bl':
            if boundary_effect == 'extend':
                truth = [
                    [fps.FL, fps.HN],
                    [fps.KQ, fps.MS],
                ]
            elif boundary_effect == 'overlap':
                truth = [
                    [fps.GM, fps.HN],
                    [fps.LR, fps.MS],
                ]
            elif boundary_effect == 'exclude':
                truth = [
                    [fps.HN],
                    [fps.MS],
                ]
            elif boundary_effect == 'shrink':
                truth = [
                    [fps.GL, fps.HN],
                    [fps.LQ, fps.MS],
                ]
            else:
                assert False
    else:
        assert False

    tiles = fps.GS.tile(
        (w, h), ow, oh,
        boundary_effect=boundary_effect, boundary_effect_locus=boundary_effect_locus
    )
    assert_tiles_eq(tiles, truth)
def test_value_error(fps):
    # Malformed arguments must each raise a ValueError naming the bad field.
    with pytest.raises(ValueError, match='shape'):
        fps.AI.tile(1)
    with pytest.raises(ValueError, match='shape'):
        fps.AI.tile([1, 1, 1])
    with pytest.raises(ValueError, match='effect'):
        fps.AI.tile((1, 1), boundary_effect='')
    with pytest.raises(ValueError, match='effect_locus'):
        fps.AI.tile((1, 1), boundary_effect_locus='')
| [
"itertools.product",
"pytest.mark.parametrize",
"pytest.raises",
"pytest.fixture",
"buzzard.test.tools.assert_tiles_eq",
"buzzard.test.make_tile_set.make_tile_set"
] | [((1320, 1350), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1334, 1350), False, 'import pytest\n'), ((2402, 2432), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'PARAMS2'}), '(params=PARAMS2)\n', (2416, 2432), False, 'import pytest\n'), ((2497, 2527), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'PARAMS1'}), '(params=PARAMS1)\n', (2511, 2527), False, 'import pytest\n'), ((11957, 12053), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w, h, ow, oh, boundary_effect, boundary_effect_locus"""', 'EXTRA_COMBO'], {}), "('w, h, ow, oh, boundary_effect, boundary_effect_locus',\n EXTRA_COMBO)\n", (11980, 12053), False, 'import pytest\n'), ((1481, 1529), 'buzzard.test.make_tile_set.make_tile_set', 'make_tile_set.make_tile_set', (['(5)', '[1, -1]', '[1, -1]'], {}), '(5, [1, -1], [1, -1])\n', (1508, 1529), False, 'from buzzard.test import make_tile_set\n'), ((4879, 4908), 'buzzard.test.tools.assert_tiles_eq', 'assert_tiles_eq', (['tiles', 'truth'], {}), '(tiles, truth)\n', (4894, 4908), False, 'from buzzard.test.tools import assert_tiles_eq\n'), ((7191, 7220), 'buzzard.test.tools.assert_tiles_eq', 'assert_tiles_eq', (['tiles', 'truth'], {}), '(tiles, truth)\n', (7206, 7220), False, 'from buzzard.test.tools import assert_tiles_eq\n'), ((8529, 8558), 'buzzard.test.tools.assert_tiles_eq', 'assert_tiles_eq', (['tiles', 'truth'], {}), '(tiles, truth)\n', (8544, 8558), False, 'from buzzard.test.tools import assert_tiles_eq\n'), ((9831, 9860), 'buzzard.test.tools.assert_tiles_eq', 'assert_tiles_eq', (['tiles', 'truth'], {}), '(tiles, truth)\n', (9846, 9860), False, 'from buzzard.test.tools import assert_tiles_eq\n'), ((11924, 11953), 'buzzard.test.tools.assert_tiles_eq', 'assert_tiles_eq', (['tiles', 'truth'], {}), '(tiles, truth)\n', (11939, 11953), False, 'from buzzard.test.tools import assert_tiles_eq\n'), ((13862, 13891), 'buzzard.test.tools.assert_tiles_eq', 'assert_tiles_eq', (['tiles', 'truth'], {}), 
'(tiles, truth)\n', (13877, 13891), False, 'from buzzard.test.tools import assert_tiles_eq\n'), ((964, 1040), 'itertools.product', 'itertools.product', (['[(2, 2, 0, 1)]', "(PARAMS1 - {'exception'})", "(PARAMS2 - {'br'})"], {}), "([(2, 2, 0, 1)], PARAMS1 - {'exception'}, PARAMS2 - {'br'})\n", (981, 1040), False, 'import itertools\n'), ((2871, 2896), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2884, 2896), False, 'import pytest\n'), ((3042, 3091), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""There is a gap"""'}), "(ValueError, match='There is a gap')\n", (3055, 3091), False, 'import pytest\n'), ((13929, 13969), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""shape"""'}), "(ValueError, match='shape')\n", (13942, 13969), False, 'import pytest\n'), ((14003, 14043), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""shape"""'}), "(ValueError, match='shape')\n", (14016, 14043), False, 'import pytest\n'), ((14085, 14126), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""effect"""'}), "(ValueError, match='effect')\n", (14098, 14126), False, 'import pytest\n'), ((14185, 14232), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""effect_locus"""'}), "(ValueError, match='effect_locus')\n", (14198, 14232), False, 'import pytest\n'), ((8245, 8287), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""overlap"""'}), "(ValueError, match='overlap')\n", (8258, 8287), False, 'import pytest\n')] |
import unittest
from collections import defaultdict
import numpy as np
import pandas as pd
from ife.io.io import ImageReader
class TestMomentFeatures(unittest.TestCase):
    def test_moment_output_type(self) -> None:
        """moment() should return the container type selected by `output_type`."""
        features = ImageReader.read_from_single_file("ife/data/small_rgb.jpg")
        # Default and "" both fall back to the ndarray output.
        self.assertIs(np.ndarray, type(features.moment()))
        self.assertIs(np.ndarray, type(features.moment(output_type="")))
        # "one_col" keeps the ndarray type but flattens to a 15-vector.
        one_col = features.moment(output_type="one_col")
        self.assertIs(np.ndarray, type(one_col))
        self.assertEqual(np.zeros(15).shape, one_col.shape)  # type: ignore
        self.assertIs(defaultdict, type(features.moment(output_type="dict")))
        self.assertIs(pd.DataFrame, type(features.moment(output_type="pandas")))
    def test_colourfulness_output_type(self) -> None:
        """colourfulness() should return the type selected by `output_type`."""
        features = ImageReader.read_from_single_file("ife/data/small_rgb.jpg")
        # Default, "" and "one_col" all produce a scalar np.float64.
        for kwargs in ({}, {"output_type": ""}, {"output_type": "one_col"}):
            self.assertIs(np.float64, type(features.colourfulness(**kwargs)))
        self.assertIs(dict, type(features.colourfulness(output_type="dict")))
        self.assertIs(pd.DataFrame, type(features.colourfulness(output_type="pandas")))
| [
"ife.io.io.ImageReader.read_from_single_file",
"numpy.zeros"
] | [((240, 299), 'ife.io.io.ImageReader.read_from_single_file', 'ImageReader.read_from_single_file', (['"""ife/data/small_rgb.jpg"""'], {}), "('ife/data/small_rgb.jpg')\n", (273, 299), False, 'from ife.io.io import ImageReader\n'), ((945, 1004), 'ife.io.io.ImageReader.read_from_single_file', 'ImageReader.read_from_single_file', (['"""ife/data/small_rgb.jpg"""'], {}), "('ife/data/small_rgb.jpg')\n", (978, 1004), False, 'from ife.io.io import ImageReader\n'), ((612, 624), 'numpy.zeros', 'np.zeros', (['(15)'], {}), '(15)\n', (620, 624), True, 'import numpy as np\n')] |
import numpy as np
from math import pi
import torch
from pykeops.torch import LazyTensor
from plyfile import PlyData, PlyElement
from helper import *
import torch.nn as nn
import torch.nn.functional as F
# from matplotlib import pyplot as plt
from pykeops.torch.cluster import grid_cluster, cluster_ranges_centroids, from_matrix
from math import pi, sqrt
# Input-Output for tests =======================================================
import os
from pyvtk import PolyData, PointData, CellData, Scalars, Vectors, VtkData, PointData
def save_vtk(
    fname, xyz, triangles=None, values=None, vectors=None, triangle_values=None
):
    """Saves a point cloud or triangle mesh as a .vtk file.
    Files can be opened with Paraview or displayed using the PyVista library.
    Args:
        fname (string): filename.
        xyz (Tensor): (N,3) point cloud or vertices.
        triangles (integer Tensor, optional): (T,3) mesh connectivity. Defaults to None.
        values (Tensor, optional): (N,D) values, supported by the vertices. Defaults to None.
        vectors (Tensor, optional): (N,3) vectors, supported by the vertices. Defaults to None.
        triangle_values (Tensor, optional): (T,D) values, supported by the triangles. Defaults to None.
    """
    # Geometry: either a raw point cloud or a triangulated surface.
    if triangles is None:
        geometry = PolyData(points=numpy(xyz), vertices=np.arange(len(xyz)))
    else:
        geometry = PolyData(points=numpy(xyz), polygons=numpy(triangles))
    data = [geometry]
    # Per-vertex attributes: one scalar channel per column of `values`,
    # plus an optional 3D vector field.
    point_channels = []
    if values is not None:
        vals = numpy(values)
        if vals.ndim == 1:
            vals = vals[:, None]
        point_channels.extend(
            Scalars(column, name=f"features_{i:02d}")
            for i, column in enumerate(vals.T)
        )
    if vectors is not None:
        point_channels.append(Vectors(numpy(vectors), name="vectors"))
    if point_channels:
        data.append(PointData(*point_channels))
    # Per-triangle attributes: one scalar channel per column of `triangle_values`.
    if triangle_values is not None:
        tri_vals = numpy(triangle_values)
        if tri_vals.ndim == 1:
            tri_vals = tri_vals[:, None]
        cell_channels = [
            Scalars(column, name=f"features_{i:02d}")
            for i, column in enumerate(tri_vals.T)
        ]
        data.append(CellData(*cell_channels))
    # Serialize to disk, creating the target directory if needed:
    vtk = VtkData(*data)
    os.makedirs(os.path.dirname(fname), exist_ok=True)
    vtk.tofile(fname)
# On-the-fly generation of the surfaces ========================================
def subsample(x, batch=None, scale=1.0):
    """Subsamples the point cloud using a grid (cubic) clustering scheme.
    The function returns one average sample per cell, as described in Fig. 3.e)
    of the paper.
    Args:
        x (Tensor): (N,3) point cloud.
        batch (integer Tensor, optional): (N,) batch vector, as in PyTorch_geometric.
            Defaults to None.
        scale (float, optional): side length of the cubic grid cells. Defaults to 1 (Angstrom).
    Returns:
        (M,3): sub-sampled point cloud, with M <= N.
        When `batch` is given, also returns the (M,) batch vector of the samples.
    """
    if batch is None:  # Single protein case:
        # Fast scatter_add_ implementation.
        # (The old, unreachable `else` fallback that relied on an undefined
        # `scatter` helper has been removed.)
        labels = grid_cluster(x, scale).long()  # (N,) cell index of every point
        C = labels.max() + 1  # number of occupied cells
        # We append a "1" to the input vectors, in order to
        # compute both the numerator and denominator of the "average"
        # fraction in one pass through the data.
        x_1 = torch.cat((x, torch.ones_like(x[:, :1])), dim=1)
        D = x_1.shape[1]
        points = torch.zeros_like(x_1[:C])
        points.scatter_add_(0, labels[:, None].repeat(1, D), x_1)
        # Coordinate sums divided by the cell populations = cell averages:
        return (points[:, :-1] / points[:, -1:]).contiguous()
    else:  # We process proteins using a for loop.
        # This is probably sub-optimal, but I don't really know
        # how to do more elegantly (this type of computation is
        # not super well supported by PyTorch).
        batch_size = torch.max(batch).item() + 1  # Typically, =32
        points, batches = [], []
        for b in range(batch_size):
            p = subsample(x[batch == b], scale=scale)
            points.append(p)
            batches.append(b * torch.ones_like(batch[: len(p)]))
        return torch.cat(points, dim=0), torch.cat(batches, dim=0)
def soft_distances(x, y, batch_x, batch_y, smoothness=0.01, atomtypes=None):
    """Computes a soft distance function to the atom centers of a protein.
    Implements Eq. (1) of the paper in a fast and numerically stable way.
    Args:
        x (Tensor): (N,3) atom centers.
        y (Tensor): (M,3) sampling locations.
        batch_x (integer Tensor): (N,) batch vector for x, as in PyTorch_geometric.
        batch_y (integer Tensor): (M,) batch vector for y, as in PyTorch_geometric.
        smoothness (float, optional): atom radii if atom types are not provided. Defaults to .01.
        atomtypes (integer Tensor, optional): (N,6) one-hot encoding of the atom chemical types. Defaults to None.
    Returns:
        Tensor: (M,) values of the soft distance function on the points `y`.
    """
    # Build the (N, M, 1) symbolic matrix of squared distances:
    x_i = LazyTensor(x[:, None, :])  # (N, 1, 3) atoms
    y_j = LazyTensor(y[None, :, :])  # (1, M, 3) sampling points
    D_ij = ((x_i - y_j) ** 2).sum(-1)  # (N, M, 1) squared distances
    # Use a block-diagonal sparsity mask to support heterogeneous batch processing:
    D_ij.ranges = diagonal_ranges(batch_x, batch_y)
    if atomtypes is not None:
        # Turn the one-hot encoding "atomtypes" into a vector of diameters "smoothness_i":
        # (N, 6) -> (N, 1, 1) (There are 6 atom types)
        # NOTE(review): the legacy torch.FloatTensor constructor does not accept
        # a CUDA `device` keyword in recent PyTorch — confirm this path still
        # works when x lives on the GPU (torch.tensor(..., device=x.device) may
        # be intended).
        atomic_radii = torch.FloatTensor(
            [170, 110, 152, 155, 180, 190], device=x.device
        )
        # Normalize so the smallest atom type has relative radius 1:
        atomic_radii = atomic_radii / atomic_radii.min()
        atomtype_radii = atomtypes * atomic_radii[None, :]  # n_atoms, n_atomtypes
        # One-hot selection: each atom picks its own (scaled) radius.
        # smoothness = atomtypes @ atomic_radii  # (N, 6) @ (6,) = (N,)
        smoothness = torch.sum(
            smoothness * atomtype_radii, dim=1, keepdim=False
        )  # n_atoms, 1
        smoothness_i = LazyTensor(smoothness[:, None, None])
        # Compute an estimation of the mean smoothness in a neighborhood
        # of each sampling point:
        # density = (-D_ij.sqrt()).exp().sum(0).view(-1)  # (M,) local density of atoms
        # smooth = (smoothness_i * (-D_ij.sqrt()).exp()).sum(0).view(-1)  # (M,)
        # mean_smoothness = smooth / density  # (M,)
        # soft_dists = -mean_smoothness * (
        #     (-D_ij.sqrt() / smoothness_i).logsumexp(dim=0)
        # ).view(-1)
        # Distance-weighted average of the per-atom radii around each point:
        mean_smoothness = (-D_ij.sqrt()).exp().sum(0)
        mean_smoothness_j = LazyTensor(mean_smoothness[None, :, :])
        mean_smoothness = (
            smoothness_i * (-D_ij.sqrt()).exp() / mean_smoothness_j
        )  # n_atoms, n_points, 1
        mean_smoothness = mean_smoothness.sum(0).view(-1)
        # Stable soft-min of the scaled distances via logsumexp:
        soft_dists = -mean_smoothness * (
            (-D_ij.sqrt() / smoothness_i).logsumexp(dim=0)
        ).view(-1)
    else:
        # Uniform radius: a single logsumexp soft-min over all atoms.
        soft_dists = -smoothness * ((-D_ij.sqrt() / smoothness).logsumexp(dim=0)).view(
            -1
        )
    return soft_dists
def atoms_to_points_normals(
    atoms,
    batch,
    distance=1.05,
    smoothness=0.5,
    resolution=1.0,
    nits=4,
    atomtypes=None,
    sup_sampling=20,
    variance=0.1,
):
    """Turns a collection of atoms into an oriented point cloud.
    Sampling algorithm for protein surfaces, described in Fig. 3 of the paper.
    Args:
        atoms (Tensor): (N,3) coordinates of the atom centers `a_k`.
        batch (integer Tensor): (N,) batch vector, as in PyTorch_geometric.
        distance (float, optional): value of the level set to sample from
            the smooth distance function. Defaults to 1.05.
        smoothness (float, optional): radii of the atoms, if atom types are
            not provided. Defaults to 0.5.
        resolution (float, optional): side length of the cubic cells in
            the final sub-sampling pass. Defaults to 1.0.
        nits (int, optional): number of iterations . Defaults to 4.
        atomtypes (Tensor, optional): (N,6) one-hot encoding of the atom
            chemical types. Defaults to None.
        sup_sampling (int, optional): number of candidate samples drawn per
            atom before pruning. Defaults to 20.
        variance (float, optional): relative tolerance for keeping samples:
            only points with |dist - distance| < variance * distance survive.
            Defaults to 0.1.
    Returns:
        (Tensor): (M,3) coordinates for the surface points `x_i`.
        (Tensor): (M,3) unit normals `n_i`.
        (integer Tensor): (M,) batch vector, as in PyTorch_geometric.
    """
    # a) Parameters for the soft distance function and its level set:
    T = distance
    N, D = atoms.shape
    B = sup_sampling  # Sup-sampling ratio
    # Batch vectors:
    batch_atoms = batch
    batch_z = batch[:, None].repeat(1, B).view(N * B)
    # b) Draw N*B points at random in the neighborhood of our atoms
    z = atoms[:, None, :] + 10 * T * torch.randn(N, B, D).type_as(atoms)
    z = z.view(-1, D)  # (N*B, D)
    # We don't want to backprop through a full network here!
    atoms = atoms.detach().contiguous()
    z = z.detach().contiguous()
    # N.B.: Test mode disables the autograd engine: we must switch it on explicitely.
    with torch.enable_grad():
        if z.is_leaf:
            z.requires_grad = True
        # c) Iterative loop: gradient descent along the potential
        # ".5 * (dist - T)^2" with respect to the positions z of our samples
        for it in range(nits):
            dists = soft_distances(
                atoms,
                z,
                batch_atoms,
                batch_z,
                smoothness=smoothness,
                atomtypes=atomtypes,
            )
            Loss = ((dists - T) ** 2).sum()
            g = torch.autograd.grad(Loss, z)[0]
            # Plain gradient step with a fixed 0.5 step size:
            z.data -= 0.5 * g
        # d) Only keep the points which are reasonably close to the level set:
        dists = soft_distances(
            atoms, z, batch_atoms, batch_z, smoothness=smoothness, atomtypes=atomtypes
        )
        margin = (dists - T).abs()
        mask = margin < variance * T
        # d') And remove the points that are trapped *inside* the protein:
        # march along the outward gradient direction; interior points stay
        # close to the level set and are filtered out below.
        zz = z.detach()
        zz.requires_grad = True
        for it in range(nits):
            dists = soft_distances(
                atoms,
                zz,
                batch_atoms,
                batch_z,
                smoothness=smoothness,
                atomtypes=atomtypes,
            )
            Loss = (1.0 * dists).sum()
            g = torch.autograd.grad(Loss, zz)[0]
            normals = F.normalize(g, p=2, dim=-1)  # (N, 3)
            zz = zz + 1.0 * T * normals
        dists = soft_distances(
            atoms, zz, batch_atoms, batch_z, smoothness=smoothness, atomtypes=atomtypes
        )
        mask = mask & (dists > 1.5 * T)
        z = z[mask].contiguous().detach()
        batch_z = batch_z[mask].contiguous().detach()
        # e) Subsample the point cloud:
        points, batch_points = subsample(z, batch_z, scale=resolution)
        # f) Compute the normals on this smaller point cloud:
        p = points.detach()
        p.requires_grad = True
        dists = soft_distances(
            atoms,
            p,
            batch_atoms,
            batch_points,
            smoothness=smoothness,
            atomtypes=atomtypes,
        )
        Loss = (1.0 * dists).sum()
        g = torch.autograd.grad(Loss, p)[0]
        normals = F.normalize(g, p=2, dim=-1)  # (N, 3)
    # Nudge the samples slightly inward along the normals:
    points = points - 0.5 * normals
    return points.detach(), normals.detach(), batch_points.detach()
# Surface mesh -> Normals ======================================================
def mesh_normals_areas(vertices, triangles=None, scale=[1.0], batch=None, normals=None):
    """Returns a smooth field of normals, possibly at different scales.
    points, triangles or normals, scale(s) -> normals
    (N, 3), (3, T) or (N,3), (S,) -> (N, 3) or (N, S, 3)
    Simply put - if `triangles` are provided:
      1. Normals are first computed for every triangle using simple 3D geometry
         and are weighted according to surface area.
      2. The normal at any given vertex is then computed as the weighted average
         of the normals of all triangles in a neighborhood specified
         by Gaussian windows whose radii are given in the list of "scales".
    If `normals` are provided instead, we simply smooth the discrete vector
    field using Gaussian windows whose radii are given in the list of "scales".
    If more than one scale is provided, normal fields are computed in parallel
    and returned in a single 3D tensor.
    Args:
        vertices (Tensor): (N,3) coordinates of mesh vertices or 3D points.
        triangles (integer Tensor, optional): (3,T) mesh connectivity. Defaults to None.
        scale (list of floats, optional): (S,) radii of the Gaussian smoothing windows. Defaults to [1.].
        batch (integer Tensor, optional): batch vector, as in PyTorch_geometric. Defaults to None.
        normals (Tensor, optional): (N,3) raw normals vectors on the vertices. Defaults to None.
    Returns:
        (Tensor): (N,3) or (N,S,3) point normals.
        (Tensor): (N,) point areas, if triangles were provided.
    """
    # Single- or Multi-scale mode:
    # NOTE(review): the mutable default `scale=[1.0]` is never mutated here,
    # so it is benign — but a None sentinel would be safer style.
    if hasattr(scale, "__len__"):
        scales, single_scale = scale, False
    else:
        scales, single_scale = [scale], True
    scales = torch.Tensor(scales).type_as(vertices)  # (S,)
    # Compute the "raw" field of normals:
    if triangles is not None:
        # Vertices of all triangles in the mesh:
        A = vertices[triangles[0, :]]  # (N, 3)
        B = vertices[triangles[1, :]]  # (N, 3)
        C = vertices[triangles[2, :]]  # (N, 3)
        # Triangle centers and normals (length = surface area):
        centers = (A + B + C) / 3  # (N, 3)
        V = (B - A).cross(C - A)  # (N, 3)
        # Vertice areas:
        S = (V ** 2).sum(-1).sqrt() / 6  # (N,) 1/3 of a triangle area
        # Each vertex accumulates one third of every incident triangle's area:
        areas = torch.zeros(len(vertices)).type_as(vertices)  # (N,)
        areas.scatter_add_(0, triangles[0, :], S)  # Aggregate from "A's"
        areas.scatter_add_(0, triangles[1, :], S)  # Aggregate from "B's"
        areas.scatter_add_(0, triangles[2, :], S)  # Aggregate from "C's"
    else:  # Use "normals" instead
        areas = None
        V = normals
        centers = vertices
    # Normal of a vertex = average of all normals in a ball of size "scale":
    x_i = LazyTensor(vertices[:, None, :])  # (N, 1, 3)
    y_j = LazyTensor(centers[None, :, :])  # (1, M, 3)
    v_j = LazyTensor(V[None, :, :])  # (1, M, 3)
    s = LazyTensor(scales[None, None, :])  # (1, 1, S)
    D_ij = ((x_i - y_j) ** 2).sum(-1)  # (N, M, 1)
    K_ij = (-D_ij / (2 * s ** 2)).exp()  # (N, M, S)
    # Support for heterogeneous batch processing:
    if batch is not None:
        batch_vertices = batch
        batch_centers = batch[triangles[0, :]] if triangles is not None else batch
        K_ij.ranges = diagonal_ranges(batch_vertices, batch_centers)
    if single_scale:
        U = (K_ij * v_j).sum(dim=1)  # (N, 3)
    else:
        # One smoothed vector field per scale, interleaved then reshaped:
        U = (K_ij.tensorprod(v_j)).sum(dim=1)  # (N, S*3)
        U = U.view(-1, len(scales), 3)  # (N, S, 3)
    normals = F.normalize(U, p=2, dim=-1)  # (N, 3) or (N, S, 3)
    return normals, areas
# Compute tangent planes and curvatures ========================================
def tangent_vectors(normals):
    """Completes unit normals `n_i` into orthonormal frames `[n_i, u_i, v_i]`.
    normals -> uv
    (N, 3) or (N, S, 3) -> (N, 2, 3) or (N, S, 2, 3)
    The input "normal" vectors are assumed to be normalized.
    Follows the branch-free construction of the 2017 Pixar note,
    "Building an orthonormal basis, revisited".
    Args:
        normals (Tensor): (N,3) or (N,S,3) normals `n_i`, i.e. unit-norm 3D vectors.
    Returns:
        (Tensor): (N,2,3) or (N,S,2,3) unit vectors `u_i` and `v_i` to complete
            the tangent coordinate systems `[n_i,u_i,v_i].
    """
    nx, ny, nz = normals[..., 0], normals[..., 1], normals[..., 2]
    # Branch-free sign of nz that maps 0 to +1:
    sign = (2 * (nz >= 0)) - 1.0
    inv = -1 / (sign + nz)
    cross_term = nx * ny * inv
    # First and second tangent vectors, stacked into (..., 2, 3) frames:
    u = torch.stack((1 + sign * nx * nx * inv, sign * cross_term, -sign * nx), dim=-1)
    v = torch.stack((cross_term, sign + ny * ny * inv, -ny), dim=-1)
    return torch.stack((u, v), dim=-2)
def curvatures(
    vertices, triangles=None, scales=[1.0], batch=None, normals=None, reg=0.01
):
    """Returns a collection of mean (H) and Gauss (K) curvatures at different scales.
    points, faces, scales -> (H_1, K_1, ..., H_S, K_S)
    (N, 3), (3, N), (S,) -> (N, S*2)
    We rely on a very simple linear regression method, for all vertices:
      1. Estimate normals and surface areas.
      2. Compute a local tangent frame.
      3. In a pseudo-geodesic Gaussian neighborhood at scale s,
         compute the two (2, 2) covariance matrices PPt and PQt
         between the displacement vectors "P = x_i - x_j" and
         the normals "Q = n_i - n_j", projected on the local tangent plane.
      4. Up to the sign, the shape operator S at scale s is then approximated
         as "S = (reg**2 * I_2 + PPt)^-1 @ PQt".
      5. The mean and Gauss curvatures are the trace and determinant of
         this (2, 2) matrix.
    As of today, this implementation does not weigh points by surface areas:
    this could make a sizeable difference if protein surfaces were not
    sub-sampled to ensure uniform sampling density.
    For convergence analysis, see for instance
    "Efficient curvature estimation for oriented point clouds",
    Cao, Li, Sun, Assadi, Zhang, 2019.
    Args:
        vertices (Tensor): (N,3) coordinates of the points or mesh vertices.
        triangles (integer Tensor, optional): (3,T) mesh connectivity. Defaults to None.
        scales (list of floats, optional): list of (S,) smoothing scales. Defaults to [1.].
        batch (integer Tensor, optional): batch vector, as in PyTorch_geometric. Defaults to None.
        normals (Tensor, optional): (N,3) field of "raw" unit normals. Defaults to None.
        reg (float, optional): small amount of Tikhonov/ridge regularization
            in the estimation of the shape operator. Defaults to .01.
    Returns:
        (Tensor): (N, S*2) tensor of mean and Gauss curvatures computed for
            every point at the required scales.
    """
    # Number of points, number of scales:
    N, S = vertices.shape[0], len(scales)
    ranges = diagonal_ranges(batch)
    # Compute the normals at different scales + vertice areas:
    normals_s, _ = mesh_normals_areas(
        vertices, triangles=triangles, normals=normals, scale=scales, batch=batch
    )  # (N, S, 3), (N,)
    # Local tangent bases:
    uv_s = tangent_vectors(normals_s)  # (N, S, 2, 3)
    features = []
    for s, scale in enumerate(scales):
        # Extract the relevant descriptors at the current scale:
        normals = normals_s[:, s, :].contiguous()  # (N, 3)
        uv = uv_s[:, s, :, :].contiguous()  # (N, 2, 3)
        # Encode as symbolic tensors:
        # Points:
        x_i = LazyTensor(vertices.view(N, 1, 3))
        x_j = LazyTensor(vertices.view(1, N, 3))
        # Normals:
        n_i = LazyTensor(normals.view(N, 1, 3))
        n_j = LazyTensor(normals.view(1, N, 3))
        # Tangent bases:
        uv_i = LazyTensor(uv.view(N, 1, 6))
        # Pseudo-geodesic squared distance:
        d2_ij = ((x_j - x_i) ** 2).sum(-1) * ((2 - (n_i | n_j)) ** 2)  # (N, N, 1)
        # Gaussian window:
        window_ij = (-d2_ij / (2 * (scale ** 2))).exp()  # (N, N, 1)
        # Project on the tangent plane:
        P_ij = uv_i.matvecmult(x_j - x_i)  # (N, N, 2)
        Q_ij = uv_i.matvecmult(n_j - n_i)  # (N, N, 2)
        # Concatenate:
        PQ_ij = P_ij.concat(Q_ij)  # (N, N, 2+2)
        # Covariances, with a scale-dependent weight:
        PPt_PQt_ij = P_ij.tensorprod(PQ_ij)  # (N, N, 2*(2+2))
        PPt_PQt_ij = window_ij * PPt_PQt_ij  # (N, N, 2*(2+2))
        # Reduction - with batch support:
        PPt_PQt_ij.ranges = ranges
        PPt_PQt = PPt_PQt_ij.sum(1)  # (N, 2*(2+2))
        # Reshape to get the two covariance matrices:
        PPt_PQt = PPt_PQt.view(N, 2, 2, 2)
        PPt, PQt = PPt_PQt[:, :, 0, :], PPt_PQt[:, :, 1, :]  # (N, 2, 2), (N, 2, 2)
        # Add a small ridge regression:
        PPt[:, 0, 0] += reg
        PPt[:, 1, 1] += reg
        # (minus) Shape operator, i.e. the differential of the Gauss map:
        # = (PPt^-1 @ PQt) : simple estimation through linear regression
        # FIX: `torch.solve` was deprecated in PyTorch 1.8 and removed in 1.13.
        # torch.linalg.solve(A, B) solves A @ X = B, which matches the old
        # torch.solve(B, A).solution convention, so results are unchanged.
        S = torch.linalg.solve(PPt, PQt)
        a, b, c, d = S[:, 0, 0], S[:, 0, 1], S[:, 1, 0], S[:, 1, 1]  # (N,)
        # Normalization
        mean_curvature = a + d
        gauss_curvature = a * d - b * c
        features += [mean_curvature.clamp(-1, 1), gauss_curvature.clamp(-1, 1)]
    features = torch.stack(features, dim=-1)
    return features
# Fast tangent convolution layer ===============================================
class ContiguousBackward(torch.autograd.Function):
    """
    Identity op whose backward pass forces a contiguous gradient layout.
    Intended to be applied right after a PyKeOps reduction.
    N.B.: This workaround fixes a bug that will be fixed in ulterior KeOp releases.
    """
    @staticmethod
    def forward(ctx, tensor):
        # Pure identity in the forward direction; nothing to save for backward.
        return tensor
    @staticmethod
    def backward(ctx, grad):
        # Re-layout the incoming gradient before handing it downstream.
        return grad.contiguous()
class dMaSIFConv(nn.Module):
    """Quasi-geodesic point convolution layer, implemented with KeOps."""
    def __init__(
        self, in_channels=1, out_channels=1, radius=1.0, hidden_units=None, cheap=False
    ):
        """Creates the KeOps convolution layer.
        I = in_channels  is the dimension of the input features
        O = out_channels is the dimension of the output features
        H = hidden_units is the dimension of the intermediate representation
        radius is the size of the pseudo-geodesic Gaussian window w_ij = W(d_ij)
        This affordable layer implements an elementary "convolution" operator
        on a cloud of N points (x_i) in dimension 3 that we decompose in three steps:
          1. Apply the MLP "net_in" on the input features "f_i". (N, I) -> (N, H)
          2. Compute H interaction terms in parallel with:
                  f_i = sum_j [ w_ij * conv(P_ij) * f_j ]
             In the equation above:
               - w_ij is a pseudo-geodesic window with a set radius.
               - P_ij is a vector of dimension 3, equal to "x_j-x_i"
                 in the local oriented basis at x_i.
               - "conv" is an MLP from R^3 to R^H:
                 - with 1 linear layer if "cheap" is True;
                 - with 2 linear layers and C=8 intermediate "cuts" otherwise.
               - "*" is coordinate-wise product.
               - f_j is the vector of transformed features.
          3. Apply the MLP "net_out" on the output features. (N, H) -> (N, O)
        A more general layer would have implemented conv(P_ij) as a full
        (H, H) matrix instead of a mere (H,) vector... At a much higher
        computational cost. The reasoning behind the code below is that
        a given time budget is better spent on using a larger architecture
        and more channels than on a very complex convolution operator.
        Interactions between channels happen at steps 1. and 3.,
        whereas the (costly) point-to-point interaction step 2.
        lets the network aggregate information in spatial neighborhoods.
        Args:
            in_channels (int, optional): numper of input features per point. Defaults to 1.
            out_channels (int, optional): number of output features per point. Defaults to 1.
            radius (float, optional): deviation of the Gaussian window on the
                quasi-geodesic distance `d_ij`. Defaults to 1..
            hidden_units (int, optional): number of hidden features per point.
                Defaults to out_channels.
            cheap (bool, optional): shall we use a 1-layer deep Filter,
                instead of a 2-layer deep MLP? Defaults to False.
        """
        super(dMaSIFConv, self).__init__()
        self.Input = in_channels
        self.Output = out_channels
        self.Radius = radius
        self.Hidden = self.Output if hidden_units is None else hidden_units
        self.Cuts = 8  # Number of hidden units for the 3D MLP Filter.
        self.cheap = cheap
        # For performance reasons, we cut our "hidden" vectors
        # in n_heads "independent heads" of dimension 8.
        self.heads_dim = 8  # 4 is probably too small; 16 is certainly too big
        # We accept "Hidden" dimensions of size 1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, ...
        if self.Hidden < self.heads_dim:
            self.heads_dim = self.Hidden
        if self.Hidden % self.heads_dim != 0:
            # NOTE(review): the concatenated message is missing a space before
            # "should" — left unchanged here since it is a runtime string.
            raise ValueError(f"The dimension of the hidden units ({self.Hidden})"\
                + f"should be a multiple of the heads dimension ({self.heads_dim}).")
        else:
            self.n_heads = self.Hidden // self.heads_dim
        # Transformation of the input features:
        self.net_in = nn.Sequential(
            nn.Linear(self.Input, self.Hidden),  # (H, I) + (H,)
            nn.LeakyReLU(negative_slope=0.2),
            nn.Linear(self.Hidden, self.Hidden),  # (H, H) + (H,)
            # nn.LayerNorm(self.Hidden),#nn.BatchNorm1d(self.Hidden),
            nn.LeakyReLU(negative_slope=0.2),
        )  # (H,)
        self.norm_in = nn.GroupNorm(4, self.Hidden)
        # self.norm_in = nn.LayerNorm(self.Hidden)
        # self.norm_in = nn.Identity()
        # 3D convolution filters, encoded as an MLP:
        if cheap:
            self.conv = nn.Sequential(
                nn.Linear(3, self.Hidden), nn.ReLU()  # (H, 3) + (H,)
            )  # KeOps does not support well LeakyReLu
        else:
            self.conv = nn.Sequential(
                nn.Linear(3, self.Cuts),  # (C, 3) + (C,)
                nn.ReLU(),  # KeOps does not support well LeakyReLu
                nn.Linear(self.Cuts, self.Hidden),
            )  # (H, C) + (H,)
        # Transformation of the output features:
        self.net_out = nn.Sequential(
            nn.Linear(self.Hidden, self.Output),  # (O, H) + (O,)
            nn.LeakyReLU(negative_slope=0.2),
            nn.Linear(self.Output, self.Output),  # (O, O) + (O,)
            # nn.LayerNorm(self.Output),#nn.BatchNorm1d(self.Output),
            nn.LeakyReLU(negative_slope=0.2),
        )  # (O,)
        self.norm_out = nn.GroupNorm(4, self.Output)
        # self.norm_out = nn.LayerNorm(self.Output)
        # self.norm_out = nn.Identity()
        # Custom initialization for the MLP convolution filters:
        # we get interesting piecewise affine cuts on a normalized neighborhood.
        with torch.no_grad():
            nn.init.normal_(self.conv[0].weight)
            nn.init.uniform_(self.conv[0].bias)
            self.conv[0].bias *= 0.8 * (self.conv[0].weight ** 2).sum(-1).sqrt()
            if not cheap:
                nn.init.uniform_(
                    self.conv[2].weight,
                    a=-1 / np.sqrt(self.Cuts),
                    b=1 / np.sqrt(self.Cuts),
                )
                nn.init.normal_(self.conv[2].bias)
                self.conv[2].bias *= 0.5 * (self.conv[2].weight ** 2).sum(-1).sqrt()
    def forward(self, points, nuv, features, ranges=None):
        """Performs a quasi-geodesic interaction step.
        points, local basis, in features  ->  out features
        (N, 3),  (N, 3, 3),    (N, I)     ->    (N, O)
        This layer computes the interaction step of Eq. (7) in the paper,
        in-between the application of two MLP networks independently on all
        feature vectors.
        Args:
            points (Tensor): (N,3) point coordinates `x_i`.
            nuv (Tensor): (N,3,3) local coordinate systems `[n_i,u_i,v_i]`.
            features (Tensor): (N,I) input feature vectors `f_i`.
            ranges (6-uple of integer Tensors, optional): low-level format
                to support batch processing, as described in the KeOps documentation.
                In practice, this will be built by a higher-level object
                to encode the relevant "batch vectors" in a way that is convenient
                for the KeOps CUDA engine. Defaults to None.
        Returns:
            (Tensor): (N,O) output feature vectors `f'_i`.
        """
        # 1. Transform the input features: -------------------------------------
        features = self.net_in(features)  # (N, I) -> (N, H)
        # GroupNorm expects a (batch, channels, length) layout:
        features = features.transpose(1, 0)[None, :, :]  # (1,H,N)
        features = self.norm_in(features)
        features = features[0].transpose(1, 0).contiguous()  # (1, H, N) -> (N, H)
        # 2. Compute the local "shape contexts": -------------------------------
        # 2.a Normalize the kernel radius:
        points = points / (sqrt(2.0) * self.Radius)  # (N, 3)
        # 2.b Encode the variables as KeOps LazyTensors
        # Vertices:
        x_i = LazyTensor(points[:, None, :])  # (N, 1, 3)
        x_j = LazyTensor(points[None, :, :])  # (1, N, 3)
        # WARNING - Here, we assume that the normals are fixed:
        normals = (
            nuv[:, 0, :].contiguous().detach()
        )  # (N, 3) - remove the .detach() if needed
        # Local bases:
        nuv_i = LazyTensor(nuv.view(-1, 1, 9))  # (N, 1, 9)
        # Normals:
        n_i = nuv_i[:3]  # (N, 1, 3)
        n_j = LazyTensor(normals[None, :, :])  # (1, N, 3)
        # To avoid register spilling when using large embeddings, we perform our KeOps reduction
        # over the vector of length "self.Hidden = self.n_heads * self.heads_dim"
        # as self.n_heads reduction over vectors of length self.heads_dim (= "Hd" in the comments).
        head_out_features = []
        for head in range(self.n_heads):
            # Extract a slice of width Hd from the feature array
            head_start = head * self.heads_dim
            head_end = head_start + self.heads_dim
            head_features = features[:, head_start:head_end].contiguous()  # (N, H) -> (N, Hd)
            # Features:
            f_j = LazyTensor(head_features[None, :, :])  # (1, N, Hd)
            # Convolution parameters:
            if self.cheap:
                # Extract a slice of Hd lines: (H, 3) -> (Hd, 3)
                A = self.conv[0].weight[head_start:head_end, :].contiguous()
                # Extract a slice of Hd coefficients: (H,) -> (Hd,)
                B = self.conv[0].bias[head_start:head_end].contiguous()
                AB = torch.cat((A, B[:, None]), dim=1)  # (Hd, 4)
                ab = LazyTensor(AB.view(1, 1, -1))  # (1, 1, Hd*4)
            else:
                A_1, B_1 = self.conv[0].weight, self.conv[0].bias  # (C, 3), (C,)
                # Extract a slice of Hd lines: (H, C) -> (Hd, C)
                A_2 = self.conv[2].weight[head_start:head_end, :].contiguous()
                # Extract a slice of Hd coefficients: (H,) -> (Hd,)
                B_2 = self.conv[2].bias[head_start:head_end].contiguous()
                a_1 = LazyTensor(A_1.view(1, 1, -1))  # (1, 1, C*3)
                b_1 = LazyTensor(B_1.view(1, 1, -1))  # (1, 1, C)
                a_2 = LazyTensor(A_2.view(1, 1, -1))  # (1, 1, Hd*C)
                b_2 = LazyTensor(B_2.view(1, 1, -1))  # (1, 1, Hd)
            # 2.c Pseudo-geodesic window:
            # Pseudo-geodesic squared distance:
            d2_ij = ((x_j - x_i) ** 2).sum(-1) * ((2 - (n_i | n_j)) ** 2)  # (N, N, 1)
            # Gaussian window:
            window_ij = (-d2_ij).exp()  # (N, N, 1)
            # 2.d Local MLP:
            # Local coordinates:
            X_ij = nuv_i.matvecmult(x_j - x_i)  # (N, N, 9) "@" (N, N, 3) = (N, N, 3)
            # MLP:
            if self.cheap:
                # Append a constant 1 so the bias rides along the matvec:
                X_ij = ab.matvecmult(
                    X_ij.concat(LazyTensor(1))
                )  # (N, N, Hd*4) @ (N, N, 3+1) = (N, N, Hd)
                X_ij = X_ij.relu()  # (N, N, Hd)
            else:
                X_ij = a_1.matvecmult(X_ij) + b_1  # (N, N, C)
                X_ij = X_ij.relu()  # (N, N, C)
                X_ij = a_2.matvecmult(X_ij) + b_2  # (N, N, Hd)
                X_ij = X_ij.relu()
            # 2.e Actual computation:
            F_ij = window_ij * X_ij * f_j  # (N, N, Hd)
            F_ij.ranges = ranges  # Support for batches and/or block-sparsity
            head_out_features.append(ContiguousBackward().apply(F_ij.sum(dim=1)))  # (N, Hd)
        # Concatenate the result of our n_heads "attention heads":
        features = torch.cat(head_out_features, dim=1)  # n_heads * (N, Hd) -> (N, H)
        # 3. Transform the output features: ------------------------------------
        features = self.net_out(features)  # (N, H) -> (N, O)
        features = features.transpose(1, 0)[None, :, :]  # (1,O,N)
        features = self.norm_out(features)
        features = features[0].transpose(1, 0).contiguous()
        return features
| [
"torch.nn.ReLU",
"numpy.sqrt",
"torch.max",
"math.sqrt",
"torch.sum",
"torch.nn.GroupNorm",
"pyvtk.PointData",
"pyvtk.VtkData",
"pyvtk.CellData",
"torch.zeros_like",
"torch.nn.init.uniform_",
"pykeops.torch.LazyTensor",
"torch.randn",
"torch.ones_like",
"torch.nn.LeakyReLU",
"torch.Ten... | [((2722, 2736), 'pyvtk.VtkData', 'VtkData', (['*data'], {}), '(*data)\n', (2729, 2736), False, 'from pyvtk import PolyData, PointData, CellData, Scalars, Vectors, VtkData, PointData\n'), ((5780, 5805), 'pykeops.torch.LazyTensor', 'LazyTensor', (['x[:, None, :]'], {}), '(x[:, None, :])\n', (5790, 5805), False, 'from pykeops.torch import LazyTensor\n'), ((5835, 5860), 'pykeops.torch.LazyTensor', 'LazyTensor', (['y[None, :, :]'], {}), '(y[None, :, :])\n', (5845, 5860), False, 'from pykeops.torch import LazyTensor\n'), ((15017, 15049), 'pykeops.torch.LazyTensor', 'LazyTensor', (['vertices[:, None, :]'], {}), '(vertices[:, None, :])\n', (15027, 15049), False, 'from pykeops.torch import LazyTensor\n'), ((15073, 15104), 'pykeops.torch.LazyTensor', 'LazyTensor', (['centers[None, :, :]'], {}), '(centers[None, :, :])\n', (15083, 15104), False, 'from pykeops.torch import LazyTensor\n'), ((15128, 15153), 'pykeops.torch.LazyTensor', 'LazyTensor', (['V[None, :, :]'], {}), '(V[None, :, :])\n', (15138, 15153), False, 'from pykeops.torch import LazyTensor\n'), ((15175, 15208), 'pykeops.torch.LazyTensor', 'LazyTensor', (['scales[None, None, :]'], {}), '(scales[None, None, :])\n', (15185, 15208), False, 'from pykeops.torch import LazyTensor\n'), ((15791, 15818), 'torch.nn.functional.normalize', 'F.normalize', (['U'], {'p': '(2)', 'dim': '(-1)'}), '(U, p=2, dim=-1)\n', (15802, 15818), True, 'import torch.nn.functional as F\n'), ((16763, 16840), 'torch.stack', 'torch.stack', (['(1 + s * x * x * a, s * b, -s * x, b, s + y * y * a, -y)'], {'dim': '(-1)'}), '((1 + s * x * x * a, s * b, -s * x, b, s + y * y * a, -y), dim=-1)\n', (16774, 16840), False, 'import torch\n'), ((21434, 21463), 'torch.stack', 'torch.stack', (['features'], {'dim': '(-1)'}), '(features, dim=-1)\n', (21445, 21463), False, 'import torch\n'), ((2146, 2167), 'pyvtk.PointData', 'PointData', (['*pointdata'], {}), '(*pointdata)\n', (2155, 2167), False, 'from pyvtk import PolyData, PointData, CellData, 
Scalars, Vectors, VtkData, PointData\n'), ((2633, 2652), 'pyvtk.CellData', 'CellData', (['*celldata'], {}), '(*celldata)\n', (2641, 2652), False, 'from pyvtk import PolyData, PointData, CellData, Scalars, Vectors, VtkData, PointData\n'), ((2753, 2775), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (2768, 2775), False, 'import os\n'), ((4848, 4872), 'torch.cat', 'torch.cat', (['points'], {'dim': '(0)'}), '(points, dim=0)\n', (4857, 4872), False, 'import torch\n'), ((4874, 4899), 'torch.cat', 'torch.cat', (['batches'], {'dim': '(0)'}), '(batches, dim=0)\n', (4883, 4899), False, 'import torch\n'), ((6298, 6364), 'torch.FloatTensor', 'torch.FloatTensor', (['[170, 110, 152, 155, 180, 190]'], {'device': 'x.device'}), '([170, 110, 152, 155, 180, 190], device=x.device)\n', (6315, 6364), False, 'import torch\n'), ((6620, 6680), 'torch.sum', 'torch.sum', (['(smoothness * atomtype_radii)'], {'dim': '(1)', 'keepdim': '(False)'}), '(smoothness * atomtype_radii, dim=1, keepdim=False)\n', (6629, 6680), False, 'import torch\n'), ((6740, 6777), 'pykeops.torch.LazyTensor', 'LazyTensor', (['smoothness[:, None, None]'], {}), '(smoothness[:, None, None])\n', (6750, 6777), False, 'from pykeops.torch import LazyTensor\n'), ((7316, 7355), 'pykeops.torch.LazyTensor', 'LazyTensor', (['mean_smoothness[None, :, :]'], {}), '(mean_smoothness[None, :, :])\n', (7326, 7355), False, 'from pykeops.torch import LazyTensor\n'), ((9730, 9749), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (9747, 9749), False, 'import torch\n'), ((11976, 12003), 'torch.nn.functional.normalize', 'F.normalize', (['g'], {'p': '(2)', 'dim': '(-1)'}), '(g, p=2, dim=-1)\n', (11987, 12003), True, 'import torch.nn.functional as F\n'), ((26007, 26035), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(4)', 'self.Hidden'], {}), '(4, self.Hidden)\n', (26019, 26035), True, 'import torch.nn as nn\n'), ((27049, 27077), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(4)', 'self.Output'], {}), '(4, 
self.Output)\n', (27061, 27077), True, 'import torch.nn as nn\n'), ((29580, 29610), 'pykeops.torch.LazyTensor', 'LazyTensor', (['points[:, None, :]'], {}), '(points[:, None, :])\n', (29590, 29610), False, 'from pykeops.torch import LazyTensor\n'), ((29638, 29668), 'pykeops.torch.LazyTensor', 'LazyTensor', (['points[None, :, :]'], {}), '(points[None, :, :])\n', (29648, 29668), False, 'from pykeops.torch import LazyTensor\n'), ((30022, 30053), 'pykeops.torch.LazyTensor', 'LazyTensor', (['normals[None, :, :]'], {}), '(normals[None, :, :])\n', (30032, 30053), False, 'from pykeops.torch import LazyTensor\n'), ((33146, 33181), 'torch.cat', 'torch.cat', (['head_out_features'], {'dim': '(1)'}), '(head_out_features, dim=1)\n', (33155, 33181), False, 'import torch\n'), ((1855, 1891), 'pyvtk.Scalars', 'Scalars', (['f'], {'name': 'f"""features_{i:02d}"""'}), "(f, name=f'features_{i:02d}')\n", (1862, 1891), False, 'from pyvtk import PolyData, PointData, CellData, Scalars, Vectors, VtkData, PointData\n'), ((2534, 2570), 'pyvtk.Scalars', 'Scalars', (['f'], {'name': 'f"""features_{i:02d}"""'}), "(f, name=f'features_{i:02d}')\n", (2541, 2570), False, 'from pyvtk import PolyData, PointData, CellData, Scalars, Vectors, VtkData, PointData\n'), ((3950, 3975), 'torch.zeros_like', 'torch.zeros_like', (['x_1[:C]'], {}), '(x_1[:C])\n', (3966, 3975), False, 'import torch\n'), ((11104, 11131), 'torch.nn.functional.normalize', 'F.normalize', (['g'], {'p': '(2)', 'dim': '(-1)'}), '(g, p=2, dim=-1)\n', (11115, 11131), True, 'import torch.nn.functional as F\n'), ((11926, 11954), 'torch.autograd.grad', 'torch.autograd.grad', (['Loss', 'p'], {}), '(Loss, p)\n', (11945, 11954), False, 'import torch\n'), ((13972, 13992), 'torch.Tensor', 'torch.Tensor', (['scales'], {}), '(scales)\n', (13984, 13992), False, 'import torch\n'), ((21135, 21156), 'torch.solve', 'torch.solve', (['PQt', 'PPt'], {}), '(PQt, PPt)\n', (21146, 21156), False, 'import torch\n'), ((25684, 25718), 'torch.nn.Linear', 'nn.Linear', 
(['self.Input', 'self.Hidden'], {}), '(self.Input, self.Hidden)\n', (25693, 25718), True, 'import torch.nn as nn\n'), ((25749, 25781), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (25761, 25781), True, 'import torch.nn as nn\n'), ((25795, 25830), 'torch.nn.Linear', 'nn.Linear', (['self.Hidden', 'self.Hidden'], {}), '(self.Hidden, self.Hidden)\n', (25804, 25830), True, 'import torch.nn as nn\n'), ((25931, 25963), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (25943, 25963), True, 'import torch.nn as nn\n'), ((26723, 26758), 'torch.nn.Linear', 'nn.Linear', (['self.Hidden', 'self.Output'], {}), '(self.Hidden, self.Output)\n', (26732, 26758), True, 'import torch.nn as nn\n'), ((26789, 26821), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (26801, 26821), True, 'import torch.nn as nn\n'), ((26835, 26870), 'torch.nn.Linear', 'nn.Linear', (['self.Output', 'self.Output'], {}), '(self.Output, self.Output)\n', (26844, 26870), True, 'import torch.nn as nn\n'), ((26971, 27003), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (26983, 27003), True, 'import torch.nn as nn\n'), ((27330, 27345), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27343, 27345), False, 'import torch\n'), ((27359, 27395), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.conv[0].weight'], {}), '(self.conv[0].weight)\n', (27374, 27395), True, 'import torch.nn as nn\n'), ((27408, 27443), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.conv[0].bias'], {}), '(self.conv[0].bias)\n', (27424, 27443), True, 'import torch.nn as nn\n'), ((30721, 30758), 'pykeops.torch.LazyTensor', 'LazyTensor', (['head_features[None, :, :]'], {}), '(head_features[None, :, :])\n', (30731, 30758), False, 'from pykeops.torch import LazyTensor\n'), ((10265, 10293), 'torch.autograd.grad', 'torch.autograd.grad', 
(['Loss', 'z'], {}), '(Loss, z)\n', (10284, 10293), False, 'import torch\n'), ((11049, 11078), 'torch.autograd.grad', 'torch.autograd.grad', (['Loss', 'zz'], {}), '(Loss, zz)\n', (11068, 11078), False, 'import torch\n'), ((26253, 26278), 'torch.nn.Linear', 'nn.Linear', (['(3)', 'self.Hidden'], {}), '(3, self.Hidden)\n', (26262, 26278), True, 'import torch.nn as nn\n'), ((26280, 26289), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (26287, 26289), True, 'import torch.nn as nn\n'), ((26431, 26454), 'torch.nn.Linear', 'nn.Linear', (['(3)', 'self.Cuts'], {}), '(3, self.Cuts)\n', (26440, 26454), True, 'import torch.nn as nn\n'), ((26489, 26498), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (26496, 26498), True, 'import torch.nn as nn\n'), ((26557, 26590), 'torch.nn.Linear', 'nn.Linear', (['self.Cuts', 'self.Hidden'], {}), '(self.Cuts, self.Hidden)\n', (26566, 26590), True, 'import torch.nn as nn\n'), ((27754, 27788), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.conv[2].bias'], {}), '(self.conv[2].bias)\n', (27769, 27788), True, 'import torch.nn as nn\n'), ((29453, 29462), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (29457, 29462), False, 'from math import pi, sqrt\n'), ((31145, 31178), 'torch.cat', 'torch.cat', (['(A, B[:, None])'], {'dim': '(1)'}), '((A, B[:, None]), dim=1)\n', (31154, 31178), False, 'import torch\n'), ((3577, 3599), 'pykeops.torch.cluster.grid_cluster', 'grid_cluster', (['x', 'scale'], {}), '(x, scale)\n', (3589, 3599), False, 'from pykeops.torch.cluster import grid_cluster, cluster_ranges_centroids, from_matrix\n'), ((3865, 3890), 'torch.ones_like', 'torch.ones_like', (['x[:, :1]'], {}), '(x[:, :1])\n', (3880, 3890), False, 'import torch\n'), ((4573, 4589), 'torch.max', 'torch.max', (['batch'], {}), '(batch)\n', (4582, 4589), False, 'import torch\n'), ((9430, 9450), 'torch.randn', 'torch.randn', (['N', 'B', 'D'], {}), '(N, B, D)\n', (9441, 9450), False, 'import torch\n'), ((32439, 32452), 'pykeops.torch.LazyTensor', 'LazyTensor', 
(['(1)'], {}), '(1)\n', (32449, 32452), False, 'from pykeops.torch import LazyTensor\n'), ((27654, 27672), 'numpy.sqrt', 'np.sqrt', (['self.Cuts'], {}), '(self.Cuts)\n', (27661, 27672), True, 'import numpy as np\n'), ((27700, 27718), 'numpy.sqrt', 'np.sqrt', (['self.Cuts'], {}), '(self.Cuts)\n', (27707, 27718), True, 'import numpy as np\n')] |
from random import randint, seed
import numpy as np
from os import path, mkdir
from maze_utils import generate_grid
seed_number = 69  # seed for both numpy and stdlib RNGs (reproducible datasets)
training_folder = "training"
testing_folder = "testing"
tot_elem_training = 100 # number of matrices to generate
tot_elem_testing = 20 # number of matrices to generate
max_w = 10 # maximum value for w (grid dimension)
max_h = 10 # maximum value for h (grid dimension)
min_w = 3 # minimum value for w
min_h = 3 # minimum value for h
def generate_dataset():
    """
    Generate the training and testing datasets of random matrices,
    maximum size 10x10, minimum size 3x3, each with at least 1 wall.
    :return:
    """
    # seed both numpy and the stdlib random module before generating,
    # so every run produces the same datasets
    np.random.seed(seed_number)
    seed(seed_number)
    generate_training(tot_elem_training)
    generate_testing(tot_elem_testing)
def generate_testing(dim: int):
    """
    Build the testing dataset.

    Creates the testing folder if it does not exist yet and fills it
    with randomly sized grids saved as text files.
    :param dim: number of matrices to create
    :return:
    """
    if not path.exists(testing_folder):
        mkdir(testing_folder)
    for idx in range(dim):
        out_file = f"{testing_folder}/matrice_{idx}"
        # draw width, height and wall count at random
        width = randint(min_w, max_w)
        height = randint(min_h, max_h)
        n_walls = randint(1, int(width * height / 2) - 1)
        np.savetxt(out_file, generate_grid(width, height, walls=n_walls),
                   delimiter=" ", fmt='%i')
def generate_training(dim: int):
    """
    Build the training dataset.

    Creates the training folder if it does not exist yet and fills it
    with randomly sized grids saved as text files.
    :param dim: number of matrices to create
    :return:
    """
    # create the folder if it does not exist
    if not path.exists(training_folder):
        mkdir(training_folder)
    for elem in range(dim):
        # BUGFIX: the original line ended with a stray "\" line
        # continuation followed by a comment line, which is a
        # SyntaxError in Python; the backslash has been removed.
        file_name = f"{training_folder}/matrice_{elem}"
        # draw w, h and wall count at random
        w = randint(min_w, max_w)
        h = randint(min_h, max_h)
        walls = randint(1, int(w * h / 2) - 1)
        grid = generate_grid(w, h, walls=walls)
        np.savetxt(file_name, grid, delimiter=" ", fmt='%i')
if __name__ == "__main__":
    # script entry point: build both the training and testing datasets
    generate_dataset()
| [
"os.path.exists",
"maze_utils.generate_grid",
"random.seed",
"os.mkdir",
"numpy.random.seed",
"numpy.savetxt",
"random.randint"
] | [((707, 734), 'numpy.random.seed', 'np.random.seed', (['seed_number'], {}), '(seed_number)\n', (721, 734), True, 'import numpy as np\n'), ((739, 756), 'random.seed', 'seed', (['seed_number'], {}), '(seed_number)\n', (743, 756), False, 'from random import randint, seed\n'), ((1101, 1128), 'os.path.exists', 'path.exists', (['testing_folder'], {}), '(testing_folder)\n', (1112, 1128), False, 'from os import path, mkdir\n'), ((1138, 1159), 'os.mkdir', 'mkdir', (['testing_folder'], {}), '(testing_folder)\n', (1143, 1159), False, 'from os import path, mkdir\n'), ((1297, 1318), 'random.randint', 'randint', (['min_w', 'max_w'], {}), '(min_w, max_w)\n', (1304, 1318), False, 'from random import randint, seed\n'), ((1331, 1352), 'random.randint', 'randint', (['min_h', 'max_h'], {}), '(min_h, max_h)\n', (1338, 1352), False, 'from random import randint, seed\n'), ((1416, 1448), 'maze_utils.generate_grid', 'generate_grid', (['w', 'h'], {'walls': 'walls'}), '(w, h, walls=walls)\n', (1429, 1448), False, 'from maze_utils import generate_grid\n'), ((1458, 1510), 'numpy.savetxt', 'np.savetxt', (['file_name', 'grid'], {'delimiter': '""" """', 'fmt': '"""%i"""'}), "(file_name, grid, delimiter=' ', fmt='%i')\n", (1468, 1510), True, 'import numpy as np\n'), ((1777, 1805), 'os.path.exists', 'path.exists', (['training_folder'], {}), '(training_folder)\n', (1788, 1805), False, 'from os import path, mkdir\n'), ((1815, 1837), 'os.mkdir', 'mkdir', (['training_folder'], {}), '(training_folder)\n', (1820, 1837), False, 'from os import path, mkdir\n'), ((1977, 1998), 'random.randint', 'randint', (['min_w', 'max_w'], {}), '(min_w, max_w)\n', (1984, 1998), False, 'from random import randint, seed\n'), ((2011, 2032), 'random.randint', 'randint', (['min_h', 'max_h'], {}), '(min_h, max_h)\n', (2018, 2032), False, 'from random import randint, seed\n'), ((2096, 2128), 'maze_utils.generate_grid', 'generate_grid', (['w', 'h'], {'walls': 'walls'}), '(w, h, walls=walls)\n', (2109, 2128), False, 'from 
maze_utils import generate_grid\n'), ((2138, 2190), 'numpy.savetxt', 'np.savetxt', (['file_name', 'grid'], {'delimiter': '""" """', 'fmt': '"""%i"""'}), "(file_name, grid, delimiter=' ', fmt='%i')\n", (2148, 2190), True, 'import numpy as np\n')] |
"""create tables
Revision ID: 6fb351569d30
Revises: 4<PASSWORD>1ff38b
Create Date: 2019-05-06 21:59:43.998735
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# NOTE(review): these identifiers look like redacted placeholders
# ('<KEY>', '<PASSWORD>') rather than real Alembic revision hashes —
# restore the actual ids from the migration history before running.
revision = '<KEY>'
down_revision = '4<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Create all tables of the 'glod' schema.

    Standalone reference tables (account, address, household,
    nominal_account, organisation, parishioner, subject) are created
    first, then the tables that hold foreign keys into them, so every
    ForeignKeyConstraint resolves at creation time.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Bank account master data (institution, sort code, BIC, IBAN).
    op.create_table('account',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('reference_no', sa.Integer(), nullable=True),
    sa.Column('purpose', sa.String(length=64), nullable=True),
    sa.Column('status', sa.Enum('Active', 'Closed', name='accountstatus', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.Column('institution', sa.String(length=64), nullable=True),
    sa.Column('sort_code', sa.String(length=64), nullable=True),
    sa.Column('account_no', sa.String(length=64), nullable=True),
    sa.Column('BIC', sa.String(length=64), nullable=True),
    sa.Column('IBAN', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Postal addresses (referenced by organisation_address below).
    op.create_table('address',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('address1', sa.String(length=64), nullable=True),
    sa.Column('address2', sa.String(length=64), nullable=True),
    sa.Column('address3', sa.String(length=64), nullable=True),
    sa.Column('county', sa.String(length=64), nullable=True),
    sa.Column('countryISO', sa.String(length=64), nullable=True),
    sa.Column('eircode', sa.String(length=64), nullable=True),
    sa.Column('telephone', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Household records with an inline address (no FK to 'address').
    op.create_table('household',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('reference_no', sa.Integer(), nullable=True),
    sa.Column('address1', sa.String(length=64), nullable=True),
    sa.Column('address2', sa.String(length=64), nullable=True),
    sa.Column('address3', sa.String(length=64), nullable=True),
    sa.Column('county', sa.String(length=64), nullable=True),
    sa.Column('eircode', sa.String(length=64), nullable=True),
    sa.Column('telephone', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Chart-of-accounts entries classified by SOFA heading and category.
    op.create_table('nominal_account',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('code', sa.String(length=64), nullable=True),
    sa.Column('description', sa.String(length=64), nullable=True),
    sa.Column('SOFA_heading', sa.Enum('Donations_and_legacies', 'Income_from_charitable_activities', 'Other_trading_activities', 'Investments', 'Other_income', 'Raising_funds', 'Expenditure_on_charitable_activities', 'Other_expenditure', name='nominalaccountsofaheading', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('category', sa.Enum('Income', 'Expenditure', 'Fixed_assets', 'Current_assets', 'Liabilities', name='nominalaccountcategory', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('sub_category', sa.Enum('Tangible_assets', 'Investments', 'Debtors', 'Cash_at_bank_and_in_hand', 'Creditors_Amounts_falling_due_in_one_year', 'Creditors_Amounts_falling_due_after_more_than_one_year', 'Agency_accounts', 'Reserves', name='nominalaccountsubcategory', schema='glod', inherit_schema=True), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Organisations (households, companies, charities, government).
    op.create_table('organisation',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.Column('category', sa.Enum('Household', 'NonLocalHousehold', 'Company', 'Charity', 'Government', name='organisationcategory', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('status', sa.Enum('Active', 'Inactive', name='organisationstatus', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('reference_no', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Parishioner contact and GDPR-consent details.
    # NOTE(review): by_email/by_phone/by_post/news/finance are String
    # here but Boolean in communication_permission below — confirm the
    # type mismatch is intentional.
    op.create_table('parishioner',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('reference_no', sa.Integer(), nullable=True),
    sa.Column('surname', sa.String(length=64), nullable=True),
    sa.Column('first_name', sa.String(length=64), nullable=True),
    sa.Column('title', sa.String(length=64), nullable=True),
    sa.Column('status', sa.String(length=64), nullable=True),
    sa.Column('main_contact', sa.String(length=64), nullable=True),
    sa.Column('household_ref_no', sa.Integer(), nullable=True),
    sa.Column('mobile', sa.String(length=64), nullable=True),
    sa.Column('other', sa.String(length=64), nullable=True),
    sa.Column('email', sa.String(length=64), nullable=True),
    sa.Column('gdpr_response', sa.String(length=64), nullable=True),
    sa.Column('by_email', sa.String(length=64), nullable=True),
    sa.Column('by_phone', sa.String(length=64), nullable=True),
    sa.Column('by_post', sa.String(length=64), nullable=True),
    sa.Column('news', sa.String(length=64), nullable=True),
    sa.Column('finance', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Transaction subjects with vestry summary labels.
    op.create_table('subject',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.Column('select_vestry_summary', sa.String(length=64), nullable=True),
    sa.Column('easter_vestry_summary', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Funds, each optionally tied to a bank account.
    op.create_table('fund',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.Column('restriction', sa.Enum('Unrestricted', 'Restricted', 'Endowment', name='fundrestriction', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('is_parish_fund', sa.Boolean(), nullable=True),
    sa.Column('is_realised', sa.Boolean(), nullable=True),
    sa.Column('account_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['account_id'], ['glod.account.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Link table between organisations and addresses, with a status.
    op.create_table('organisation_address',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('status', sa.Enum('Current', 'Prior', name='organisationaddressstatus', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('address_id', sa.Integer(), nullable=True),
    sa.Column('organisation_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['address_id'], ['glod.address.id'], ),
    sa.ForeignKeyConstraint(['organisation_id'], ['glod.organisation.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Individual people, optionally attached to an organisation.
    op.create_table('person',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('family_name', sa.String(length=64), nullable=True),
    sa.Column('given_name', sa.String(length=64), nullable=True),
    sa.Column('title', sa.String(length=64), nullable=True),
    sa.Column('status', sa.Enum('Active', 'LostContact', 'Deceased', name='personstatus', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('mobile', sa.String(length=64), nullable=True),
    sa.Column('other_phone', sa.String(length=64), nullable=True),
    sa.Column('email', sa.String(length=64), nullable=True),
    sa.Column('parishioner_reference_no', sa.Integer(), nullable=True),
    sa.Column('organisation_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['organisation_id'], ['glod.organisation.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Individual bank-statement lines belonging to an account.
    op.create_table('statement_item',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('date', sa.Date(), nullable=True),
    sa.Column('details', sa.String(length=64), nullable=True),
    sa.Column('currency', sa.String(length=64), nullable=True),
    sa.Column('debit', sa.Numeric(scale=2), nullable=True),
    sa.Column('credit', sa.Numeric(scale=2), nullable=True),
    sa.Column('balance', sa.Numeric(scale=2), nullable=True),
    sa.Column('detail_override', sa.String(length=64), nullable=True),
    sa.Column('designated_balance', sa.Enum('No', 'Opening', 'Closing', name='statementitemdesignatedbalance', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('account_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['account_id'], ['glod.account.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Per-person GDPR communication consent flags.
    op.create_table('communication_permission',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('is_main_contact', sa.Boolean(), nullable=True),
    sa.Column('gdpr_response', sa.DateTime(), nullable=True),
    sa.Column('by_email', sa.Boolean(), nullable=True),
    sa.Column('by_phone', sa.Boolean(), nullable=True),
    sa.Column('by_post', sa.Boolean(), nullable=True),
    sa.Column('news', sa.Boolean(), nullable=True),
    sa.Column('finance', sa.Boolean(), nullable=True),
    sa.Column('person_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['person_id'], ['glod.person.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Transaction counterparties, linked to a person or organisation.
    op.create_table('counterparty',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('reference_no', sa.Integer(), nullable=True),
    sa.Column('bank_text', sa.String(length=64), nullable=True),
    sa.Column('name_override', sa.String(length=64), nullable=True),
    sa.Column('method', sa.String(length=64), nullable=True),
    sa.Column('has_SO_card', sa.Boolean(), nullable=True),
    sa.Column('by_email', sa.Boolean(), nullable=True),
    sa.Column('notes', sa.String(length=1024), nullable=True),
    sa.Column('person_id', sa.Integer(), nullable=True),
    sa.Column('organisation_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['organisation_id'], ['glod.organisation.id'], ),
    sa.ForeignKeyConstraint(['person_id'], ['glod.person.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # PPS numbers attached to people.
    op.create_table('pps',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('pps', sa.String(length=64), nullable=True),
    sa.Column('name_override', sa.String(length=64), nullable=True),
    sa.Column('notes', sa.String(length=1024), nullable=True),
    sa.Column('person_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['person_id'], ['glod.person.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Yearly envelope numbers assigned to counterparties/people.
    op.create_table('envelope',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('year', sa.Integer(), nullable=True),
    sa.Column('envelope_number', sa.Integer(), nullable=True),
    sa.Column('counterparty_id', sa.Integer(), nullable=True),
    sa.Column('person_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['counterparty_id'], ['glod.counterparty.id'], ),
    sa.ForeignKeyConstraint(['person_id'], ['glod.person.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Financial transactions: amount, method, counterparty, subject, fund.
    op.create_table('transaction',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('reference_no', sa.Integer(), nullable=True),
    sa.Column('public_code', sa.String(length=64), nullable=True),
    sa.Column('year', sa.Integer(), nullable=True),
    sa.Column('month', sa.Integer(), nullable=True),
    sa.Column('day', sa.Integer(), nullable=True),
    sa.Column('payment_method', sa.Enum('BankCharges', 'BankTax', 'BillpayOnline', 'CashLodgmentEnvelopes', 'CashLodgmentOther', 'CashLodgmentPlate', 'Cheque', 'DirectDebit', 'DirectPayment', 'DirectTransfer', 'InBranch', 'StandingOrderMonthly', 'StandingOrderOther', 'StandingOrderQuarterly', 'StandingOrders', 'UnrealisedGainLoss', name='paymentmethod', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('description', sa.String(length=1024), nullable=True),
    sa.Column('amount', sa.Numeric(scale=2), nullable=True),
    sa.Column('income_expenditure', sa.Enum('Income', 'Expenditure', name='incomeexpenditure', schema='glod', inherit_schema=True), nullable=True),
    sa.Column('FY', sa.String(length=64), nullable=True),
    sa.Column('comments', sa.String(length=1024), nullable=True),
    sa.Column('counterparty_id', sa.Integer(), nullable=True),
    sa.Column('subject_id', sa.Integer(), nullable=True),
    sa.Column('fund_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['counterparty_id'], ['glod.counterparty.id'], ),
    sa.ForeignKeyConstraint(['fund_id'], ['glod.fund.id'], ),
    sa.ForeignKeyConstraint(['subject_id'], ['glod.subject.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # Reconciliation link between transactions and statement items.
    op.create_table('transaction_check',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('transaction_id', sa.Integer(), nullable=True),
    sa.Column('statement_item_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['statement_item_id'], ['glod.statement_item.id'], ),
    sa.ForeignKeyConstraint(['transaction_id'], ['glod.transaction.id'], ),
    sa.PrimaryKeyConstraint('id'),
    schema='glod'
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every 'glod'-schema table created by :func:`upgrade`.

    Tables are dropped children-first (reverse dependency order) so no
    foreign-key constraint blocks a drop.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    tables_in_drop_order = (
        'transaction_check',
        'transaction',
        'envelope',
        'pps',
        'counterparty',
        'communication_permission',
        'statement_item',
        'person',
        'organisation_address',
        'fund',
        'subject',
        'parishioner',
        'organisation',
        'nominal_account',
        'household',
        'address',
        'account',
    )
    for table_name in tables_in_drop_order:
        op.drop_table(table_name, schema='glod')
    # ### end Alembic commands ###
| [
"sqlalchemy.ForeignKeyConstraint",
"sqlalchemy.DateTime",
"alembic.op.drop_table",
"sqlalchemy.Boolean",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Date",
"sqlalchemy.Integer",
"sqlalchemy.Numeric",
"sqlalchemy.String",
"sqlalchemy.Enum"
] | [((13130, 13179), 'alembic.op.drop_table', 'op.drop_table', (['"""transaction_check"""'], {'schema': '"""glod"""'}), "('transaction_check', schema='glod')\n", (13143, 13179), False, 'from alembic import op\n'), ((13184, 13227), 'alembic.op.drop_table', 'op.drop_table', (['"""transaction"""'], {'schema': '"""glod"""'}), "('transaction', schema='glod')\n", (13197, 13227), False, 'from alembic import op\n'), ((13232, 13272), 'alembic.op.drop_table', 'op.drop_table', (['"""envelope"""'], {'schema': '"""glod"""'}), "('envelope', schema='glod')\n", (13245, 13272), False, 'from alembic import op\n'), ((13277, 13312), 'alembic.op.drop_table', 'op.drop_table', (['"""pps"""'], {'schema': '"""glod"""'}), "('pps', schema='glod')\n", (13290, 13312), False, 'from alembic import op\n'), ((13317, 13361), 'alembic.op.drop_table', 'op.drop_table', (['"""counterparty"""'], {'schema': '"""glod"""'}), "('counterparty', schema='glod')\n", (13330, 13361), False, 'from alembic import op\n'), ((13366, 13422), 'alembic.op.drop_table', 'op.drop_table', (['"""communication_permission"""'], {'schema': '"""glod"""'}), "('communication_permission', schema='glod')\n", (13379, 13422), False, 'from alembic import op\n'), ((13427, 13473), 'alembic.op.drop_table', 'op.drop_table', (['"""statement_item"""'], {'schema': '"""glod"""'}), "('statement_item', schema='glod')\n", (13440, 13473), False, 'from alembic import op\n'), ((13478, 13516), 'alembic.op.drop_table', 'op.drop_table', (['"""person"""'], {'schema': '"""glod"""'}), "('person', schema='glod')\n", (13491, 13516), False, 'from alembic import op\n'), ((13521, 13573), 'alembic.op.drop_table', 'op.drop_table', (['"""organisation_address"""'], {'schema': '"""glod"""'}), "('organisation_address', schema='glod')\n", (13534, 13573), False, 'from alembic import op\n'), ((13578, 13614), 'alembic.op.drop_table', 'op.drop_table', (['"""fund"""'], {'schema': '"""glod"""'}), "('fund', schema='glod')\n", (13591, 13614), False, 'from alembic import 
op\n'), ((13619, 13658), 'alembic.op.drop_table', 'op.drop_table', (['"""subject"""'], {'schema': '"""glod"""'}), "('subject', schema='glod')\n", (13632, 13658), False, 'from alembic import op\n'), ((13663, 13706), 'alembic.op.drop_table', 'op.drop_table', (['"""parishioner"""'], {'schema': '"""glod"""'}), "('parishioner', schema='glod')\n", (13676, 13706), False, 'from alembic import op\n'), ((13711, 13755), 'alembic.op.drop_table', 'op.drop_table', (['"""organisation"""'], {'schema': '"""glod"""'}), "('organisation', schema='glod')\n", (13724, 13755), False, 'from alembic import op\n'), ((13760, 13807), 'alembic.op.drop_table', 'op.drop_table', (['"""nominal_account"""'], {'schema': '"""glod"""'}), "('nominal_account', schema='glod')\n", (13773, 13807), False, 'from alembic import op\n'), ((13812, 13853), 'alembic.op.drop_table', 'op.drop_table', (['"""household"""'], {'schema': '"""glod"""'}), "('household', schema='glod')\n", (13825, 13853), False, 'from alembic import op\n'), ((13858, 13897), 'alembic.op.drop_table', 'op.drop_table', (['"""address"""'], {'schema': '"""glod"""'}), "('address', schema='glod')\n", (13871, 13897), False, 'from alembic import op\n'), ((13902, 13941), 'alembic.op.drop_table', 'op.drop_table', (['"""account"""'], {'schema': '"""glod"""'}), "('account', schema='glod')\n", (13915, 13941), False, 'from alembic import op\n'), ((1090, 1119), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1113, 1119), True, 'import sqlalchemy as sa\n'), ((1679, 1708), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1702, 1708), True, 'import sqlalchemy as sa\n'), ((2264, 2293), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (2287, 2293), True, 'import sqlalchemy as sa\n'), ((3389, 3418), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (3412, 3418), True, 'import 
sqlalchemy as sa\n'), ((3975, 4004), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (3998, 4004), True, 'import sqlalchemy as sa\n'), ((5131, 5160), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (5154, 5160), True, 'import sqlalchemy as sa\n'), ((5486, 5515), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (5509, 5515), True, 'import sqlalchemy as sa\n'), ((6020, 6080), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['account_id']", "['glod.account.id']"], {}), "(['account_id'], ['glod.account.id'])\n", (6043, 6080), True, 'import sqlalchemy as sa\n'), ((6088, 6117), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (6111, 6117), True, 'import sqlalchemy as sa\n'), ((6502, 6562), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['address_id']", "['glod.address.id']"], {}), "(['address_id'], ['glod.address.id'])\n", (6525, 6562), True, 'import sqlalchemy as sa\n'), ((6570, 6640), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['organisation_id']", "['glod.organisation.id']"], {}), "(['organisation_id'], ['glod.organisation.id'])\n", (6593, 6640), True, 'import sqlalchemy as sa\n'), ((6648, 6677), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (6671, 6677), True, 'import sqlalchemy as sa\n'), ((7450, 7520), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['organisation_id']", "['glod.organisation.id']"], {}), "(['organisation_id'], ['glod.organisation.id'])\n", (7473, 7520), True, 'import sqlalchemy as sa\n'), ((7528, 7557), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (7551, 7557), True, 'import sqlalchemy as sa\n'), ((8328, 8388), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['account_id']", 
"['glod.account.id']"], {}), "(['account_id'], ['glod.account.id'])\n", (8351, 8388), True, 'import sqlalchemy as sa\n'), ((8396, 8425), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (8419, 8425), True, 'import sqlalchemy as sa\n'), ((9010, 9068), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['person_id']", "['glod.person.id']"], {}), "(['person_id'], ['glod.person.id'])\n", (9033, 9068), True, 'import sqlalchemy as sa\n'), ((9076, 9105), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (9099, 9105), True, 'import sqlalchemy as sa\n'), ((9776, 9846), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['organisation_id']", "['glod.organisation.id']"], {}), "(['organisation_id'], ['glod.organisation.id'])\n", (9799, 9846), True, 'import sqlalchemy as sa\n'), ((9854, 9912), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['person_id']", "['glod.person.id']"], {}), "(['person_id'], ['glod.person.id'])\n", (9877, 9912), True, 'import sqlalchemy as sa\n'), ((9920, 9949), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (9943, 9949), True, 'import sqlalchemy as sa\n'), ((10305, 10363), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['person_id']", "['glod.person.id']"], {}), "(['person_id'], ['glod.person.id'])\n", (10328, 10363), True, 'import sqlalchemy as sa\n'), ((10371, 10400), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (10394, 10400), True, 'import sqlalchemy as sa\n'), ((10748, 10818), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['counterparty_id']", "['glod.counterparty.id']"], {}), "(['counterparty_id'], ['glod.counterparty.id'])\n", (10771, 10818), True, 'import sqlalchemy as sa\n'), ((10826, 10884), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['person_id']", 
"['glod.person.id']"], {}), "(['person_id'], ['glod.person.id'])\n", (10849, 10884), True, 'import sqlalchemy as sa\n'), ((10892, 10921), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (10915, 10921), True, 'import sqlalchemy as sa\n'), ((12307, 12377), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['counterparty_id']", "['glod.counterparty.id']"], {}), "(['counterparty_id'], ['glod.counterparty.id'])\n", (12330, 12377), True, 'import sqlalchemy as sa\n'), ((12385, 12439), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['fund_id']", "['glod.fund.id']"], {}), "(['fund_id'], ['glod.fund.id'])\n", (12408, 12439), True, 'import sqlalchemy as sa\n'), ((12447, 12507), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['subject_id']", "['glod.subject.id']"], {}), "(['subject_id'], ['glod.subject.id'])\n", (12470, 12507), True, 'import sqlalchemy as sa\n'), ((12515, 12544), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (12538, 12544), True, 'import sqlalchemy as sa\n'), ((12793, 12867), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['statement_item_id']", "['glod.statement_item.id']"], {}), "(['statement_item_id'], ['glod.statement_item.id'])\n", (12816, 12867), True, 'import sqlalchemy as sa\n'), ((12875, 12943), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['transaction_id']", "['glod.transaction.id']"], {}), "(['transaction_id'], ['glod.transaction.id'])\n", (12898, 12943), True, 'import sqlalchemy as sa\n'), ((12951, 12980), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (12974, 12980), True, 'import sqlalchemy as sa\n'), ((428, 440), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (438, 440), True, 'import sqlalchemy as sa\n'), ((489, 501), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (499, 501), True, 'import sqlalchemy as 
sa\n'), ((544, 564), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (553, 564), True, 'import sqlalchemy as sa\n'), ((606, 695), 'sqlalchemy.Enum', 'sa.Enum', (['"""Active"""', '"""Closed"""'], {'name': '"""accountstatus"""', 'schema': '"""glod"""', 'inherit_schema': '(True)'}), "('Active', 'Closed', name='accountstatus', schema='glod',\n inherit_schema=True)\n", (613, 695), True, 'import sqlalchemy as sa\n'), ((731, 751), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (740, 751), True, 'import sqlalchemy as sa\n'), ((798, 818), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (807, 818), True, 'import sqlalchemy as sa\n'), ((863, 883), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (872, 883), True, 'import sqlalchemy as sa\n'), ((929, 949), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (938, 949), True, 'import sqlalchemy as sa\n'), ((988, 1008), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (997, 1008), True, 'import sqlalchemy as sa\n'), ((1048, 1068), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1057, 1068), True, 'import sqlalchemy as sa\n'), ((1196, 1208), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1206, 1208), True, 'import sqlalchemy as sa\n'), ((1253, 1273), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1262, 1273), True, 'import sqlalchemy as sa\n'), ((1317, 1337), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1326, 1337), True, 'import sqlalchemy as sa\n'), ((1381, 1401), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1390, 1401), True, 'import sqlalchemy as sa\n'), ((1443, 1463), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1452, 1463), True, 'import sqlalchemy as sa\n'), ((1509, 1529), 
'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1518, 1529), True, 'import sqlalchemy as sa\n'), ((1572, 1592), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1581, 1592), True, 'import sqlalchemy as sa\n'), ((1637, 1657), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1646, 1657), True, 'import sqlalchemy as sa\n'), ((1787, 1799), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1797, 1799), True, 'import sqlalchemy as sa\n'), ((1848, 1860), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1858, 1860), True, 'import sqlalchemy as sa\n'), ((1904, 1924), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1913, 1924), True, 'import sqlalchemy as sa\n'), ((1968, 1988), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1977, 1988), True, 'import sqlalchemy as sa\n'), ((2032, 2052), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (2041, 2052), True, 'import sqlalchemy as sa\n'), ((2094, 2114), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (2103, 2114), True, 'import sqlalchemy as sa\n'), ((2157, 2177), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (2166, 2177), True, 'import sqlalchemy as sa\n'), ((2222, 2242), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (2231, 2242), True, 'import sqlalchemy as sa\n'), ((2378, 2390), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2388, 2390), True, 'import sqlalchemy as sa\n'), ((2431, 2451), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (2440, 2451), True, 'import sqlalchemy as sa\n'), ((2498, 2518), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (2507, 2518), True, 'import sqlalchemy as sa\n'), ((2566, 2859), 'sqlalchemy.Enum', 'sa.Enum', (['"""Donations_and_legacies"""', 
'"""Income_from_charitable_activities"""', '"""Other_trading_activities"""', '"""Investments"""', '"""Other_income"""', '"""Raising_funds"""', '"""Expenditure_on_charitable_activities"""', '"""Other_expenditure"""'], {'name': '"""nominalaccountsofaheading"""', 'schema': '"""glod"""', 'inherit_schema': '(True)'}), "('Donations_and_legacies', 'Income_from_charitable_activities',\n 'Other_trading_activities', 'Investments', 'Other_income',\n 'Raising_funds', 'Expenditure_on_charitable_activities',\n 'Other_expenditure', name='nominalaccountsofaheading', schema='glod',\n inherit_schema=True)\n", (2573, 2859), True, 'import sqlalchemy as sa\n'), ((2887, 3043), 'sqlalchemy.Enum', 'sa.Enum', (['"""Income"""', '"""Expenditure"""', '"""Fixed_assets"""', '"""Current_assets"""', '"""Liabilities"""'], {'name': '"""nominalaccountcategory"""', 'schema': '"""glod"""', 'inherit_schema': '(True)'}), "('Income', 'Expenditure', 'Fixed_assets', 'Current_assets',\n 'Liabilities', name='nominalaccountcategory', schema='glod',\n inherit_schema=True)\n", (2894, 3043), True, 'import sqlalchemy as sa\n'), ((3083, 3384), 'sqlalchemy.Enum', 'sa.Enum', (['"""Tangible_assets"""', '"""Investments"""', '"""Debtors"""', '"""Cash_at_bank_and_in_hand"""', '"""Creditors_Amounts_falling_due_in_one_year"""', '"""Creditors_Amounts_falling_due_after_more_than_one_year"""', '"""Agency_accounts"""', '"""Reserves"""'], {'name': '"""nominalaccountsubcategory"""', 'schema': '"""glod"""', 'inherit_schema': '(True)'}), "('Tangible_assets', 'Investments', 'Debtors',\n 'Cash_at_bank_and_in_hand', 'Creditors_Amounts_falling_due_in_one_year',\n 'Creditors_Amounts_falling_due_after_more_than_one_year',\n 'Agency_accounts', 'Reserves', name='nominalaccountsubcategory', schema\n ='glod', inherit_schema=True)\n", (3090, 3384), True, 'import sqlalchemy as sa\n'), ((3500, 3512), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3510, 3512), True, 'import sqlalchemy as sa\n'), ((3553, 3573), 'sqlalchemy.String', 
'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (3562, 3573), True, 'import sqlalchemy as sa\n'), ((3617, 3767), 'sqlalchemy.Enum', 'sa.Enum', (['"""Household"""', '"""NonLocalHousehold"""', '"""Company"""', '"""Charity"""', '"""Government"""'], {'name': '"""organisationcategory"""', 'schema': '"""glod"""', 'inherit_schema': '(True)'}), "('Household', 'NonLocalHousehold', 'Company', 'Charity',\n 'Government', name='organisationcategory', schema='glod',\n inherit_schema=True)\n", (3624, 3767), True, 'import sqlalchemy as sa\n'), ((3801, 3897), 'sqlalchemy.Enum', 'sa.Enum', (['"""Active"""', '"""Inactive"""'], {'name': '"""organisationstatus"""', 'schema': '"""glod"""', 'inherit_schema': '(True)'}), "('Active', 'Inactive', name='organisationstatus', schema='glod',\n inherit_schema=True)\n", (3808, 3897), True, 'import sqlalchemy as sa\n'), ((3941, 3953), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3951, 3953), True, 'import sqlalchemy as sa\n'), ((4085, 4097), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (4095, 4097), True, 'import sqlalchemy as sa\n'), ((4146, 4158), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (4156, 4158), True, 'import sqlalchemy as sa\n'), ((4201, 4221), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (4210, 4221), True, 'import sqlalchemy as sa\n'), ((4267, 4287), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (4276, 4287), True, 'import sqlalchemy as sa\n'), ((4328, 4348), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (4337, 4348), True, 'import sqlalchemy as sa\n'), ((4390, 4410), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (4399, 4410), True, 'import sqlalchemy as sa\n'), ((4458, 4478), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (4467, 4478), True, 'import sqlalchemy as sa\n'), ((4530, 4542), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', 
(4540, 4542), True, 'import sqlalchemy as sa\n'), ((4584, 4604), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (4593, 4604), True, 'import sqlalchemy as sa\n'), ((4645, 4665), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (4654, 4665), True, 'import sqlalchemy as sa\n'), ((4706, 4726), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (4715, 4726), True, 'import sqlalchemy as sa\n'), ((4775, 4795), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (4784, 4795), True, 'import sqlalchemy as sa\n'), ((4839, 4859), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (4848, 4859), True, 'import sqlalchemy as sa\n'), ((4903, 4923), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (4912, 4923), True, 'import sqlalchemy as sa\n'), ((4966, 4986), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (4975, 4986), True, 'import sqlalchemy as sa\n'), ((5026, 5046), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (5035, 5046), True, 'import sqlalchemy as sa\n'), ((5089, 5109), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (5098, 5109), True, 'import sqlalchemy as sa\n'), ((5237, 5249), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (5247, 5249), True, 'import sqlalchemy as sa\n'), ((5290, 5310), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (5299, 5310), True, 'import sqlalchemy as sa\n'), ((5367, 5387), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (5376, 5387), True, 'import sqlalchemy as sa\n'), ((5444, 5464), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (5453, 5464), True, 'import sqlalchemy as sa\n'), ((5589, 5601), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (5599, 5601), True, 'import sqlalchemy as sa\n'), ((5642, 
5662), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (5651, 5662), True, 'import sqlalchemy as sa\n'), ((5709, 5823), 'sqlalchemy.Enum', 'sa.Enum', (['"""Unrestricted"""', '"""Restricted"""', '"""Endowment"""'], {'name': '"""fundrestriction"""', 'schema': '"""glod"""', 'inherit_schema': '(True)'}), "('Unrestricted', 'Restricted', 'Endowment', name='fundrestriction',\n schema='glod', inherit_schema=True)\n", (5716, 5823), True, 'import sqlalchemy as sa\n'), ((5869, 5881), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (5879, 5881), True, 'import sqlalchemy as sa\n'), ((5928, 5940), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (5938, 5940), True, 'import sqlalchemy as sa\n'), ((5986, 5998), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (5996, 5998), True, 'import sqlalchemy as sa\n'), ((6207, 6219), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (6217, 6219), True, 'import sqlalchemy as sa\n'), ((6262, 6363), 'sqlalchemy.Enum', 'sa.Enum', (['"""Current"""', '"""Prior"""'], {'name': '"""organisationaddressstatus"""', 'schema': '"""glod"""', 'inherit_schema': '(True)'}), "('Current', 'Prior', name='organisationaddressstatus', schema='glod',\n inherit_schema=True)\n", (6269, 6363), True, 'import sqlalchemy as sa\n'), ((6405, 6417), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (6415, 6417), True, 'import sqlalchemy as sa\n'), ((6468, 6480), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (6478, 6480), True, 'import sqlalchemy as sa\n'), ((6753, 6765), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (6763, 6765), True, 'import sqlalchemy as sa\n'), ((6813, 6833), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (6822, 6833), True, 'import sqlalchemy as sa\n'), ((6879, 6899), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (6888, 6899), True, 'import sqlalchemy as sa\n'), ((6940, 6960), 'sqlalchemy.String', 'sa.String', ([], {'length': 
'(64)'}), '(length=64)\n', (6949, 6960), True, 'import sqlalchemy as sa\n'), ((7002, 7108), 'sqlalchemy.Enum', 'sa.Enum', (['"""Active"""', '"""LostContact"""', '"""Deceased"""'], {'name': '"""personstatus"""', 'schema': '"""glod"""', 'inherit_schema': '(True)'}), "('Active', 'LostContact', 'Deceased', name='personstatus', schema=\n 'glod', inherit_schema=True)\n", (7009, 7108), True, 'import sqlalchemy as sa\n'), ((7145, 7165), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (7154, 7165), True, 'import sqlalchemy as sa\n'), ((7212, 7232), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (7221, 7232), True, 'import sqlalchemy as sa\n'), ((7273, 7293), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (7282, 7293), True, 'import sqlalchemy as sa\n'), ((7353, 7365), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (7363, 7365), True, 'import sqlalchemy as sa\n'), ((7416, 7428), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (7426, 7428), True, 'import sqlalchemy as sa\n'), ((7641, 7653), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (7651, 7653), True, 'import sqlalchemy as sa\n'), ((7694, 7703), 'sqlalchemy.Date', 'sa.Date', ([], {}), '()\n', (7701, 7703), True, 'import sqlalchemy as sa\n'), ((7746, 7766), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (7755, 7766), True, 'import sqlalchemy as sa\n'), ((7810, 7830), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (7819, 7830), True, 'import sqlalchemy as sa\n'), ((7871, 7890), 'sqlalchemy.Numeric', 'sa.Numeric', ([], {'scale': '(2)'}), '(scale=2)\n', (7881, 7890), True, 'import sqlalchemy as sa\n'), ((7932, 7951), 'sqlalchemy.Numeric', 'sa.Numeric', ([], {'scale': '(2)'}), '(scale=2)\n', (7942, 7951), True, 'import sqlalchemy as sa\n'), ((7994, 8013), 'sqlalchemy.Numeric', 'sa.Numeric', ([], {'scale': '(2)'}), '(scale=2)\n', (8004, 8013), True, 'import 
sqlalchemy as sa\n'), ((8064, 8084), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (8073, 8084), True, 'import sqlalchemy as sa\n'), ((8138, 8252), 'sqlalchemy.Enum', 'sa.Enum', (['"""No"""', '"""Opening"""', '"""Closing"""'], {'name': '"""statementitemdesignatedbalance"""', 'schema': '"""glod"""', 'inherit_schema': '(True)'}), "('No', 'Opening', 'Closing', name='statementitemdesignatedbalance',\n schema='glod', inherit_schema=True)\n", (8145, 8252), True, 'import sqlalchemy as sa\n'), ((8294, 8306), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (8304, 8306), True, 'import sqlalchemy as sa\n'), ((8519, 8531), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (8529, 8531), True, 'import sqlalchemy as sa\n'), ((8583, 8595), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (8593, 8595), True, 'import sqlalchemy as sa\n'), ((8644, 8657), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (8655, 8657), True, 'import sqlalchemy as sa\n'), ((8701, 8713), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (8711, 8713), True, 'import sqlalchemy as sa\n'), ((8757, 8769), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (8767, 8769), True, 'import sqlalchemy as sa\n'), ((8812, 8824), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (8822, 8824), True, 'import sqlalchemy as sa\n'), ((8864, 8876), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (8874, 8876), True, 'import sqlalchemy as sa\n'), ((8919, 8931), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (8929, 8931), True, 'import sqlalchemy as sa\n'), ((8976, 8988), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (8986, 8988), True, 'import sqlalchemy as sa\n'), ((9187, 9199), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (9197, 9199), True, 'import sqlalchemy as sa\n'), ((9248, 9260), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (9258, 9260), True, 'import sqlalchemy as sa\n'), ((9305, 9325), 'sqlalchemy.String', 
'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (9314, 9325), True, 'import sqlalchemy as sa\n'), ((9374, 9394), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (9383, 9394), True, 'import sqlalchemy as sa\n'), ((9436, 9456), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (9445, 9456), True, 'import sqlalchemy as sa\n'), ((9503, 9515), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (9513, 9515), True, 'import sqlalchemy as sa\n'), ((9559, 9571), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (9569, 9571), True, 'import sqlalchemy as sa\n'), ((9612, 9634), 'sqlalchemy.String', 'sa.String', ([], {'length': '(1024)'}), '(length=1024)\n', (9621, 9634), True, 'import sqlalchemy as sa\n'), ((9679, 9691), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (9689, 9691), True, 'import sqlalchemy as sa\n'), ((9742, 9754), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (9752, 9754), True, 'import sqlalchemy as sa\n'), ((10022, 10034), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (10032, 10034), True, 'import sqlalchemy as sa\n'), ((10074, 10094), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (10083, 10094), True, 'import sqlalchemy as sa\n'), ((10143, 10163), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (10152, 10163), True, 'import sqlalchemy as sa\n'), ((10204, 10226), 'sqlalchemy.String', 'sa.String', ([], {'length': '(1024)'}), '(length=1024)\n', (10213, 10226), True, 'import sqlalchemy as sa\n'), ((10271, 10283), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (10281, 10283), True, 'import sqlalchemy as sa\n'), ((10478, 10490), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (10488, 10490), True, 'import sqlalchemy as sa\n'), ((10531, 10543), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (10541, 10543), True, 'import sqlalchemy as sa\n'), ((10594, 10606), 'sqlalchemy.Integer', 'sa.Integer', 
([], {}), '()\n', (10604, 10606), True, 'import sqlalchemy as sa\n'), ((10657, 10669), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (10667, 10669), True, 'import sqlalchemy as sa\n'), ((10714, 10726), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (10724, 10726), True, 'import sqlalchemy as sa\n'), ((11002, 11014), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (11012, 11014), True, 'import sqlalchemy as sa\n'), ((11063, 11075), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (11073, 11075), True, 'import sqlalchemy as sa\n'), ((11122, 11142), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (11131, 11142), True, 'import sqlalchemy as sa\n'), ((11182, 11194), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (11192, 11194), True, 'import sqlalchemy as sa\n'), ((11235, 11247), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (11245, 11247), True, 'import sqlalchemy as sa\n'), ((11286, 11298), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (11296, 11298), True, 'import sqlalchemy as sa\n'), ((11348, 11727), 'sqlalchemy.Enum', 'sa.Enum', (['"""BankCharges"""', '"""BankTax"""', '"""BillpayOnline"""', '"""CashLodgmentEnvelopes"""', '"""CashLodgmentOther"""', '"""CashLodgmentPlate"""', '"""Cheque"""', '"""DirectDebit"""', '"""DirectPayment"""', '"""DirectTransfer"""', '"""InBranch"""', '"""StandingOrderMonthly"""', '"""StandingOrderOther"""', '"""StandingOrderQuarterly"""', '"""StandingOrders"""', '"""UnrealisedGainLoss"""'], {'name': '"""paymentmethod"""', 'schema': '"""glod"""', 'inherit_schema': '(True)'}), "('BankCharges', 'BankTax', 'BillpayOnline', 'CashLodgmentEnvelopes',\n 'CashLodgmentOther', 'CashLodgmentPlate', 'Cheque', 'DirectDebit',\n 'DirectPayment', 'DirectTransfer', 'InBranch', 'StandingOrderMonthly',\n 'StandingOrderOther', 'StandingOrderQuarterly', 'StandingOrders',\n 'UnrealisedGainLoss', name='paymentmethod', schema='glod',\n inherit_schema=True)\n", (11355, 11727), True, 
'import sqlalchemy as sa\n'), ((11754, 11776), 'sqlalchemy.String', 'sa.String', ([], {'length': '(1024)'}), '(length=1024)\n', (11763, 11776), True, 'import sqlalchemy as sa\n'), ((11818, 11837), 'sqlalchemy.Numeric', 'sa.Numeric', ([], {'scale': '(2)'}), '(scale=2)\n', (11828, 11837), True, 'import sqlalchemy as sa\n'), ((11891, 11989), 'sqlalchemy.Enum', 'sa.Enum', (['"""Income"""', '"""Expenditure"""'], {'name': '"""incomeexpenditure"""', 'schema': '"""glod"""', 'inherit_schema': '(True)'}), "('Income', 'Expenditure', name='incomeexpenditure', schema='glod',\n inherit_schema=True)\n", (11898, 11989), True, 'import sqlalchemy as sa\n'), ((12023, 12043), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (12032, 12043), True, 'import sqlalchemy as sa\n'), ((12087, 12109), 'sqlalchemy.String', 'sa.String', ([], {'length': '(1024)'}), '(length=1024)\n', (12096, 12109), True, 'import sqlalchemy as sa\n'), ((12160, 12172), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (12170, 12172), True, 'import sqlalchemy as sa\n'), ((12218, 12230), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (12228, 12230), True, 'import sqlalchemy as sa\n'), ((12273, 12285), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (12283, 12285), True, 'import sqlalchemy as sa\n'), ((12631, 12643), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (12641, 12643), True, 'import sqlalchemy as sa\n'), ((12694, 12706), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (12704, 12706), True, 'import sqlalchemy as sa\n'), ((12759, 12771), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (12769, 12771), True, 'import sqlalchemy as sa\n')] |
"""Hparams"""
import argparse as ap
import tensorflow as tf
from pathlib import Path
# Absolute path of the current user's home directory.
# NOTE(review): not referenced anywhere in this module as far as visible here.
HOME = str(Path.home())

# Legal values for each categorical hyperparameter; wired into the
# `choices=` of the matching command-line flags in get_hparam_parser().
HPARAM_CHOICES= {
    "model": ["cpdb", "copy", "bdrnn", "cpdb2", "cpdb2_prot"],
    "optimizer": ["adam", "sgd", "adadelta"],
    "unit_type": ["lstm", "lstmblock", "nlstm", "gru"],
    "train_helper": ["teacher", "sched"],
    "sched_decay": ["linear", "expon", "inv_sig"],
    "initializer": ["glorot_normal", "glorot_uniform", "orthogonal"],
    "decoder": ["greedy", "beam"],
    }

# Canonical list of recognized hyperparameter names; also fixes the display
# order used by hparams_to_str().
HPARAMS = ["num_features", "num_labels", "initializer", "dense_input",
        "unit_type", "num_units", "num_layers", "depth", "num_residual_layers",
        "use_highway_as_residual",
        "forget_bias", "dropout", "decoder", "beam_width", "batch_size",
        "num_epochs", "train_helper", "sched_decay", "optimizer",
        "learning_rate", "momentum", "max_gradient_norm",
        "colocate_gradients_with_ops", "num_keep_ckpts",
        "model", "train_file", "valid_file", "infer_file", "modeldir",
        "train_source_file", "train_target_file", "valid_source_file",
        "valid_target_file", "infer_source_file", "infer_target_file"]
def hparams_to_str(hparams):
    """Print every recognized hyperparameter present on *hparams*.

    Walks the canonical HPARAMS name list so output order is stable; names
    the object does not carry are silently skipped.
    """
    print("Hyperparameters")
    attrs = vars(hparams)
    for name in HPARAMS:
        if name in attrs:
            print("\t" + name + ": ", attrs[name])
def str2bool(value):
    """Parse a command-line boolean flag value.

    argparse's ``type=bool`` is a footgun: ``bool("False")`` is ``True``
    because any non-empty string is truthy, so ``--dense_input False`` used
    to silently enable the flag.  This helper accepts the usual spellings
    explicitly and rejects everything else.

    Args:
        value: raw string from the command line (a real bool passes through).

    Returns:
        The parsed boolean.

    Raises:
        argparse.ArgumentTypeError: if the value is not a recognized boolean.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ("true", "t", "yes", "y", "1"):
        return True
    if lowered in ("false", "f", "no", "n", "0"):
        return False
    raise ap.ArgumentTypeError("expected a boolean value, got %r" % (value,))

def get_hparam_parser():
    """Build the argparse parser exposing every hyperparameter as a flag.

    The parser has ``add_help=False`` and ``argument_default=ap.SUPPRESS`` so
    it can be composed as a parent parser and so that unspecified flags are
    absent from the namespace (letting preset HParams values win).

    Returns:
        An ``argparse.ArgumentParser`` with "general", "architecture" and
        "training" argument groups.
    """
    parser = ap.ArgumentParser(description="Hyperparameters", add_help=False,
                               argument_default=ap.SUPPRESS)

    gen_group = parser.add_argument_group("general")
    gen_group.add_argument("-m", "--model", type=str,
                           choices=HPARAM_CHOICES["model"])
    gen_group.add_argument("--train_file", type=str)
    gen_group.add_argument("--valid_file", type=str)
    gen_group.add_argument("--infer_file", type=str)
    gen_group.add_argument("--train_source_file", type=str)
    gen_group.add_argument("--train_target_file", type=str)
    gen_group.add_argument("--valid_source_file", type=str)
    gen_group.add_argument("--valid_target_file", type=str)
    gen_group.add_argument("--infer_source_file", type=str)
    gen_group.add_argument("--infer_target_file", type=str)

    arch_group = parser.add_argument_group("architecture")
    arch_group.add_argument("--num_features", type=int)
    arch_group.add_argument("--num_labels", type=int)
    arch_group.add_argument("--initializer", type=str,
                            choices=HPARAM_CHOICES["initializer"])
    # str2bool instead of bool: see helper above.
    arch_group.add_argument("--dense_input", type=str2bool)
    arch_group.add_argument("--unit_type", type=str,
                            choices=HPARAM_CHOICES["unit_type"])
    arch_group.add_argument("--num_units", type=int)
    arch_group.add_argument("--num_layers", type=int)
    arch_group.add_argument("--depth", type=int)
    arch_group.add_argument("--num_residual_layers", type=int)
    arch_group.add_argument("--use_highway_as_residual", type=str2bool)
    arch_group.add_argument("--forget_bias", type=float)
    arch_group.add_argument("--dropout", type=float)
    # NOTE(review): no choices= restriction here although
    # HPARAM_CHOICES["decoder"] exists — kept as-is; confirm intent.
    arch_group.add_argument("--decoder", type=str)
    arch_group.add_argument("--beam_width", type=int)

    tr_group = parser.add_argument_group("training")
    tr_group.add_argument("--batch_size", type=int)
    tr_group.add_argument("--num_epochs", type=int)
    tr_group.add_argument("--train_helper", type=str,
                          choices=HPARAM_CHOICES["train_helper"])
    tr_group.add_argument("--sched_decay", type=str,
                          choices=HPARAM_CHOICES["sched_decay"])
    tr_group.add_argument("--optimizer", type=str,
                          choices=HPARAM_CHOICES["optimizer"])
    tr_group.add_argument("--learning_rate", type=float)
    tr_group.add_argument("--momentum", type=float)
    tr_group.add_argument("--max_gradient_norm", type=float)
    tr_group.add_argument("--colocate_gradients_with_ops", type=str2bool)
    tr_group.add_argument("--num_keep_ckpts", type=int)
    return parser
def get_hparams(setting):
    """Return the hyperparameter settings given by name.

    Args:
        setting: name of a preset configuration; one of "cpdb", "cpdb2",
            "cpdb2_prot", "copy" or "bdrnn".

    Returns:
        A ``tf.contrib.training.HParams`` bundle for the requested preset.

    Raises:
        ValueError: if ``setting`` is not a recognized preset name.
            (Previously an *empty* HParams object was returned for unknown
            names, which only failed much later inside training; fail fast
            instead.)
    """
    if setting == "cpdb":
        hparams = tf.contrib.training.HParams(
            model="cpdb",
            num_features=43,
            num_labels=9,
            unit_type="lstmblock",
            initializer="glorot_uniform",
            dense_input=True,
            num_units=256,
            num_layers=2,
            num_residual_layers=2,
            use_highway_as_residual=False,
            depth=0,
            forget_bias=1,
            dropout=0.0,
            batch_size=64,
            num_epochs=400,
            optimizer="adadelta",
            learning_rate=0.05,
            momentum=0.0,
            max_gradient_norm=50.,
            colocate_gradients_with_ops=False,
            train_helper="sched",
            sched_decay="none",
            num_keep_ckpts=2,
            train_file="/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_train_1.tfrecords",
            valid_file="/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_valid_1.tfrecords",
            )
    elif setting == "cpdb2":
        hparams = tf.contrib.training.HParams(
            # NOTE(review): model="cpdb" although HPARAM_CHOICES lists a
            # distinct "cpdb2" model — confirm this is intentional.
            model="cpdb",
            num_features=30,
            num_labels=10,
            unit_type="lstmblock",
            initializer="glorot_uniform",
            dense_input=True,
            num_units=256,
            num_layers=2,
            num_residual_layers=2,
            use_highway_as_residual=False,
            depth=0,
            forget_bias=1,
            dropout=0.0,
            batch_size=64,
            num_epochs=400,
            optimizer="adadelta",
            learning_rate=0.05,
            momentum=0.0,
            max_gradient_norm=50.,
            colocate_gradients_with_ops=False,
            train_helper="sched",
            sched_decay="none",
            num_keep_ckpts=2,
            train_file="/home/dillon/data/cpdb2/tfrecords/cpdb2_14335_train_1.tfrecords",
            valid_file="/home/dillon/data/cpdb2/tfrecords/cpdb2_14335_valid_1.tfrecords",
            )
    elif setting == "cpdb2_prot":
        hparams = tf.contrib.training.HParams(
            model="cpdb2_prot",
            num_features=30,
            num_labels=10,
            unit_type="lstmblock",
            initializer="glorot_uniform",
            dense_input=True,
            num_units=256,
            num_layers=2,
            num_residual_layers=2,
            use_highway_as_residual=False,
            depth=0,
            forget_bias=1,
            dropout=0.0,
            batch_size=64,
            num_epochs=400,
            optimizer="adadelta",
            learning_rate=0.05,
            momentum=0.0,
            max_gradient_norm=50.,
            colocate_gradients_with_ops=False,
            train_helper="sched",
            sched_decay="none",
            num_keep_ckpts=2,
            train_source_file="/home/dillon/data/cpdb2/cpdb2_train_source.txt",
            train_target_file="/home/dillon/data/cpdb2/cpdb2_train_target.txt",
            valid_source_file="/home/dillon/data/cpdb2/cpdb2_valid_source.txt",
            valid_target_file="/home/dillon/data/cpdb2/cpdb2_valid_target.txt",
            )
    elif setting == "copy":
        # Synthetic copy task — small vocabulary, sparse (one-hot) input.
        hparams = tf.contrib.training.HParams(
            model="copy",
            num_features=12,
            num_labels=12,
            unit_type="nlstm",
            initializer="glorot_uniform",
            dense_input=False,
            num_units=128,
            num_layers=1,
            num_residual_layers=0,
            depth=3,
            forget_bias=1,
            dropout=0.0,
            batch_size=100,
            num_epochs=500,
            optimizer="sgd",
            learning_rate=0.5,
            momentum=0.,
            max_gradient_norm=1.0,
            colocate_gradients_with_ops=False,
            train_helper="sched",
            sched_decay="linear",
            num_keep_ckpts=1,
            train_file="/home/dillon/data/synthetic/copy/train_100L_10k.tfrecords",
            valid_file="/home/dillon/data/synthetic/copy/valid_100L_1k.tfrecords",
            )
    elif setting == "bdrnn":
        hparams = tf.contrib.training.HParams(
            model="bdrnn",
            num_features=43,
            num_labels=9,
            unit_type="lstmblock",
            initializer="glorot_uniform",
            num_units=300,
            num_layers=3,
            forget_bias=1,
            num_dense_units=200,
            dropout=0.5,
            batch_size=128,
            num_epochs=100,
            optimizer="adadelta",
            learning_rate=1.,
            max_gradient_norm=0.5,
            colocate_gradients_with_ops=False,
            num_keep_ckpts=4,
            train_helper="teacher",
            train_file="/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_train_1.tfrecords",
            valid_file="/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_valid_1.tfrecords",
            )
    else:
        raise ValueError(
            "Unknown hparams setting: %r; expected one of "
            "'cpdb', 'cpdb2', 'cpdb2_prot', 'copy', 'bdrnn'" % (setting,))
    return hparams
| [
"pathlib.Path.home",
"tensorflow.contrib.training.HParams",
"argparse.ArgumentParser"
] | [((97, 108), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (106, 108), False, 'from pathlib import Path\n'), ((1415, 1513), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {'description': '"""Hyperparameters"""', 'add_help': '(False)', 'argument_default': 'ap.SUPPRESS'}), "(description='Hyperparameters', add_help=False,\n argument_default=ap.SUPPRESS)\n", (1432, 1513), True, 'import argparse as ap\n'), ((4110, 4139), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {}), '()\n', (4137, 4139), True, 'import tensorflow as tf\n'), ((4184, 4845), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'model': '"""cpdb"""', 'num_features': '(43)', 'num_labels': '(9)', 'unit_type': '"""lstmblock"""', 'initializer': '"""glorot_uniform"""', 'dense_input': '(True)', 'num_units': '(256)', 'num_layers': '(2)', 'num_residual_layers': '(2)', 'use_highway_as_residual': '(False)', 'depth': '(0)', 'forget_bias': '(1)', 'dropout': '(0.0)', 'batch_size': '(64)', 'num_epochs': '(400)', 'optimizer': '"""adadelta"""', 'learning_rate': '(0.05)', 'momentum': '(0.0)', 'max_gradient_norm': '(50.0)', 'colocate_gradients_with_ops': '(False)', 'train_helper': '"""sched"""', 'sched_decay': '"""none"""', 'num_keep_ckpts': '(2)', 'train_file': '"""/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_train_1.tfrecords"""', 'valid_file': '"""/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_valid_1.tfrecords"""'}), "(model='cpdb', num_features=43, num_labels=9,\n unit_type='lstmblock', initializer='glorot_uniform', dense_input=True,\n num_units=256, num_layers=2, num_residual_layers=2,\n use_highway_as_residual=False, depth=0, forget_bias=1, dropout=0.0,\n batch_size=64, num_epochs=400, optimizer='adadelta', learning_rate=0.05,\n momentum=0.0, max_gradient_norm=50.0, colocate_gradients_with_ops=False,\n train_helper='sched', sched_decay='none', num_keep_ckpts=2, train_file=\n '/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_train_1.tfrecords',\n 
valid_file='/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_valid_1.tfrecords'\n )\n", (4211, 4845), True, 'import tensorflow as tf\n'), ((5165, 5829), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'model': '"""cpdb"""', 'num_features': '(30)', 'num_labels': '(10)', 'unit_type': '"""lstmblock"""', 'initializer': '"""glorot_uniform"""', 'dense_input': '(True)', 'num_units': '(256)', 'num_layers': '(2)', 'num_residual_layers': '(2)', 'use_highway_as_residual': '(False)', 'depth': '(0)', 'forget_bias': '(1)', 'dropout': '(0.0)', 'batch_size': '(64)', 'num_epochs': '(400)', 'optimizer': '"""adadelta"""', 'learning_rate': '(0.05)', 'momentum': '(0.0)', 'max_gradient_norm': '(50.0)', 'colocate_gradients_with_ops': '(False)', 'train_helper': '"""sched"""', 'sched_decay': '"""none"""', 'num_keep_ckpts': '(2)', 'train_file': '"""/home/dillon/data/cpdb2/tfrecords/cpdb2_14335_train_1.tfrecords"""', 'valid_file': '"""/home/dillon/data/cpdb2/tfrecords/cpdb2_14335_valid_1.tfrecords"""'}), "(model='cpdb', num_features=30, num_labels=10,\n unit_type='lstmblock', initializer='glorot_uniform', dense_input=True,\n num_units=256, num_layers=2, num_residual_layers=2,\n use_highway_as_residual=False, depth=0, forget_bias=1, dropout=0.0,\n batch_size=64, num_epochs=400, optimizer='adadelta', learning_rate=0.05,\n momentum=0.0, max_gradient_norm=50.0, colocate_gradients_with_ops=False,\n train_helper='sched', sched_decay='none', num_keep_ckpts=2, train_file=\n '/home/dillon/data/cpdb2/tfrecords/cpdb2_14335_train_1.tfrecords',\n valid_file=\n '/home/dillon/data/cpdb2/tfrecords/cpdb2_14335_valid_1.tfrecords')\n", (5192, 5829), True, 'import tensorflow as tf\n'), ((6154, 6944), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'model': '"""cpdb2_prot"""', 'num_features': '(30)', 'num_labels': '(10)', 'unit_type': '"""lstmblock"""', 'initializer': '"""glorot_uniform"""', 'dense_input': '(True)', 'num_units': '(256)', 'num_layers': '(2)', 
'num_residual_layers': '(2)', 'use_highway_as_residual': '(False)', 'depth': '(0)', 'forget_bias': '(1)', 'dropout': '(0.0)', 'batch_size': '(64)', 'num_epochs': '(400)', 'optimizer': '"""adadelta"""', 'learning_rate': '(0.05)', 'momentum': '(0.0)', 'max_gradient_norm': '(50.0)', 'colocate_gradients_with_ops': '(False)', 'train_helper': '"""sched"""', 'sched_decay': '"""none"""', 'num_keep_ckpts': '(2)', 'train_source_file': '"""/home/dillon/data/cpdb2/cpdb2_train_source.txt"""', 'train_target_file': '"""/home/dillon/data/cpdb2/cpdb2_train_target.txt"""', 'valid_source_file': '"""/home/dillon/data/cpdb2/cpdb2_valid_source.txt"""', 'valid_target_file': '"""/home/dillon/data/cpdb2/cpdb2_valid_target.txt"""'}), "(model='cpdb2_prot', num_features=30, num_labels\n =10, unit_type='lstmblock', initializer='glorot_uniform', dense_input=\n True, num_units=256, num_layers=2, num_residual_layers=2,\n use_highway_as_residual=False, depth=0, forget_bias=1, dropout=0.0,\n batch_size=64, num_epochs=400, optimizer='adadelta', learning_rate=0.05,\n momentum=0.0, max_gradient_norm=50.0, colocate_gradients_with_ops=False,\n train_helper='sched', sched_decay='none', num_keep_ckpts=2,\n train_source_file='/home/dillon/data/cpdb2/cpdb2_train_source.txt',\n train_target_file='/home/dillon/data/cpdb2/cpdb2_train_target.txt',\n valid_source_file='/home/dillon/data/cpdb2/cpdb2_valid_source.txt',\n valid_target_file='/home/dillon/data/cpdb2/cpdb2_valid_target.txt')\n", (6181, 6944), True, 'import tensorflow as tf\n'), ((7283, 7894), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'model': '"""copy"""', 'num_features': '(12)', 'num_labels': '(12)', 'unit_type': '"""nlstm"""', 'initializer': '"""glorot_uniform"""', 'dense_input': '(False)', 'num_units': '(128)', 'num_layers': '(1)', 'num_residual_layers': '(0)', 'depth': '(3)', 'forget_bias': '(1)', 'dropout': '(0.0)', 'batch_size': '(100)', 'num_epochs': '(500)', 'optimizer': '"""sgd"""', 'learning_rate': '(0.5)', 
'momentum': '(0.0)', 'max_gradient_norm': '(1.0)', 'colocate_gradients_with_ops': '(False)', 'train_helper': '"""sched"""', 'sched_decay': '"""linear"""', 'num_keep_ckpts': '(1)', 'train_file': '"""/home/dillon/data/synthetic/copy/train_100L_10k.tfrecords"""', 'valid_file': '"""/home/dillon/data/synthetic/copy/valid_100L_1k.tfrecords"""'}), "(model='copy', num_features=12, num_labels=12,\n unit_type='nlstm', initializer='glorot_uniform', dense_input=False,\n num_units=128, num_layers=1, num_residual_layers=0, depth=3,\n forget_bias=1, dropout=0.0, batch_size=100, num_epochs=500, optimizer=\n 'sgd', learning_rate=0.5, momentum=0.0, max_gradient_norm=1.0,\n colocate_gradients_with_ops=False, train_helper='sched', sched_decay=\n 'linear', num_keep_ckpts=1, train_file=\n '/home/dillon/data/synthetic/copy/train_100L_10k.tfrecords', valid_file\n ='/home/dillon/data/synthetic/copy/valid_100L_1k.tfrecords')\n", (7310, 7894), True, 'import tensorflow as tf\n'), ((8205, 8770), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'model': '"""bdrnn"""', 'num_features': '(43)', 'num_labels': '(9)', 'unit_type': '"""lstmblock"""', 'initializer': '"""glorot_uniform"""', 'num_units': '(300)', 'num_layers': '(3)', 'forget_bias': '(1)', 'num_dense_units': '(200)', 'dropout': '(0.5)', 'batch_size': '(128)', 'num_epochs': '(100)', 'optimizer': '"""adadelta"""', 'learning_rate': '(1.0)', 'max_gradient_norm': '(0.5)', 'colocate_gradients_with_ops': '(False)', 'num_keep_ckpts': '(4)', 'train_helper': '"""teacher"""', 'train_file': '"""/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_train_1.tfrecords"""', 'valid_file': '"""/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_valid_1.tfrecords"""'}), "(model='bdrnn', num_features=43, num_labels=9,\n unit_type='lstmblock', initializer='glorot_uniform', num_units=300,\n num_layers=3, forget_bias=1, num_dense_units=200, dropout=0.5,\n batch_size=128, num_epochs=100, optimizer='adadelta', learning_rate=1.0,\n 
max_gradient_norm=0.5, colocate_gradients_with_ops=False,\n num_keep_ckpts=4, train_helper='teacher', train_file=\n '/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_train_1.tfrecords',\n valid_file='/home/dillon/data/cpdb/cv_5/cpdb_6133_filter_valid_1.tfrecords'\n )\n", (8232, 8770), True, 'import tensorflow as tf\n')] |
from kivy.lang import Builder
from kivy.metrics import dp
from kivy import properties as p
from kivy.animation import Animation
from kivymd.app import MDApp as App
from kivymd.uix.screen import MDScreen
class HomeMainScreen(MDScreen):
    """Root screen whose background offset can be animated up and down."""

    # Animated vertical offset of the background (0 = resting position).
    bg_pos = p.NumericProperty(0)

    def toggle_bg_pos(self):
        """Animate ``bg_pos`` between 0 and half of the screen height."""
        if self.bg_pos > 0:
            target = 0
        else:
            target = dp(self.height / 2)
        Animation(bg_pos=target).start(self)
# Load the screen's KV layout rules from disk at import time, before the
# application class below is instantiated.
with open('views/home.kv', encoding='utf-8') as f:
    Builder.load_string(f.read())
class HomeScreenApp(App):
    """KivyMD application whose root widget is a ``HomeMainScreen``."""
    def build(self):
        # Return the root widget of the application window.
        return HomeMainScreen()
def main():
    """Entry point: create and run the KivyMD home-screen app (blocks)."""
    HomeScreenApp().run()
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    main()
| [
"kivy.animation.Animation",
"kivy.metrics.dp",
"kivy.properties.NumericProperty"
] | [((251, 271), 'kivy.properties.NumericProperty', 'p.NumericProperty', (['(0)'], {}), '(0)\n', (268, 271), True, 'from kivy import properties as p\n'), ((349, 368), 'kivy.metrics.dp', 'dp', (['(self.height / 2)'], {}), '(self.height / 2)\n', (351, 368), False, 'from kivy.metrics import dp\n'), ((375, 399), 'kivy.animation.Animation', 'Animation', ([], {'bg_pos': 'bg_pos'}), '(bg_pos=bg_pos)\n', (384, 399), False, 'from kivy.animation import Animation\n')] |
# def draw_nx(g, labels=None):
# import matplotlib.pyplot as plt
# if labels is not None:
# g = nx.relabel_nodes(g, labels)
# pos = nx.kamada_kawai_layout(g)
# nx.draw(g, pos, with_labels=True)
# plt.show()
#
# def draw_nx_attributes_as_labels(g, attribute):
# # import pylab
# import matplotlib.pyplot as plt
# import networkx as nx
# labels = nx.get_node_attributes(g, attribute)
# pos = nx.kamada_kawai_layout(g)
# nx.draw(g, pos, labels=labels, with_labels=True)
# # nx.draw(g, labels=labels)
# # pylab.show()
# plt.show()
#
# def draw_nx_with_pygraphviz(g, path2file=None, save_file=False):
# attribute_name = None
# draw_nx_with_pygraphviz_attribtes_as_labels(g, attribute_name, path2file, save_file)
#
# def draw_nx_with_pygraphviz_attribtes_as_labels(g, attribute_name, path2file=None, save_file=False):
# import matplotlib.pyplot as plt
# import matplotlib.image as mpimg
#
# # https://stackoverflow.com/questions/15345192/draw-more-information-on-graph-nodes-using-pygraphviz
# # https://stackoverflow.com/a/67442702/1601580
#
# if path2file is None:
# path2file = './example.png'
# path2file = Path(path2file).expanduser()
# save_file = True
# if type(path2file) == str:
# path2file = Path(path2file).expanduser()
# save_file = True
#
# print(f'\n{g.is_directed()=}')
# g = nx.nx_agraph.to_agraph(g)
# if attribute_name is not None:
# print(f'{g=}')
# # to label in pygrapviz make sure to have the AGraph obj have the label attribute set on the nodes
# g = str(g)
# g = g.replace(attribute_name, 'label')
# print(g)
# # g = pgv.AGraph(g)
# g = pgv.AGraph(g)
# g.layout()
# g.draw(path2file)
#
# # https://stackoverflow.com/questions/20597088/display-a-png-image-from-python-on-mint-15-linux
# img = mpimg.imread(path2file)
# plt.imshow(img)
# plt.show()
#
# # remove file https://stackoverflow.com/questions/6996603/how-to-delete-a-file-or-folder
# if not save_file:
# path2file.unlink()
# tests
def test1():
    """Build a tiny 3-node PyTorch Geometric graph and render it with networkx."""
    # Requires: conda install -y pytorch-geometric -c rusty1s -c conda-forge
    import torch
    import networkx as nx
    from torch_geometric.data import Data
    from torch_geometric.utils.convert import to_networkx

    # Edge list in COO form, shape [2, num_edges]; column (a, b) is edge a -> b
    # (dtype must be torch.long for edge indices).
    edges = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]], dtype=torch.long)
    # One scalar feature per node, shape [num_nodes, 1].
    features = torch.tensor([[0.0], [-1.0], [1.0]])
    data = Data(x=features, edge_index=edges)
    print(data)
    # Convert to a networkx graph for a quick visual sanity check.
    # https://stackoverflow.com/questions/61274847/how-to-visualize-a-torch-geometric-graph-in-python
    nx.draw(to_networkx(data))
# Run the demo when executed as a script.
if __name__ == '__main__':
    test1()
print("Done\a") | [
"torch.tensor",
"networkx.draw",
"torch_geometric.data.Data",
"torch_geometric.utils.convert.to_networkx"
] | [((2444, 2504), 'torch.tensor', 'torch.tensor', (['[[0, 1, 1, 2], [1, 0, 2, 1]]'], {'dtype': 'torch.long'}), '([[0, 1, 1, 2], [1, 0, 2, 1]], dtype=torch.long)\n', (2456, 2504), False, 'import torch\n'), ((2589, 2625), 'torch.tensor', 'torch.tensor', (['[[0.0], [-1.0], [1.0]]'], {}), '([[0.0], [-1.0], [1.0]])\n', (2601, 2625), False, 'import torch\n'), ((2638, 2670), 'torch_geometric.data.Data', 'Data', ([], {'x': 'x', 'edge_index': 'edge_index'}), '(x=x, edge_index=edge_index)\n', (2642, 2670), False, 'from torch_geometric.data import Data\n'), ((2944, 2961), 'torch_geometric.utils.convert.to_networkx', 'to_networkx', (['data'], {}), '(data)\n', (2955, 2961), False, 'from torch_geometric.utils.convert import to_networkx\n'), ((2966, 2976), 'networkx.draw', 'nx.draw', (['g'], {}), '(g)\n', (2973, 2976), True, 'import networkx as nx\n')] |
# Copyright 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.node import Node
from ros_mstar.srv import MStarSrv
import sys
class MinimalClientAsync(Node):
    """Asynchronous ROS 2 client for the ``MStarSrv`` path-planning service."""

    def __init__(self, mstar_service='mstar_service'):
        """Create the node and block until the service becomes available.

        Args:
            mstar_service: name of the MStarSrv service to connect to.
                New optional parameter with a default, so existing
                ``MinimalClientAsync()`` callers keep working; the original
                code referenced an undefined global ``mstar_service`` here,
                which raised ``NameError`` at construction time.
        """
        super().__init__('minimal_client_async')
        self.cli = self.create_client(MStarSrv, mstar_service)
        while not self.cli.wait_for_service(timeout_sec=1.0):
            self.get_logger().info('service not available, waiting again...')
        self.req = MStarSrv.Request()

    def send_request(self, start1_x, start1_y, goal1_x, goal1_y, start2_x, start2_y, goal2_x, goal2y):
        """Fill the request message and invoke the service asynchronously.

        The original body assigned to a bare local ``req`` that was never
        defined (``NameError``); every field now goes to ``self.req``.
        The resulting future is stored on ``self.future`` for the caller
        to poll.
        """
        self.req.start1_x = start1_x
        self.req.start1_y = start1_y
        self.req.goal1_x = goal1_x
        self.req.goal1_y = goal1_y
        self.req.start2_x = start2_x
        self.req.start2_y = start2_y
        self.req.goal2_x = goal2_x
        # NOTE: field name 'goal2y' (no underscore) mirrors the service
        # definition as used by the original code.
        self.req.goal2y = goal2y
        self.future = self.cli.call_async(self.req)
def main(args=None):
    """Parse CLI args, call the MStar service once, and print both paths.

    Expected args: [service_name, start1_x, start1_y, goal1_x, goal1_y,
    start2_x, start2_y, goal2_x, goal2y].

    NOTE(review): the default ``args=None`` would crash on ``args[0]``
    below — callers must pass a real argument list.
    """
    rclpy.init(args=args)
    # NOTE(review): mstar_service is parsed here but never handed to
    # MinimalClientAsync(), whose __init__ reads an undefined global of the
    # same name — confirm how the service name is meant to reach the client.
    mstar_service = args[0]
    start1_x = float(args[1])
    start1_y = float(args[2])
    goal1_x = float(args[3])
    goal1_y = float(args[4])
    start2_x = float(args[5])
    start2_y = float(args[6])
    goal2_x = float(args[7])
    goal2y = float(args[8])
    minimal_client = MinimalClientAsync()
    minimal_client.send_request(start1_x, start1_y, goal1_x, goal1_y, start2_x, start2_y, goal2_x, goal2y)
    # Spin until the async service call completes, then report the result.
    while rclpy.ok():
        rclpy.spin_once(minimal_client)
        if minimal_client.future.done():
            if minimal_client.future.result() is not None:
                response = minimal_client.future.result()
                minimal_client.get_logger().info(
                    "Path 1: " + str(response.r1_path))
                minimal_client.get_logger().info(
                    "Path 2: " + str(response.r2_path))
            else:
                minimal_client.get_logger().info(
                    'Service call failed %r' % (minimal_client.future.exception(),))
            break
    # Clean shutdown of the node and the rclpy context.
    minimal_client.destroy_node()
    rclpy.shutdown()
# Script entry point: forward CLI args (program name stripped) to main().
if __name__ == '__main__':
main(sys.argv[1:]) | [
"rclpy.ok",
"ros_mstar.srv.MStarSrv.Request",
"rclpy.spin_once",
"rclpy.init",
"rclpy.shutdown"
] | [((1467, 1488), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (1477, 1488), False, 'import rclpy\n'), ((1914, 1924), 'rclpy.ok', 'rclpy.ok', ([], {}), '()\n', (1922, 1924), False, 'import rclpy\n'), ((2546, 2562), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (2560, 2562), False, 'import rclpy\n'), ((1019, 1037), 'ros_mstar.srv.MStarSrv.Request', 'MStarSrv.Request', ([], {}), '()\n', (1035, 1037), False, 'from ros_mstar.srv import MStarSrv\n'), ((1934, 1965), 'rclpy.spin_once', 'rclpy.spin_once', (['minimal_client'], {}), '(minimal_client)\n', (1949, 1965), False, 'import rclpy\n')] |
import atexit
import contextlib
import time
from typing import Any, List, Type
from unittest import mock
import pytest
import region_profiler.global_instance
import region_profiler.profiler
from region_profiler import RegionProfiler, func
from region_profiler import install as install_profiler
from region_profiler import iter_proxy, region
from region_profiler import reporter_columns as cols
from region_profiler.reporters import SilentReporter
from region_profiler.utils import Timer
def get_timer_cls(use_cython: bool) -> Type[Timer]:
    """Return the timer class to use; Cython timers are no longer shipped."""
    if not use_cython:
        return Timer
    raise RuntimeError("Cython support is dropped")
@contextlib.contextmanager
def fresh_region_profiler(monkeypatch):
    """Reset ``region_profiler``'s global state around an integration test."""
    # Drop any previously installed global profiler.
    region_profiler.global_instance._profiler = None
    # Intercept atexit registrations so the hooks never reach real atexit.
    captured = []
    monkeypatch.setattr(atexit, "register", lambda foo: captured.append(foo))
    yield None
    # Invoke the captured hooks in LIFO order, mirroring atexit semantics.
    for hook in reversed(captured):
        hook()
@pytest.mark.parametrize("multiple_runs", [0, 1, 2])
def test_reload_works(monkeypatch, multiple_runs):
    """Check that ``fresh_region_profiler`` fully resets the global profiler,
    run several times to prove the reset is repeatable.
    """
    sink = SilentReporter([cols.name])
    with fresh_region_profiler(monkeypatch):
        # Inside the context the global profiler starts out cleared...
        assert region_profiler.global_instance._profiler is None
        install_profiler(sink)
        # ...and installing recreates a proper RegionProfiler instance.
        assert isinstance(region_profiler.global_instance._profiler, RegionProfiler)
    assert sink.rows == [["name"], [RegionProfiler.ROOT_NODE_NAME]]
@pytest.mark.parametrize("use_cython", [False])
def test_with_fake_timer(monkeypatch, use_cython):
    """Integration test with a fake timer.

    The mock clock returns 0, 1, 2, ... on successive calls, so every
    region duration is fully deterministic and the report can be compared
    to exact microsecond strings.
    """
    reporter = SilentReporter(
        [cols.name, cols.total_us, cols.total_inner_us, cols.count]
    )
    # Each clock read advances by exactly 1 (second) per call.
    mock_clock = mock.Mock()
    mock_clock.side_effect = list(range(0, 100, 1))
    @func()
    def foo():
        with region("a"):
            for i in iter_proxy([1, 2, 3], "iter"):
                with region("b"):
                    pass
            with region("b"):
                pass
    with fresh_region_profiler(monkeypatch):
        install_profiler(
            reporter=reporter, timer_cls=lambda: get_timer_cls(use_cython)(mock_clock)
        )
        foo()
        with region("x"):
            pass
        foo()
    # Exact expected report rows, given the 1-tick-per-clock-read sequence.
    expected = [
        ["name", "total_us", "total_inner_us", "count"],
        [RegionProfiler.ROOT_NODE_NAME, "54000000", "5000000", "1"],
        ["foo()", "48000000", "4000000", "2"],
        ["a", "44000000", "26000000", "2"],
        ["b", "12000000", "12000000", "12"],
        ["iter", "6000000", "6000000", "6"],
        ["x", "1000000", "1000000", "1"],
    ]
    assert reporter.rows == expected
@pytest.mark.parametrize("use_cython", [False])
def test_with_global_regions(monkeypatch, use_cython):
    """Integration test with regions marked as globals.

    Regions created with ``asglobal=True`` are attached directly under the
    root node rather than nested in the caller's region; the deterministic
    mock clock makes the expected durations exact.
    """
    reporter = SilentReporter(
        [cols.name, cols.total_us, cols.total_inner_us, cols.count]
    )
    # Each clock read advances by exactly 1 (second) per call.
    mock_clock = mock.Mock()
    mock_clock.side_effect = list(range(0, 100, 1))
    @func(asglobal=True)
    def bar():
        with region("a"):
            with region("bar_global", asglobal=True):
                for i in iter_proxy([1, 2, 3], "iter", asglobal=True):
                    pass
    @func()
    def foo():
        with region("a"):
            for i in iter_proxy([1, 2, 3], "iter"):
                with region("b"):
                    pass
            with region("b"):
                pass
        bar()
    with fresh_region_profiler(monkeypatch):
        install_profiler(
            reporter=reporter, timer_cls=lambda: get_timer_cls(use_cython)(mock_clock)
        )
        foo()
        with region("x"):
            pass
        foo()
    # Note bar() appearing as a top-level sibling of foo(): it was declared
    # asglobal=True, so it is not nested under its caller.
    expected = [
        ["name", "total_us", "total_inner_us", "count"],
        [RegionProfiler.ROOT_NODE_NAME, "84000000", "0", "1"],
        ["foo()", "78000000", "4000000", "2"],
        ["a", "74000000", "56000000", "2"],
        ["b", "12000000", "12000000", "12"],
        ["iter", "6000000", "6000000", "6"],
        ["bar()", "28000000", "4000000", "2"],
        ["a", "24000000", "24000000", "2"],
        ["bar_global", "20000000", "20000000", "2"],
        ["iter", "6000000", "6000000", "6"],
        ["x", "1000000", "1000000", "1"],
    ]
    assert reporter.rows == expected
@pytest.mark.parametrize("use_cython", [False])
def test_with_real_timer(monkeypatch, use_cython):
    """Integration test with a real timer.

    Uses actual ``time.sleep`` calls, so the measured durations are only
    approximately known; assertions allow a tolerance band instead of
    exact equality.
    """
    reporter = SilentReporter(
        [cols.name, cols.total_us, cols.total_inner_us, cols.count]
    )
    def slow_iter(iterable):
        # Generator that sleeps 0.1s before yielding each item, so the
        # "iter" region accumulates measurable time.
        for x in iterable:
            time.sleep(0.1)
            yield x
    @func()
    def foo():
        time.sleep(0.02)
        with region("a"):
            time.sleep(0.02)
            for i in iter_proxy(slow_iter([0.1, 0.2, 0.3]), "iter"):
                with region("b"):
                    time.sleep(i)
    with fresh_region_profiler(monkeypatch):
        install_profiler(reporter)
        foo()
        with region("x"):
            time.sleep(0.5)
        foo()
    # Expected rows: [name, total_us, total_inner_us, count]; durations are
    # nominal sums of the sleeps above.
    expected: List[List[Any]] = [
        [RegionProfiler.ROOT_NODE_NAME, 2380000, 0, "1"],
        ["foo()", 1880000, 40000, "2"],
        ["a", 1840000, 40000, "2"],
        ["b", 1200000, 1200000, "6"],
        ["iter", 600000, 600000, "6"],
        ["x", 500000, 500000, "1"],
    ]
    # (fresh_region_profiler calls dump_profiler)
    rows = reporter.rows[1:]  # type: ignore[index]
    # Tolerance: measured time may be up to 1% under or 3% + 5ms over the
    # nominal sleep totals (scheduler jitter).
    lower = 0.99
    upper = 1.03
    upper_delta = 5000
    assert len(rows) == len(expected)
    print(rows)
    for i, (r, e) in enumerate(zip(rows, expected)):
        assert r[0] == e[0]
        assert r[3] == e[3]
        if i == 0:
            # Root row: only a lower bound, since it includes test overhead.
            assert int(r[1]) > e[1]
        else:
            assert e[1] * lower <= int(r[1]) <= e[1] * upper + upper_delta
        assert e[2] * lower <= int(r[2]) <= e[2] * upper + upper_delta
@pytest.mark.parametrize("use_cython", [False])
def test_automatic_naming(monkeypatch, use_cython):
    """Integration test with regions with automatic naming.

    ``region()`` and ``iter_proxy()`` called without an explicit name should
    derive one from the call site (file name and line number).
    """
    reporter = SilentReporter([cols.name])
    # Deterministic clock so the run completes without real timing.
    mock_clock = mock.Mock()
    mock_clock.side_effect = list(range(0, 100, 1))
    @func()
    def foo():
        with region():
            for i in iter_proxy([1, 2, 3]):
                pass
    with fresh_region_profiler(monkeypatch):
        install_profiler(
            reporter=reporter, timer_cls=lambda: get_timer_cls(use_cython)(mock_clock)
        )
        foo()
    # Auto-generated names embed the call-site file and line number.
    expected = [
        ["name"],
        [RegionProfiler.ROOT_NODE_NAME],
        ["foo()"],
        ["foo() <test_module.py:198>"],
        ["foo() <test_module.py:199>"],
    ]
    assert reporter.rows == expected
| [
"region_profiler.reporters.SilentReporter",
"unittest.mock.Mock",
"region_profiler.install",
"region_profiler.func",
"time.sleep",
"pytest.mark.parametrize",
"region_profiler.region",
"region_profiler.iter_proxy"
] | [((1040, 1091), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""multiple_runs"""', '[0, 1, 2]'], {}), "('multiple_runs', [0, 1, 2])\n", (1063, 1091), False, 'import pytest\n'), ((1589, 1635), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_cython"""', '[False]'], {}), "('use_cython', [False])\n", (1612, 1635), False, 'import pytest\n'), ((2797, 2843), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_cython"""', '[False]'], {}), "('use_cython', [False])\n", (2820, 2843), False, 'import pytest\n'), ((4440, 4486), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_cython"""', '[False]'], {}), "('use_cython', [False])\n", (4463, 4486), False, 'import pytest\n'), ((6035, 6081), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_cython"""', '[False]'], {}), "('use_cython', [False])\n", (6058, 6081), False, 'import pytest\n'), ((1256, 1283), 'region_profiler.reporters.SilentReporter', 'SilentReporter', (['[cols.name]'], {}), '([cols.name])\n', (1270, 1283), False, 'from region_profiler.reporters import SilentReporter\n'), ((1748, 1823), 'region_profiler.reporters.SilentReporter', 'SilentReporter', (['[cols.name, cols.total_us, cols.total_inner_us, cols.count]'], {}), '([cols.name, cols.total_us, cols.total_inner_us, cols.count])\n', (1762, 1823), False, 'from region_profiler.reporters import SilentReporter\n'), ((1855, 1866), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1864, 1866), False, 'from unittest import mock\n'), ((1925, 1931), 'region_profiler.func', 'func', ([], {}), '()\n', (1929, 1931), False, 'from region_profiler import RegionProfiler, func\n'), ((2973, 3048), 'region_profiler.reporters.SilentReporter', 'SilentReporter', (['[cols.name, cols.total_us, cols.total_inner_us, cols.count]'], {}), '([cols.name, cols.total_us, cols.total_inner_us, cols.count])\n', (2987, 3048), False, 'from region_profiler.reporters import SilentReporter\n'), ((3080, 3091), 'unittest.mock.Mock', 
'mock.Mock', ([], {}), '()\n', (3089, 3091), False, 'from unittest import mock\n'), ((3150, 3169), 'region_profiler.func', 'func', ([], {'asglobal': '(True)'}), '(asglobal=True)\n', (3154, 3169), False, 'from region_profiler import RegionProfiler, func\n'), ((3367, 3373), 'region_profiler.func', 'func', ([], {}), '()\n', (3371, 3373), False, 'from region_profiler import RegionProfiler, func\n'), ((4599, 4674), 'region_profiler.reporters.SilentReporter', 'SilentReporter', (['[cols.name, cols.total_us, cols.total_inner_us, cols.count]'], {}), '([cols.name, cols.total_us, cols.total_inner_us, cols.count])\n', (4613, 4674), False, 'from region_profiler.reporters import SilentReporter\n'), ((4800, 4806), 'region_profiler.func', 'func', ([], {}), '()\n', (4804, 4806), False, 'from region_profiler import RegionProfiler, func\n'), ((6212, 6239), 'region_profiler.reporters.SilentReporter', 'SilentReporter', (['[cols.name]'], {}), '([cols.name])\n', (6226, 6239), False, 'from region_profiler.reporters import SilentReporter\n'), ((6257, 6268), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (6266, 6268), False, 'from unittest import mock\n'), ((6327, 6333), 'region_profiler.func', 'func', ([], {}), '()\n', (6331, 6333), False, 'from region_profiler import RegionProfiler, func\n'), ((1402, 1428), 'region_profiler.install', 'install_profiler', (['reporter'], {}), '(reporter)\n', (1418, 1428), True, 'from region_profiler import install as install_profiler\n'), ((4830, 4846), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (4840, 4846), False, 'import time\n'), ((5093, 5119), 'region_profiler.install', 'install_profiler', (['reporter'], {}), '(reporter)\n', (5109, 5119), True, 'from region_profiler import install as install_profiler\n'), ((1960, 1971), 'region_profiler.region', 'region', (['"""a"""'], {}), "('a')\n", (1966, 1971), False, 'from region_profiler import iter_proxy, region\n'), ((1994, 2023), 'region_profiler.iter_proxy', 'iter_proxy', (['[1, 2, 3]', 
'"""iter"""'], {}), "([1, 2, 3], 'iter')\n", (2004, 2023), False, 'from region_profiler import iter_proxy, region\n'), ((2339, 2350), 'region_profiler.region', 'region', (['"""x"""'], {}), "('x')\n", (2345, 2350), False, 'from region_profiler import iter_proxy, region\n'), ((3198, 3209), 'region_profiler.region', 'region', (['"""a"""'], {}), "('a')\n", (3204, 3209), False, 'from region_profiler import iter_proxy, region\n'), ((3402, 3413), 'region_profiler.region', 'region', (['"""a"""'], {}), "('a')\n", (3408, 3413), False, 'from region_profiler import iter_proxy, region\n'), ((3436, 3465), 'region_profiler.iter_proxy', 'iter_proxy', (['[1, 2, 3]', '"""iter"""'], {}), "([1, 2, 3], 'iter')\n", (3446, 3465), False, 'from region_profiler import iter_proxy, region\n'), ((3799, 3810), 'region_profiler.region', 'region', (['"""x"""'], {}), "('x')\n", (3805, 3810), False, 'from region_profiler import iter_proxy, region\n'), ((4758, 4773), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4768, 4773), False, 'import time\n'), ((4860, 4871), 'region_profiler.region', 'region', (['"""a"""'], {}), "('a')\n", (4866, 4871), False, 'from region_profiler import iter_proxy, region\n'), ((4885, 4901), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (4895, 4901), False, 'import time\n'), ((5147, 5158), 'region_profiler.region', 'region', (['"""x"""'], {}), "('x')\n", (5153, 5158), False, 'from region_profiler import iter_proxy, region\n'), ((5172, 5187), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (5182, 5187), False, 'import time\n'), ((6362, 6370), 'region_profiler.region', 'region', ([], {}), '()\n', (6368, 6370), False, 'from region_profiler import iter_proxy, region\n'), ((6393, 6414), 'region_profiler.iter_proxy', 'iter_proxy', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (6403, 6414), False, 'from region_profiler import iter_proxy, region\n'), ((3228, 3263), 'region_profiler.region', 'region', (['"""bar_global"""'], {'asglobal': '(True)'}), 
"('bar_global', asglobal=True)\n", (3234, 3263), False, 'from region_profiler import iter_proxy, region\n'), ((3290, 3334), 'region_profiler.iter_proxy', 'iter_proxy', (['[1, 2, 3]', '"""iter"""'], {'asglobal': '(True)'}), "([1, 2, 3], 'iter', asglobal=True)\n", (3300, 3334), False, 'from region_profiler import iter_proxy, region\n'), ((2046, 2057), 'region_profiler.region', 'region', (['"""b"""'], {}), "('b')\n", (2052, 2057), False, 'from region_profiler import iter_proxy, region\n'), ((2105, 2116), 'region_profiler.region', 'region', (['"""b"""'], {}), "('b')\n", (2111, 2116), False, 'from region_profiler import iter_proxy, region\n'), ((3488, 3499), 'region_profiler.region', 'region', (['"""b"""'], {}), "('b')\n", (3494, 3499), False, 'from region_profiler import iter_proxy, region\n'), ((3547, 3558), 'region_profiler.region', 'region', (['"""b"""'], {}), "('b')\n", (3553, 3558), False, 'from region_profiler import iter_proxy, region\n'), ((4992, 5003), 'region_profiler.region', 'region', (['"""b"""'], {}), "('b')\n", (4998, 5003), False, 'from region_profiler import iter_proxy, region\n'), ((5025, 5038), 'time.sleep', 'time.sleep', (['i'], {}), '(i)\n', (5035, 5038), False, 'import time\n')] |
#!/usr/bin/env python
import datetime
import logging
import time
from threading import Thread
import requests
from requests.auth import HTTPBasicAuth
import settings
def update_notiwire(data=None, relative_url=''):
    """POST *data* (plus the configured API key) to the Notiwire endpoint
    at *relative_url* under this host's base URL."""
    base_url = settings.API_URL + settings.NAME + '/'
    payload = data if data else {}
    # Every request carries the shared API key.
    payload['api_key'] = settings.API_KEY
    logging.debug('Ready to send a POST request for {url} with data {data}'.format(url=relative_url, data=payload))
    response = requests.post(base_url + relative_url, data=payload)
    logging.debug('POST Request sent with response {response}'.format(response=response.text))
class Coffe:
    """Polls the Z-Wave coffee-machine power sensor and notifies Notiwire
    whenever a new pot appears to be brewing."""

    def __init__(self):
        self.stopped = False

    def start(self):
        """Spawn the polling loop in a daemon thread and return self."""
        worker = Thread(target=self.update, args=())
        worker.daemon = True
        worker.start()
        return self

    def update(self):
        """Poll forever; report a new pot when power draw exceeds 1000."""
        previous_stamp = 0
        credentials = HTTPBasicAuth(settings.ZWAVE_USER, settings.ZWAVE_PASSWORD)
        while True:
            time.sleep(settings.POLLING_FREQUENCY)
            if self.stopped:
                return
            try:
                # Ask the sensor to refresh, then read the fresh values.
                requests.get(settings.ZWAVE_URL_COFFEE + '/command/update', auth=credentials)
                response = requests.get(settings.ZWAVE_URL_COFFEE, auth=credentials)
                payload = response.json()['data']
                stamp = payload['updateTime']
                effect = payload['metrics']['level']
                # An unchanged update time means the sensor got no power.
                if stamp == previous_stamp:
                    logging.info("Coffeesensor is unpowered")
                    previous_stamp = stamp
                    continue
                if effect > 1000:
                    # High power draw: the machine is boiling a new pot.
                    update_notiwire(relative_url='coffee')
                    logging.info('New coffee pot at {date}'.format(date=datetime.datetime.now()))
                    previous_stamp = stamp
                    # Back off so one pot is not reported repeatedly.
                    time.sleep(60 * 10)
                    continue
                previous_stamp = stamp
            except requests.exceptions.RequestException as e:
                logging.error(e)

    def stop(self):
        """Ask the polling loop to exit at its next wake-up."""
        self.stopped = True
class Light:
    """Polls the Z-Wave light sensor and pushes on/off status to Notiwire."""

    def __init__(self):
        self.stopped = False
        # Last status sent to Notiwire, as the strings 'true'/'false'.
        self.status = 'false'

    def start(self):
        """Spawn the polling loop in a daemon thread and return self."""
        worker = Thread(target=self.update, args=())
        worker.daemon = True
        worker.start()
        return self

    def update(self):
        """Poll forever; report when the status flips or every 30 minutes."""
        previous_stamp = 0
        last_report = 0
        credentials = HTTPBasicAuth(settings.ZWAVE_USER, settings.ZWAVE_PASSWORD)
        while True:
            time.sleep(settings.POLLING_FREQUENCY)
            if self.stopped:
                return
            try:
                # Ask the sensor to refresh, then read the fresh values.
                requests.get(settings.ZWAVE_URL_LIGHT + '/command/update', auth=credentials)
                response = requests.get(settings.ZWAVE_URL_LIGHT, auth=credentials)
                payload = response.json()['data']
                stamp = payload['updateTime']
                # An unchanged update time means the sensor saw no power.
                if stamp == previous_stamp:
                    status = 'false'
                    logging.info('lights are off')
                else:
                    status = 'true'
                    logging.info('lights are on')
                # Notify on any change, and at least every 30 minutes.
                if status != self.status or time.time() - last_report > 60 * 30:
                    self.status = status
                    logging.info("Lightstatus changed at {date}, light status is now {status}"
                                 .format(date=datetime.datetime.now(), status=status))
                    update_notiwire(data={'status': status}, relative_url='status')
                    last_report = time.time()
                previous_stamp = stamp
            except requests.exceptions.RequestException as e:
                logging.error(e)

    def stop(self):
        """Ask the polling loop to exit at its next wake-up."""
        self.stopped = True
class Notipi(object):
    """Wires up all sensor pollers; construction starts the daemon threads."""
    def __init__(self):
        # Each .start() spawns a daemon polling thread (see Light/Coffe).
        Light().start()
        Coffe().start()
def main():
    """Configure logging, start the NotiPi pollers and block forever."""
    # Logging
    log_level = logging.DEBUG if settings.DEBUG else logging.INFO
    logging.basicConfig(format='%(asctime)s %(message)s', level=log_level)
    logging.info('Starting NotiPi')
    notipi = Notipi()
    logging.info('NotPi handlers started')
    # Wait forever
    # (the pollers run in daemon threads, which die with the main thread,
    # so the main thread must stay alive).
    while True:
        time.sleep(1)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"logging.basicConfig",
"requests.post",
"requests.auth.HTTPBasicAuth",
"time.sleep",
"requests.get",
"datetime.datetime.now",
"time.time",
"threading.Thread",
"logging.info",
"logging.error"
] | [((463, 507), 'requests.post', 'requests.post', (['(URL + relative_url)'], {'data': 'data'}), '(URL + relative_url, data=data)\n', (476, 507), False, 'import requests\n'), ((4057, 4127), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(message)s"""', 'level': 'log_level'}), "(format='%(asctime)s %(message)s', level=log_level)\n", (4076, 4127), False, 'import logging\n'), ((4132, 4163), 'logging.info', 'logging.info', (['"""Starting NotiPi"""'], {}), "('Starting NotiPi')\n", (4144, 4163), False, 'import logging\n'), ((4190, 4228), 'logging.info', 'logging.info', (['"""NotPi handlers started"""'], {}), "('NotPi handlers started')\n", (4202, 4228), False, 'import logging\n'), ((698, 733), 'threading.Thread', 'Thread', ([], {'target': 'self.update', 'args': '()'}), '(target=self.update, args=())\n', (704, 733), False, 'from threading import Thread\n'), ((858, 917), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['settings.ZWAVE_USER', 'settings.ZWAVE_PASSWORD'], {}), '(settings.ZWAVE_USER, settings.ZWAVE_PASSWORD)\n', (871, 917), False, 'from requests.auth import HTTPBasicAuth\n'), ((2235, 2270), 'threading.Thread', 'Thread', ([], {'target': 'self.update', 'args': '()'}), '(target=self.update, args=())\n', (2241, 2270), False, 'from threading import Thread\n'), ((2431, 2490), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['settings.ZWAVE_USER', 'settings.ZWAVE_PASSWORD'], {}), '(settings.ZWAVE_USER, settings.ZWAVE_PASSWORD)\n', (2444, 2490), False, 'from requests.auth import HTTPBasicAuth\n'), ((4272, 4285), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4282, 4285), False, 'import time\n'), ((950, 988), 'time.sleep', 'time.sleep', (['settings.POLLING_FREQUENCY'], {}), '(settings.POLLING_FREQUENCY)\n', (960, 988), False, 'import time\n'), ((2523, 2561), 'time.sleep', 'time.sleep', (['settings.POLLING_FREQUENCY'], {}), '(settings.POLLING_FREQUENCY)\n', (2533, 2561), False, 'import time\n'), ((1074, 1144), 'requests.get', 
'requests.get', (["(settings.ZWAVE_URL_COFFEE + '/command/update')"], {'auth': 'auth'}), "(settings.ZWAVE_URL_COFFEE + '/command/update', auth=auth)\n", (1086, 1144), False, 'import requests\n'), ((1165, 1215), 'requests.get', 'requests.get', (['settings.ZWAVE_URL_COFFEE'], {'auth': 'auth'}), '(settings.ZWAVE_URL_COFFEE, auth=auth)\n', (1177, 1215), False, 'import requests\n'), ((2647, 2716), 'requests.get', 'requests.get', (["(settings.ZWAVE_URL_LIGHT + '/command/update')"], {'auth': 'auth'}), "(settings.ZWAVE_URL_LIGHT + '/command/update', auth=auth)\n", (2659, 2716), False, 'import requests\n'), ((2737, 2786), 'requests.get', 'requests.get', (['settings.ZWAVE_URL_LIGHT'], {'auth': 'auth'}), '(settings.ZWAVE_URL_LIGHT, auth=auth)\n', (2749, 2786), False, 'import requests\n'), ((1436, 1477), 'logging.info', 'logging.info', (['"""Coffeesensor is unpowered"""'], {}), "('Coffeesensor is unpowered')\n", (1448, 1477), False, 'import logging\n'), ((1864, 1883), 'time.sleep', 'time.sleep', (['(60 * 10)'], {}), '(60 * 10)\n', (1874, 1883), False, 'import time\n'), ((2037, 2053), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (2050, 2053), False, 'import logging\n'), ((2987, 3017), 'logging.info', 'logging.info', (['"""lights are off"""'], {}), "('lights are off')\n", (2999, 3017), False, 'import logging\n'), ((3096, 3125), 'logging.info', 'logging.info', (['"""lights are on"""'], {}), "('lights are on')\n", (3108, 3125), False, 'import logging\n'), ((3660, 3671), 'time.time', 'time.time', ([], {}), '()\n', (3669, 3671), False, 'import time\n'), ((3797, 3813), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (3810, 3813), False, 'import logging\n'), ((3258, 3269), 'time.time', 'time.time', ([], {}), '()\n', (3267, 3269), False, 'import time\n'), ((1769, 1792), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1790, 1792), False, 'import datetime\n'), ((3489, 3512), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3510, 
3512), False, 'import datetime\n')] |
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from . import views
# Route table: each entry wires a URL regex to its view callable.
urlpatterns = [
    url('^$', views.index, name='index'),
    url(r'^profile/(\d+)', views.profile, name='profile'),
    url(r'^create/post', views.new_post, name='new-post'),
    url(r'^follow/(\d+)', views.follow, name='follow'),
    url(r'^likes/(\d+)', views.likes, name='likes'),
    url(r'^post/(\d+)', views.post, name='post'),
    url(r'^create/comment/$', views.comment, name='comment'),
    url(r'^search/', views.search_profile, name='search_profile'),
]

# In development, let Django itself serve user-uploaded media files.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"django.conf.urls.static.static",
"django.conf.urls.url"
] | [((148, 184), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (151, 184), False, 'from django.conf.urls import url\n'), ((190, 243), 'django.conf.urls.url', 'url', (['"""^profile/(\\\\d+)"""', 'views.profile'], {'name': '"""profile"""'}), "('^profile/(\\\\d+)', views.profile, name='profile')\n", (193, 243), False, 'from django.conf.urls import url\n'), ((249, 301), 'django.conf.urls.url', 'url', (['"""^create/post"""', 'views.new_post'], {'name': '"""new-post"""'}), "('^create/post', views.new_post, name='new-post')\n", (252, 301), False, 'from django.conf.urls import url\n'), ((309, 359), 'django.conf.urls.url', 'url', (['"""^follow/(\\\\d+)"""', 'views.follow'], {'name': '"""follow"""'}), "('^follow/(\\\\d+)', views.follow, name='follow')\n", (312, 359), False, 'from django.conf.urls import url\n'), ((367, 414), 'django.conf.urls.url', 'url', (['"""^likes/(\\\\d+)"""', 'views.likes'], {'name': '"""likes"""'}), "('^likes/(\\\\d+)', views.likes, name='likes')\n", (370, 414), False, 'from django.conf.urls import url\n'), ((422, 466), 'django.conf.urls.url', 'url', (['"""^post/(\\\\d+)"""', 'views.post'], {'name': '"""post"""'}), "('^post/(\\\\d+)', views.post, name='post')\n", (425, 466), False, 'from django.conf.urls import url\n'), ((472, 527), 'django.conf.urls.url', 'url', (['"""^create/comment/$"""', 'views.comment'], {'name': '"""comment"""'}), "('^create/comment/$', views.comment, name='comment')\n", (475, 527), False, 'from django.conf.urls import url\n'), ((535, 595), 'django.conf.urls.url', 'url', (['"""^search/"""', 'views.search_profile'], {'name': '"""search_profile"""'}), "('^search/', views.search_profile, name='search_profile')\n", (538, 595), False, 'from django.conf.urls import url\n'), ((646, 707), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, 
document_root=settings.MEDIA_ROOT)\n', (652, 707), False, 'from django.conf.urls.static import static\n')] |
from unittest.mock import MagicMock
from unittest.mock import patch
import aiofiles
from aiofiles import threadpool
async def test_unit_get_current_version_both_files_dont_exist(mock_hub, hub, tmp_path):
    """
    SCENARIO #1
    - override_version_file DOES NOT EXIST
    - main_version_file DOES NOT EXIST
    """
    # Route the real implementation through the mock hub
    mock_hub.saltenv.ops.get_current_version = hub.saltenv.ops.get_current_version
    # Set the saltenv_dir as a nonexistent directory
    mock_hub.OPT.saltenv.saltenv_dir = "nonexistent_testing_dir"
    # Patch os.getcwd() to be the mock directory
    with patch("os.getcwd", return_value=tmp_path) as mock_cwd:
        # Patch the exists function to return False for both times it is called
        with patch("pathlib.PosixPath.exists", side_effect=[False, False]) as mock_exists:
            expected = ("", "")
            actual = await mock_hub.saltenv.ops.get_current_version()
            # BUG FIX: the bare comparison `actual == expected` discarded its
            # result, so this test could never fail here; assert it instead.
            assert actual == expected

    # Ensure every mocked function was called the appropriate number of times
    mock_cwd.assert_called_once()
    assert mock_exists.call_count == 2
async def test_unit_get_current_version_only_override_exists(mock_hub, hub, tmp_path):
    """
    SCENARIO #2
    - override_version_file DOES EXIST
    - main_version_file DOES NOT EXIST
    """
    # Route the real implementation through the mock hub
    mock_hub.saltenv.ops.get_current_version = hub.saltenv.ops.get_current_version
    # Set the saltenv_dir as a nonexistent directory
    mock_hub.OPT.saltenv.saltenv_dir = "nonexistent_testing_dir"
    # Patch os.getcwd() to be the mock directory
    with patch("os.getcwd", return_value=tmp_path) as mock_cwd:
        # exists(): True for the override file, False for the main file
        with patch("pathlib.PosixPath.exists", side_effect=[True, False]) as mock_exists:
            # Register the return type with aiofiles.threadpool.wrap dispatcher
            aiofiles.threadpool.wrap.register(MagicMock)(
                lambda *args, **kwargs: threadpool.AsyncBufferedIOBase(*args, **kwargs)
            )
            # Mock the file returned by aiofiles.open
            mock_override_version = "3004"
            mock_file = MagicMock()
            with patch("aiofiles.threadpool.sync_open", return_value=mock_file) as mock_open:
                # Set the value of read() to be the mock version
                mock_file.read.return_value = mock_override_version
                # Call get_current_version
                expected = (mock_override_version, tmp_path / ".salt-version")
                actual = await mock_hub.saltenv.ops.get_current_version()
                # BUG FIX: the bare comparison `actual == expected` discarded
                # its result, so this test could never fail here; assert it.
                assert actual == expected

    # Ensure every mocked function was called the appropriate number of times
    mock_cwd.assert_called_once()
    mock_exists.assert_called_once()
    mock_open.assert_called_once()
    mock_file.read.assert_called_once()
async def test_unit_get_current_version_only_main_exists(mock_hub, hub, tmp_path):
    """
    SCENARIO #3
    - override_version_file DOES NOT EXIST
    - main_version_file DOES EXIST
    """
    # Route the real implementation through the mock hub
    mock_hub.saltenv.ops.get_current_version = hub.saltenv.ops.get_current_version
    # Set the saltenv_dir as the mock directory
    mock_hub.OPT.saltenv.saltenv_dir = tmp_path
    # Patch os.getcwd() to be the nonexistent directory
    with patch("os.getcwd", return_value="nonexistent_testing_dir") as mock_cwd:
        # exists(): False for the override file, True for the main file
        with patch("pathlib.PosixPath.exists", side_effect=[False, True]) as mock_exists:
            # Register the return type with aiofiles.threadpool.wrap dispatcher
            aiofiles.threadpool.wrap.register(MagicMock)(
                lambda *args, **kwargs: threadpool.AsyncBufferedIOBase(*args, **kwargs)
            )
            # Mock the file returned by aiofiles.open
            mock_main_version = "3003"
            mock_file = MagicMock()
            with patch("aiofiles.threadpool.sync_open", return_value=mock_file) as mock_open:
                # Set the value of read() to be the mock version
                mock_file.read.return_value = mock_main_version
                # Call get_current_version
                expected = (mock_main_version, tmp_path / "version")
                actual = await mock_hub.saltenv.ops.get_current_version()
                # BUG FIX: the bare comparison `actual == expected` discarded
                # its result, so this test could never fail here; assert it.
                assert actual == expected

    # Ensure every mocked function was called the appropriate number of times
    mock_cwd.assert_called_once()
    assert mock_exists.call_count == 2
    mock_open.assert_called_once()
    mock_file.read.assert_called_once()
async def test_unit_get_current_version_both_files_exist(mock_hub, hub, tmp_path):
    """
    SCENARIO #4
    - override_version_file DOES EXIST
    - main_version_file DOES EXIST
    """
    # Route the real implementation through the mock hub
    mock_hub.saltenv.ops.get_current_version = hub.saltenv.ops.get_current_version
    # Set the saltenv_dir as the mock directory
    mock_hub.OPT.saltenv.saltenv_dir = tmp_path
    # Patch os.getcwd() to be the mock directory
    with patch("os.getcwd", return_value=tmp_path) as mock_cwd:
        # Patch exists to return True for both calls
        with patch("pathlib.PosixPath.exists", side_effect=[True, True]) as mock_exists:
            # Register the return type with aiofiles.threadpool.wrap dispatcher
            aiofiles.threadpool.wrap.register(MagicMock)(
                lambda *args, **kwargs: threadpool.AsyncBufferedIOBase(*args, **kwargs)
            )
            # Mock the file returned by aiofiles.open
            mock_override_version = "3004"
            mock_override_file = MagicMock()
            # Set the value of read() to "3004"
            mock_override_file.read.return_value = mock_override_version
            mock_main_file = MagicMock()
            # BUG FIX: previously assigned the mock object itself as the return
            # value; read() should yield the main version string "3003".
            mock_main_file.read.return_value = "3003"
            # Set the open() to return the mocked file for override and then the mocked file for main
            with patch(
                "aiofiles.threadpool.sync_open", side_effect=[mock_override_file, mock_main_file]
            ) as mock_open:
                # Call get_current_version
                expected = (mock_override_version, tmp_path / ".salt-version")
                actual = await mock_hub.saltenv.ops.get_current_version()
                # BUG FIX: the bare comparison `actual == expected` discarded
                # its result, so this test could never fail here; assert it.
                assert actual == expected

    # Ensure every mocked function was called the appropriate number of times
    mock_cwd.assert_called_once()
    mock_exists.assert_called_once()
    mock_open.assert_called_once()
    mock_override_file.read.assert_called_once()
    assert mock_main_file.read.call_count == 0
| [
"aiofiles.threadpool.wrap.register",
"unittest.mock.MagicMock",
"aiofiles.threadpool.AsyncBufferedIOBase",
"unittest.mock.patch"
] | [((622, 663), 'unittest.mock.patch', 'patch', (['"""os.getcwd"""'], {'return_value': 'tmp_path'}), "('os.getcwd', return_value=tmp_path)\n", (627, 663), False, 'from unittest.mock import patch\n'), ((1657, 1698), 'unittest.mock.patch', 'patch', (['"""os.getcwd"""'], {'return_value': 'tmp_path'}), "('os.getcwd', return_value=tmp_path)\n", (1662, 1698), False, 'from unittest.mock import patch\n'), ((3480, 3538), 'unittest.mock.patch', 'patch', (['"""os.getcwd"""'], {'return_value': '"""nonexistent_testing_dir"""'}), "('os.getcwd', return_value='nonexistent_testing_dir')\n", (3485, 3538), False, 'from unittest.mock import patch\n'), ((5293, 5334), 'unittest.mock.patch', 'patch', (['"""os.getcwd"""'], {'return_value': 'tmp_path'}), "('os.getcwd', return_value=tmp_path)\n", (5298, 5334), False, 'from unittest.mock import patch\n'), ((770, 831), 'unittest.mock.patch', 'patch', (['"""pathlib.PosixPath.exists"""'], {'side_effect': '[False, False]'}), "('pathlib.PosixPath.exists', side_effect=[False, False])\n", (775, 831), False, 'from unittest.mock import patch\n'), ((1804, 1864), 'unittest.mock.patch', 'patch', (['"""pathlib.PosixPath.exists"""'], {'side_effect': '[True, False]'}), "('pathlib.PosixPath.exists', side_effect=[True, False])\n", (1809, 1864), False, 'from unittest.mock import patch\n'), ((2244, 2255), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2253, 2255), False, 'from unittest.mock import MagicMock\n'), ((3644, 3704), 'unittest.mock.patch', 'patch', (['"""pathlib.PosixPath.exists"""'], {'side_effect': '[False, True]'}), "('pathlib.PosixPath.exists', side_effect=[False, True])\n", (3649, 3704), False, 'from unittest.mock import patch\n'), ((4080, 4091), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4089, 4091), False, 'from unittest.mock import MagicMock\n'), ((5414, 5473), 'unittest.mock.patch', 'patch', (['"""pathlib.PosixPath.exists"""'], {'side_effect': '[True, True]'}), "('pathlib.PosixPath.exists', side_effect=[True, 
True])\n", (5419, 5473), False, 'from unittest.mock import patch\n'), ((5862, 5873), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (5871, 5873), False, 'from unittest.mock import MagicMock\n'), ((6024, 6035), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6033, 6035), False, 'from unittest.mock import MagicMock\n'), ((1974, 2018), 'aiofiles.threadpool.wrap.register', 'aiofiles.threadpool.wrap.register', (['MagicMock'], {}), '(MagicMock)\n', (2007, 2018), False, 'import aiofiles\n'), ((2273, 2335), 'unittest.mock.patch', 'patch', (['"""aiofiles.threadpool.sync_open"""'], {'return_value': 'mock_file'}), "('aiofiles.threadpool.sync_open', return_value=mock_file)\n", (2278, 2335), False, 'from unittest.mock import patch\n'), ((3814, 3858), 'aiofiles.threadpool.wrap.register', 'aiofiles.threadpool.wrap.register', (['MagicMock'], {}), '(MagicMock)\n', (3847, 3858), False, 'import aiofiles\n'), ((4109, 4171), 'unittest.mock.patch', 'patch', (['"""aiofiles.threadpool.sync_open"""'], {'return_value': 'mock_file'}), "('aiofiles.threadpool.sync_open', return_value=mock_file)\n", (4114, 4171), False, 'from unittest.mock import patch\n'), ((5583, 5627), 'aiofiles.threadpool.wrap.register', 'aiofiles.threadpool.wrap.register', (['MagicMock'], {}), '(MagicMock)\n', (5616, 5627), False, 'import aiofiles\n'), ((6265, 6357), 'unittest.mock.patch', 'patch', (['"""aiofiles.threadpool.sync_open"""'], {'side_effect': '[mock_override_file, mock_main_file]'}), "('aiofiles.threadpool.sync_open', side_effect=[mock_override_file,\n mock_main_file])\n", (6270, 6357), False, 'from unittest.mock import patch\n'), ((2060, 2107), 'aiofiles.threadpool.AsyncBufferedIOBase', 'threadpool.AsyncBufferedIOBase', (['*args'], {}), '(*args, **kwargs)\n', (2090, 2107), False, 'from aiofiles import threadpool\n'), ((3900, 3947), 'aiofiles.threadpool.AsyncBufferedIOBase', 'threadpool.AsyncBufferedIOBase', (['*args'], {}), '(*args, **kwargs)\n', (3930, 3947), False, 'from aiofiles 
import threadpool\n'), ((5669, 5716), 'aiofiles.threadpool.AsyncBufferedIOBase', 'threadpool.AsyncBufferedIOBase', (['*args'], {}), '(*args, **kwargs)\n', (5699, 5716), False, 'from aiofiles import threadpool\n')] |
import setuptools
# Distribution metadata for the fakeokpy package.
_METADATA = dict(
    name="fakeokpy",
    version="0.1",
    url="https://github.com/yuvipanda/fakeokpy",
    author="<NAME>",
    author_email="<EMAIL>",
    license="BSD-3-Clause",
)

setuptools.setup(packages=setuptools.find_packages(), **_METADATA)
| [
"setuptools.find_packages"
] | [((216, 242), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (240, 242), False, 'import setuptools\n')] |
# coding: utf-8
# $ \newcommand{\cat}[2][\phantom{i}]{\ket{C^{#2}_{#1\alpha}}} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\bra}[1]{\langle#1|} $
# $ \newcommand{\braket}[2]{\langle#1|#2\rangle} $
# $\newcommand{\au}{\hat{a}^\dagger}$
# $\newcommand{\ad}{\hat{a}}$
# $\newcommand{\bu}{\hat{b}^\dagger}$
# $\newcommand{\bd}{\hat{b}}$
# # Cat Code Preparation with Optimal Control
# <sup><NAME></sup>
#
# ## Goal
# Obtain a set of pulses which will encode the quantum information of a qubit with "cat codes" (and vice versa).
#
# <sub><NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, ‘Extending the lifetime of a quantum bit with error correction in superconducting circuits’, Nature; London, vol. 536, no. 7617, pp. 441–445, Aug. 2016.</sub>
# # Outline
# * Why cat codes?
# * Optimal control (GRAPE)
# * Using optimal control to generate cat codes
# * My work so far
# # Why use cat codes for error correction?
# The cat code is comprised of the logical basis:
# 
# <p style="text-align: center;">Notation: $ \ket{0}_L = \cat{\pm},\,\, \ket{1}_L = \cat[i]{\pm} $ </p>
# $ \ket{\psi} = c_0 \ket{C_\alpha^\pm} + c_1 \ket{C_{i\alpha}^\pm} $
# 
# ## Crash course in Optimal control (GRAPE)
# 
# We (usually) optimise for fidelity $\newcommand{\tr}[0]{\operatorname{tr}} f_{PSU} = \tfrac{1}{d} \big| \tr \{X_{targ}^{\dagger} X(T)\} \big| $
# # Optimal control for cat codes
# Jaynes-Cummings (dispersive)
# $$ \hat{H} = \omega_s\au\ad \,+ (\omega_a - \chi_{sa}\au\ad)\bu\bd $$
# $$-\, \frac{K_s}{2}\au{}^2\ad{}^2 \,-\, \frac{K_a}{2}\bu{}^2\bd{}^2 $$
# $$+\, \underbrace{\epsilon_a(t)\bu + \epsilon_a^*(t)\bd}_{\text{Qubit drive}} \,+\, \underbrace{\epsilon_s(t)\au + \epsilon_s^*(t)\ad}_{\text{Res drive}} $$
#
# $$ \bu\bd = \ket{e}\bra{e} = \sigma_-\sigma_+ $$
# 
# * Use optimisation to find the pulse envelope which will realise the unitary $ \hat{U}_t \underbrace{(c_0\ket{g} + c_1\ket{e})}_{\text{ancilla}}\underbrace{\ket{0}}_{\text{res}} = \underbrace{\ket{g}}_{\text{ancilla}} \underbrace{(c_0\cat{+} + c_1\cat[i]{+})}_{\text{res}} $
# * Practically this means we want to optimise for $K$ state transfers at the same time $ F_{oc} = \frac{1}{K^2} | \sum_k^K \braket{\psi_k(T)}{\psi_k^{\text{tar}}} |^2 $ where we encode many points on the Bloch sphere in the cat code basis.
# In[7]:
from numpy import sqrt, pi

# FIX: use numpy's full-precision pi instead of a hand-typed truncated literal.
π = pi
ω_r = 8.3056 * 2 * π  # resonator frequency
ω_q = 6.2815 * 2 * π  # qubit frequency
K_q = -2 * π * 297e-3  # Kerr qubit 200-300 MHz
K_r = 2 * π * 4.5e-6  # Kerr res 1-10 Khz
ω_ef = ω_q + K_q
ω_gf = ω_q + K_q / 2
χ = 25e-3 * 2 * π  # parameter in the dispersive hamiltonian
Δ = abs(ω_r - ω_q)  # detuning
g = sqrt(Δ * χ)  # coupling strength that is consistent with chi
print(g)
# 
# 
# 
# ### My work so far
# * Use the pulse optimisation tool in `QuTiP` (quantum simulation toolbox in Python), or other framework
# * Project status - more difficult than expected
# * Even for the simple things, e.g. bit flip pulse, there are problems with convergence and numerical errors
# * Custom constraints on the pulses aren't implemented yet (nor general optimization goals) in QuTiP
# * I will try `Krotov`, another python toolbox which uses the Krotov method instead of GRAPE
# * Goal of the thesis is to realise this method and then eventually evaluate possible extensions:
# * Other bosonic codes besides "2 lobe"-cat codes
# * Optimise the coefficients of Fock states (theoretical curiosity)
# ## Thank you for listening! Any questions?
| [
"numpy.sqrt"
] | [((2879, 2890), 'numpy.sqrt', 'sqrt', (['(Δ * χ)'], {}), '(Δ * χ)\n', (2883, 2890), False, 'from numpy import sqrt\n')] |
def coding_problem_41(flights_db, starting_airport):
    """
    Given an unordered list of flights taken by someone, each represented as (origin, destination) pairs, and a
    starting airport, compute the person's itinerary. If no such itinerary exists, return null. If there are multiple
    possible itineraries, return the lexicographically smallest one. All flights must be used in the itinerary.
    Examples:
        >>> coding_problem_41([('SFO', 'HKO'), ('YYZ', 'SFO'), ('YUL', 'YYZ'), ('HKO', 'ORD')], 'YUL')
        ['YUL', 'YYZ', 'SFO', 'HKO', 'ORD']
        >>> coding_problem_41([('SFO', 'COM'), ('COM', 'YYZ')], 'COM')  # returns None
        >>> coding_problem_41([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'A')], 'A')
        ['A', 'B', 'C', 'A', 'C']

        The itinerary ['A', 'C', 'A', 'B', 'C'] is also a valid however the first one is lexicographically smaller.
    """
    from collections import defaultdict

    # Adjacency map origin -> sorted list of destinations. Sorting up front
    # makes the FIRST complete itinerary found the lexicographically smallest.
    destinations = defaultdict(list)
    for origin, destination in flights_db:
        destinations[origin].append(destination)
    for options in destinations.values():
        options.sort()

    total_flights = len(flights_db)

    def _extend(airport, itinerary):
        # Depth-first search over the remaining (unused) flights, trying
        # destinations in lexicographic order and backtracking on dead ends.
        if len(itinerary) == total_flights + 1:
            return itinerary  # every flight used exactly once
        options = destinations[airport]
        for index in range(len(options)):
            nxt = options.pop(index)  # consume this flight
            found = _extend(nxt, itinerary + [nxt])
            if found is not None:
                return found
            options.insert(index, nxt)  # backtrack: restore the flight
        return None

    return _extend(starting_airport, [starting_airport])


if __name__ == '__main__':
    import doctest
    doctest.testmod(verbose=True)
| [
"doctest.testmod"
] | [((935, 964), 'doctest.testmod', 'doctest.testmod', ([], {'verbose': '(True)'}), '(verbose=True)\n', (950, 964), False, 'import doctest\n')] |
import itertools
import pytest
from iterators.invalid_iter import InvalidIter
def _grouper_to_keys(grouper):
return [g[0] for g in grouper]
def _grouper_to_groups(grouper):
return [list(g[1]) for g in grouper]
@pytest.mark.parametrize("keyfunc, data, expected_keys", [
    (lambda x: x, [], []),
    (lambda x: x, [1, 2, 3], [1, 2, 3]),
    (lambda x: x, [1, 2, 2, 2, 3, 3], [1, 2, 3]),
    (lambda x: x, "", []),
    (lambda x: x, "ABC", ["A", "B", "C"]),
    (lambda x: x, "ABBBCC", ["A", "B", "C"]),
])
def test_groupby_basic_case_keys(keyfunc, data, expected_keys):
    """Identity keyfunc: each run of equal elements yields exactly one key."""
    assert _grouper_to_keys(itertools.groupby(data, keyfunc)) == expected_keys
@pytest.mark.parametrize("keyfunc, data, expected_groups", [
    (lambda x: x, [], []),
    (lambda x: x, [1, 2, 3], [[1], [2], [3]]),
    (lambda x: x, [1, 2, 2, 2, 3, 3], [[1], [2, 2, 2], [3, 3]]),
    (lambda x: x, "", []),
    (lambda x: x, "ABC", [["A"], ["B"], ["C"]]),
    (lambda x: x, "ABBBCC", [["A"], ["B", "B", "B"], ["C", "C"]]),
])
def test_groupby_basic_case_groups(keyfunc, data, expected_groups):
    """Identity keyfunc: each run of equal elements becomes one group list."""
    assert _grouper_to_groups(itertools.groupby(data, keyfunc)) == expected_groups
@pytest.mark.parametrize("keyfunc, data, exception_message", [
    (lambda x: x, 1, "'int' object is not iterable"),
    (lambda x: x, min, "'builtin_function_or_method' object is not iterable"),
    (lambda x: x, InvalidIter(), "'InvalidIter' object is not iterable")
])
def test_groupby_basic_case_invalid_data(keyfunc, data, exception_message):
    """Non-iterable inputs make groupby raise TypeError with a descriptive message."""
    with pytest.raises(TypeError) as caught:
        itertools.groupby(data, keyfunc)
    assert caught.value.args[0] == exception_message
@pytest.mark.parametrize("keyfunc, data, expected_keys", [
    (lambda x: x % 2, [], []),
    (lambda x: x % 2, [1, 3, 5, 7, 2, 4, 6, 8], [1, 0]),
    (lambda x: x % 2, [1, 2, 3, 4, 5], [1, 0, 1, 0, 1]),
    (lambda x: True, [], []),
    (lambda x: True, [1, 2, 3, 4], [True]),
    (lambda x: True, "ABCDEF", [True]),
])
def test_groupby_different_keyfunc_keys(keyfunc, data, expected_keys):
    """Non-identity keyfuncs: a new key appears whenever the computed key changes."""
    assert _grouper_to_keys(itertools.groupby(data, keyfunc)) == expected_keys
@pytest.mark.parametrize("keyfunc, data, expected_groups", [
    (lambda x: x % 2, [], []),
    (lambda x: x % 2, [1, 3, 5, 7, 2, 4, 6, 8], [[1, 3, 5, 7], [2, 4, 6, 8]]),
    (lambda x: x % 2, [1, 2, 3, 4, 5], [[1], [2], [3], [4], [5]]),
    (lambda x: True, [], []),
    (lambda x: True, [1, 2, 3, 4], [[1, 2, 3, 4]]),
    (lambda x: True, "ABCDEF", [["A", "B", "C", "D", "E", "F"]]),
])
def test_groupby_different_keyfunc_groups(keyfunc, data, expected_groups):
    """Non-identity keyfuncs: consecutive elements with equal keys share a group."""
    assert _grouper_to_groups(itertools.groupby(data, keyfunc)) == expected_groups
| [
"pytest.mark.parametrize",
"iterators.invalid_iter.InvalidIter",
"pytest.raises",
"itertools.groupby"
] | [((225, 504), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""keyfunc, data, expected_keys"""', "[(lambda x: x, [], []), (lambda x: x, [1, 2, 3], [1, 2, 3]), (lambda x: x,\n [1, 2, 2, 2, 3, 3], [1, 2, 3]), (lambda x: x, '', []), (lambda x: x,\n 'ABC', ['A', 'B', 'C']), (lambda x: x, 'ABBBCC', ['A', 'B', 'C'])]"], {}), "('keyfunc, data, expected_keys', [(lambda x: x, [],\n []), (lambda x: x, [1, 2, 3], [1, 2, 3]), (lambda x: x, [1, 2, 2, 2, 3,\n 3], [1, 2, 3]), (lambda x: x, '', []), (lambda x: x, 'ABC', ['A', 'B',\n 'C']), (lambda x: x, 'ABBBCC', ['A', 'B', 'C'])])\n", (248, 504), False, 'import pytest\n'), ((688, 1021), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""keyfunc, data, expected_groups"""', "[(lambda x: x, [], []), (lambda x: x, [1, 2, 3], [[1], [2], [3]]), (lambda\n x: x, [1, 2, 2, 2, 3, 3], [[1], [2, 2, 2], [3, 3]]), (lambda x: x, '',\n []), (lambda x: x, 'ABC', [['A'], ['B'], ['C']]), (lambda x: x,\n 'ABBBCC', [['A'], ['B', 'B', 'B'], ['C', 'C']])]"], {}), "('keyfunc, data, expected_groups', [(lambda x: x, [],\n []), (lambda x: x, [1, 2, 3], [[1], [2], [3]]), (lambda x: x, [1, 2, 2,\n 2, 3, 3], [[1], [2, 2, 2], [3, 3]]), (lambda x: x, '', []), (lambda x:\n x, 'ABC', [['A'], ['B'], ['C']]), (lambda x: x, 'ABBBCC', [['A'], ['B',\n 'B', 'B'], ['C', 'C']])])\n", (711, 1021), False, 'import pytest\n'), ((1700, 2004), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""keyfunc, data, expected_keys"""', "[(lambda x: x % 2, [], []), (lambda x: x % 2, [1, 3, 5, 7, 2, 4, 6, 8], [1,\n 0]), (lambda x: x % 2, [1, 2, 3, 4, 5], [1, 0, 1, 0, 1]), (lambda x: \n True, [], []), (lambda x: True, [1, 2, 3, 4], [True]), (lambda x: True,\n 'ABCDEF', [True])]"], {}), "('keyfunc, data, expected_keys', [(lambda x: x % 2,\n [], []), (lambda x: x % 2, [1, 3, 5, 7, 2, 4, 6, 8], [1, 0]), (lambda x:\n x % 2, [1, 2, 3, 4, 5], [1, 0, 1, 0, 1]), (lambda x: True, [], []), (lambda\n x: True, [1, 2, 3, 4], [True]), (lambda x: True, 'ABCDEF', 
[True])])\n", (1723, 2004), False, 'import pytest\n'), ((2195, 2572), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""keyfunc, data, expected_groups"""', "[(lambda x: x % 2, [], []), (lambda x: x % 2, [1, 3, 5, 7, 2, 4, 6, 8], [[1,\n 3, 5, 7], [2, 4, 6, 8]]), (lambda x: x % 2, [1, 2, 3, 4, 5], [[1], [2],\n [3], [4], [5]]), (lambda x: True, [], []), (lambda x: True, [1, 2, 3, 4\n ], [[1, 2, 3, 4]]), (lambda x: True, 'ABCDEF', [['A', 'B', 'C', 'D',\n 'E', 'F']])]"], {}), "('keyfunc, data, expected_groups', [(lambda x: x % 2,\n [], []), (lambda x: x % 2, [1, 3, 5, 7, 2, 4, 6, 8], [[1, 3, 5, 7], [2,\n 4, 6, 8]]), (lambda x: x % 2, [1, 2, 3, 4, 5], [[1], [2], [3], [4], [5]\n ]), (lambda x: True, [], []), (lambda x: True, [1, 2, 3, 4], [[1, 2, 3,\n 4]]), (lambda x: True, 'ABCDEF', [['A', 'B', 'C', 'D', 'E', 'F']])])\n", (2218, 2572), False, 'import pytest\n'), ((598, 630), 'itertools.groupby', 'itertools.groupby', (['data', 'keyfunc'], {}), '(data, keyfunc)\n', (615, 630), False, 'import itertools\n'), ((1115, 1147), 'itertools.groupby', 'itertools.groupby', (['data', 'keyfunc'], {}), '(data, keyfunc)\n', (1132, 1147), False, 'import itertools\n'), ((2105, 2137), 'itertools.groupby', 'itertools.groupby', (['data', 'keyfunc'], {}), '(data, keyfunc)\n', (2122, 2137), False, 'import itertools\n'), ((2672, 2704), 'itertools.groupby', 'itertools.groupby', (['data', 'keyfunc'], {}), '(data, keyfunc)\n', (2689, 2704), False, 'import itertools\n'), ((1565, 1589), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1578, 1589), False, 'import pytest\n'), ((1610, 1642), 'itertools.groupby', 'itertools.groupby', (['data', 'keyfunc'], {}), '(data, keyfunc)\n', (1627, 1642), False, 'import itertools\n'), ((1422, 1435), 'iterators.invalid_iter.InvalidIter', 'InvalidIter', ([], {}), '()\n', (1433, 1435), False, 'from iterators.invalid_iter import InvalidIter\n')] |
import serialio
class Serial(object):
    """Thin wrapper around a serialio port that reads single whitespace-
    delimited tokens."""

    def __init__(self, port, baudrate, timeout):
        self.port = port
        self.baudrate = baudrate
        self.timeout = timeout
        self._openPort()

    def _openPort(self):
        # Open the underlying serial connection via serialio.
        self.hComm = serialio.Serial(self.port, self.baudrate)

    def read(self):
        """Read one line from the port and return its first token."""
        raw = serialio.read(self.hComm)
        # split() strips the trailing \r\n (or \n); the payload is token 0.
        return raw.split()[0]


ser = Serial("COM3", 9600, 1)
ser.read()
"serialio.read",
"serialio.Serial"
] | [((223, 264), 'serialio.Serial', 'serialio.Serial', (['self.port', 'self.baudrate'], {}), '(self.port, self.baudrate)\n', (238, 264), False, 'import serialio\n'), ((315, 340), 'serialio.read', 'serialio.read', (['self.hComm'], {}), '(self.hComm)\n', (328, 340), False, 'import serialio\n')] |
import numpy as np
class NumpyDynamic:
    """Append-only numpy buffer that doubles its capacity like a dynamic array."""

    def __init__(self, dtype, array_size=(100,)):
        self.data = np.zeros(array_size, dtype)
        self.array_size = list(array_size)
        self.size = 0

    def add(self, x):
        """Append x, growing the backing array when it is full."""
        if self.size == self.array_size[0]:
            # Double the leading dimension and copy the existing entries over.
            self.array_size[0] *= 2
            grown = np.zeros(self.array_size, self.data.dtype)
            grown[:self.size] = self.data
            self.data = grown
        self.data[self.size] = x
        self.size += 1

    def finalize(self):
        """Return a view trimmed to the elements actually added."""
        return self.data[:self.size]
"numpy.zeros"
] | [((112, 139), 'numpy.zeros', 'np.zeros', (['array_size', 'dtype'], {}), '(array_size, dtype)\n', (120, 139), True, 'import numpy as np\n'), ((330, 372), 'numpy.zeros', 'np.zeros', (['self.array_size', 'self.data.dtype'], {}), '(self.array_size, self.data.dtype)\n', (338, 372), True, 'import numpy as np\n')] |
from rest_framework import serializers
from api.models import RouteModel
class RouteDistanceSerializer(serializers.ModelSerializer):
    """Serializes a RouteModel down to its route_id and distance.

    The model's 'distance' field is exposed under the output key 'km'
    (read-only, so it is ignored on writes).
    """

    km = serializers.FloatField(source='distance', read_only=True)

    class Meta:
        model = RouteModel
        fields = ('route_id', 'km')
| [
"rest_framework.serializers.FloatField"
] | [((144, 201), 'rest_framework.serializers.FloatField', 'serializers.FloatField', ([], {'source': '"""distance"""', 'read_only': '(True)'}), "(source='distance', read_only=True)\n", (166, 201), False, 'from rest_framework import serializers\n')] |
import numpy as np
from abc import ABCMeta, abstractmethod
class Node(object):
    """Represents state in MCTS search tree.

    Args:
        state (object): The environment state corresponding to this node in the search tree.

    Note:
        Node object is immutable. Node is left without exit edges (empty dict) when it's terminal.
    """

    def __init__(self, state):
        self._state = state
        self._edges = None

    @property
    def state(self):
        """object: Environment state held by this node."""
        return self._state

    @property
    def edges(self):
        """dict: Mapping from actions to outgoing Edges (None until expanded)."""
        return self._edges

    def expand(self, edges):
        """Attach the action -> Edge mapping to this node.

        Args:
            edges (dict of Edges): Mapping from this node's possible actions to
                corresponding edges.
        """
        self._edges = edges

    def select_edge(self, c=1.):
        """Pick the (action, edge) pair that maximizes the UCB score.

        Args:
            c (float): Exploration constant, c >= 0; low c favors lucrative
                edges, high c favors rarely-visited ones. (Default: 1)

        Returns:
            tuple: (action, edge) with the highest UCB score, or
            None: if this is a terminal node without exit edges.
        """
        assert self.edges is not None, "This node hasn't been expanded yet!"
        if not self.edges:
            return None

        # Total visits over all outgoing edges (i.e. visits to this state).
        total_visits = sum(edge.num_visits for edge in self.edges.values())

        # Scan edges in insertion order, keeping the first strict maximum —
        # same tie-breaking as max() over the score dict.
        best_pair = None
        best_score = None
        for action, edge in self.edges.items():
            ucb = edge.qvalue + c * edge.prior * \
                np.sqrt(total_visits) / (1 + edge.num_visits)
            if best_score is None or ucb > best_score:
                best_pair = (action, edge)
                best_score = ucb
        return best_pair
class Edge(object):
"""Represents state-actions pair in MCTS search tree.
Args:
prior (float): Action probability from prior policy. (Default: 1.)
"""
def __init__(self, prior=1.):
self._prior = prior
self._next_node = None
self._reward = 0
self._qvalue = 0
self._num_visits = 0
def expand(self, next_node, reward):
"""Explore this edge.
Args:
next_node (Node): Node that this edge points to.
reward (float): Reward of transition represented by this edge.
"""
self._next_node = next_node
self._reward = reward
def update(self, return_t):
"""Update edge with data from child.
Args:
return_t (float): (Un)discounted return from timestep 't' (this edge).
"""
self._num_visits += 1
# This is formula for iteratively calculating average
# NOTE: You can check that first arbitrary value will be forgotten after fist update
self._qvalue += (return_t - self._qvalue) / self.num_visits
@property
def next_node(self):
"""next_node (Node): Node that this edge points to."""
return self._next_node
@property
def reward(self):
"""float: Reward of transition represented by this edge."""
return self._reward
@property
def qvalue(self):
"""float: Quality value of this edge state-action pair."""
return self._qvalue
@property
def prior(self):
"""float: Action probability from prior policy."""
return self._prior
@property
def num_visits(self):
"""int: Number of times this state-action pair was visited."""
return self._num_visits
| [
"numpy.sqrt"
] | [((2113, 2134), 'numpy.sqrt', 'np.sqrt', (['state_visits'], {}), '(state_visits)\n', (2120, 2134), True, 'import numpy as np\n')] |
import os
import random
from sklearn.metrics import mean_squared_error as mse
from core.composer.chain import Chain
from core.composer.composer import ComposerRequirements, DummyChainTypeEnum, DummyComposer
from core.models.data import OutputData
from core.models.model import *
from core.repository.dataset_types import NumericalDataTypesEnum, CategoricalDataTypesEnum
from core.repository.model_types_repository import (
ModelMetaInfoTemplate,
ModelTypesRepository
)
from core.repository.quality_metrics_repository import MetricsRepository, RegressionMetricsEnum
from core.repository.task_types import MachineLearningTasksEnum
from core.utils import project_root
# Pin both RNGs so composition results are reproducible run-to-run.
# NOTE(review): np appears to come from the star import of core.models.model — confirm.
random.seed(1)
np.random.seed(1)
import matplotlib.pyplot as plt
def compare_plot(predicted: OutputData, dataset_to_validate: InputData):
    """Plot the observed target series against the model's predictions."""
    figure, axis = plt.subplots()
    plt.plot(dataset_to_validate.target, linewidth=1, label="Observed")
    plt.plot(predicted.predict, linewidth=1, label="Predicted")
    axis.legend()
    plt.show()
def calculate_validation_metric(chain: Chain, dataset_to_validate: InputData) -> float:
    """Run the composite chain on the validation set, plot it, return RMSE."""
    # Execute the obtained composite model.
    prediction = chain.predict(dataset_to_validate)
    # Visual comparison of observed vs. predicted values.
    compare_plot(prediction, dataset_to_validate)
    # Quality assessment for the simulation results (root mean squared error).
    metric_value = mse(y_true=dataset_to_validate.target,
                       y_pred=prediction.predict,
                       squared=False)
    return metric_value
# the dataset was obtained from NEMO model simulation
# specify problem type
problem_class = MachineLearningTasksEnum.auto_regression
# a dataset that will be used as a train and test set during composition
file_path_train = 'cases/data/ts/metocean_data_train.csv'
full_path_train = os.path.join(str(project_root()), file_path_train)
dataset_to_compose = InputData.from_csv(full_path_train, task_type=problem_class)
# a dataset for a final validation of the composed model
file_path_test = 'cases/data/ts/metocean_data_test.csv'
full_path_test = os.path.join(str(project_root()), file_path_test)
dataset_to_validate = InputData.from_csv(full_path_test, task_type=problem_class)
# the search of the models provided by the framework that can be used as nodes in a chain for the selected task
models_repo = ModelTypesRepository()
available_model_types, _ = models_repo.search_models(
    desired_metainfo=ModelMetaInfoTemplate(input_type=NumericalDataTypesEnum.table,
                                           output_type=CategoricalDataTypesEnum.vector,
                                           task_type=problem_class,
                                           can_be_initial=True,
                                           can_be_secondary=True))
# the choice of the metric for the chain quality assessment during composition
metric_function = MetricsRepository().metric_by_id(RegressionMetricsEnum.RMSE)
# the choice and initialisation
# NOTE: a single-model "composition" — only an AR model, no secondary nodes.
single_composer_requirements = ComposerRequirements(primary=[ModelTypesIdsEnum.ar],
                                                     secondary=[])
chain_single = DummyComposer(
    DummyChainTypeEnum.flat).compose_chain(data=dataset_to_compose,
                                            initial_chain=None,
                                            composer_requirements=single_composer_requirements,
                                            metrics=metric_function)
# fit on the training set and visually compare the in-sample prediction
train_prediction = chain_single.fit(input_data=dataset_to_compose, verbose=True)
print("Composition finished")
compare_plot(train_prediction, dataset_to_compose)
# the quality assessment for the obtained composite models
rmse_on_valid_single = calculate_validation_metric(chain_single, dataset_to_validate)
print(f'Static RMSE is {round(rmse_on_valid_single, 3)}')
| [
"core.composer.composer.DummyComposer",
"core.repository.model_types_repository.ModelTypesRepository",
"core.utils.project_root",
"matplotlib.pyplot.plot",
"random.seed",
"sklearn.metrics.mean_squared_error",
"core.repository.model_types_repository.ModelMetaInfoTemplate",
"core.composer.composer.Compo... | [((694, 708), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (705, 708), False, 'import random\n'), ((2380, 2402), 'core.repository.model_types_repository.ModelTypesRepository', 'ModelTypesRepository', ([], {}), '()\n', (2400, 2402), False, 'from core.repository.model_types_repository import ModelMetaInfoTemplate, ModelTypesRepository\n'), ((3065, 3131), 'core.composer.composer.ComposerRequirements', 'ComposerRequirements', ([], {'primary': '[ModelTypesIdsEnum.ar]', 'secondary': '[]'}), '(primary=[ModelTypesIdsEnum.ar], secondary=[])\n', (3085, 3131), False, 'from core.composer.composer import ComposerRequirements, DummyChainTypeEnum, DummyComposer\n'), ((856, 870), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (868, 870), True, 'import matplotlib.pyplot as plt\n'), ((876, 943), 'matplotlib.pyplot.plot', 'plt.plot', (['dataset_to_validate.target'], {'linewidth': '(1)', 'label': '"""Observed"""'}), "(dataset_to_validate.target, linewidth=1, label='Observed')\n", (884, 943), True, 'import matplotlib.pyplot as plt\n'), ((949, 1008), 'matplotlib.pyplot.plot', 'plt.plot', (['predicted.predict'], {'linewidth': '(1)', 'label': '"""Predicted"""'}), "(predicted.predict, linewidth=1, label='Predicted')\n", (957, 1008), True, 'import matplotlib.pyplot as plt\n'), ((1033, 1043), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1041, 1043), True, 'import matplotlib.pyplot as plt\n'), ((1395, 1474), 'sklearn.metrics.mean_squared_error', 'mse', ([], {'y_true': 'dataset_to_validate.target', 'y_pred': 'predicted.predict', 'squared': '(False)'}), '(y_true=dataset_to_validate.target, y_pred=predicted.predict, squared=False)\n', (1398, 1474), True, 'from sklearn.metrics import mean_squared_error as mse\n'), ((1865, 1879), 'core.utils.project_root', 'project_root', ([], {}), '()\n', (1877, 1879), False, 'from core.utils import project_root\n'), ((2134, 2148), 'core.utils.project_root', 'project_root', ([], {}), '()\n', 
(2146, 2148), False, 'from core.utils import project_root\n'), ((2480, 2665), 'core.repository.model_types_repository.ModelMetaInfoTemplate', 'ModelMetaInfoTemplate', ([], {'input_type': 'NumericalDataTypesEnum.table', 'output_type': 'CategoricalDataTypesEnum.vector', 'task_type': 'problem_class', 'can_be_initial': '(True)', 'can_be_secondary': '(True)'}), '(input_type=NumericalDataTypesEnum.table, output_type=\n CategoricalDataTypesEnum.vector, task_type=problem_class,\n can_be_initial=True, can_be_secondary=True)\n', (2501, 2665), False, 'from core.repository.model_types_repository import ModelMetaInfoTemplate, ModelTypesRepository\n'), ((2935, 2954), 'core.repository.quality_metrics_repository.MetricsRepository', 'MetricsRepository', ([], {}), '()\n', (2952, 2954), False, 'from core.repository.quality_metrics_repository import MetricsRepository, RegressionMetricsEnum\n'), ((3201, 3239), 'core.composer.composer.DummyComposer', 'DummyComposer', (['DummyChainTypeEnum.flat'], {}), '(DummyChainTypeEnum.flat)\n', (3214, 3239), False, 'from core.composer.composer import ComposerRequirements, DummyChainTypeEnum, DummyComposer\n')] |
import os
import re
import shutil
def svnLockFiles(files):
    """Run 'svn lock' on every path in *files*."""
    joined = ' '.join(files)
    print('Locking files: ', joined)
    os.system('svn lock ' + joined)
def svnUnlockFiles(files):
    """Run 'svn unlock' on every path in *files*."""
    joined = ' '.join(files)
    print('Unlocking files: ', joined)
    os.system('svn unlock ' + joined)
# No special characters are allowed in oldStr except the '.' character which is handled correctly
def replaceStrings(file, oldStr, newStr):
    """Replace every occurrence of oldStr with newStr in *file*, in place.

    oldStr is treated literally except that '.' is escaped for the regex;
    other regex metacharacters in oldStr are NOT escaped (documented
    limitation of this helper).
    """
    print("Replacing string '%s' with '%s' in file '%s'." % (oldStr, newStr, file))
    tmpFileName = file + '.tmp'
    pattern = re.compile(oldStr.replace('.', r'\.'))
    # Context managers guarantee both handles are closed even if an
    # exception occurs mid-copy (the originals leaked on error).
    with open(file) as source, open(tmpFileName, 'w') as output:
        for line in source:
            if pattern.search(line):
                output.write(pattern.sub(newStr, line))
            else:
                output.write(line)
    # Swap the rewritten copy over the original.
    shutil.move(tmpFileName, file)
def removeLinesContaining(file, str):
    """Delete every line of *file* that matches *str* ('.' is escaped).

    Note: the parameter name 'str' shadows the builtin; it is kept for
    backward compatibility with existing callers.
    """
    print("Removing lines with '%s' in file '%s'." % (str, file))
    tmpFileName = file + '.tmp'
    # BUG FIX: the pattern was built from the undefined name 'oldStr'
    # (copy-paste from replaceStrings), which raised NameError on every
    # call; it must come from the 'str' parameter.
    pattern = re.compile(str.replace('.', r'\.'))
    with open(file) as source, open(tmpFileName, 'w') as output:
        for line in source:
            # Copy only lines that do NOT match.
            if not pattern.search(line):
                output.write(line)
    shutil.move(tmpFileName, file)
| [
"os.system",
"shutil.move"
] | [((143, 175), 'os.system', 'os.system', (["('svn lock ' + fileStr)"], {}), "('svn lock ' + fileStr)\n", (152, 175), False, 'import os\n'), ((283, 317), 'os.system', 'os.system', (["('svn unlock ' + fileStr)"], {}), "('svn unlock ' + fileStr)\n", (292, 317), False, 'import os\n'), ((1009, 1039), 'shutil.move', 'shutil.move', (['tmpFileName', 'file'], {}), '(tmpFileName, file)\n', (1020, 1039), False, 'import shutil\n'), ((1564, 1594), 'shutil.move', 'shutil.move', (['tmpFileName', 'file'], {}), '(tmpFileName, file)\n', (1575, 1594), False, 'import shutil\n')] |
import numpy as np
import time
import pytest
import jax.numpy as jnp
import jax.config as config
import torch
import tensorflow as tf
from tensornetwork.linalg import linalg
from tensornetwork import backends
from tensornetwork.backends.numpy import numpy_backend
from tensornetwork.backends.jax import jax_backend
#pylint: disable=no-member
config.update("jax_enable_x64", True)
np_real = [np.float32, np.float16, np.float64]
np_float = np_real + [np.complex64, np.complex128]
np_int = [np.int8, np.int16, np.int32, np.int64]
np_uint = [np.uint8, np.uint16, np.uint32, np.uint64]
np_dtypes = {"real": np_real, "float": np_float,
"rand": np_float,
"int": np_int + np_uint,
"all": np_real+ np_int + np_uint + [None, ]}
tf_real = [tf.float32, tf.float16, tf.float64]
tf_float = tf_real + [tf.complex64, tf.complex128]
tf_int = [tf.int8, tf.int16, tf.int32, tf.int64]
tf_uint = [tf.uint8, tf.uint16, tf.uint32, tf.uint64]
tf_dtypes = {"real": tf_real, "float": tf_float,
"rand": tf_real + [None, ],
"int": tf_int + tf_uint,
"all": tf_real + tf_int + tf_uint + [None, ]}
torch_float = [torch.float32, torch.float16, torch.float64]
torch_int = [torch.int8, torch.int16, torch.int32, torch.int64]
torch_uint = [torch.uint8]
torch_dtypes = {"real": torch_float, "float": torch_float,
"rand": [torch.float32, torch.float64, None],
"int": torch_int + torch_uint,
"all": torch_float + torch_int + torch_uint + [None, ]}
dtypes = {"pytorch": torch_dtypes,
"jax": np_dtypes, "numpy": np_dtypes, "tensorflow": tf_dtypes}
def test_eye(backend):
  """
  Checks that linalg.eye matches the backend's own eye().
  """
  N, M = 4, 6
  name = "Jeffrey"
  axis_names = ["Sam", "Blinkey"]
  be = backends.backend_factory.get_backend(backend)
  for dtype in dtypes[backend]["all"]:
    actual = linalg.eye(N, dtype=dtype, M=M, name=name, axis_names=axis_names,
                   backend=backend)
    np.testing.assert_allclose(actual.tensor, be.eye(N, dtype=dtype, M=M))
    assert actual.name == name
    for edge, expected_name in zip(actual.get_all_dangling(), axis_names):
      assert edge.name == expected_name
    assert actual.backend.name == backend
def test_zeros(backend):
  """
  Checks that linalg.zeros matches the backend's own zeros().
  """
  shape = (5, 10, 3)
  name = "Jeffrey"
  axis_names = ["Sam", "Blinkey", "Renaldo"]
  be = backends.backend_factory.get_backend(backend)
  for dtype in dtypes[backend]["all"]:
    actual = linalg.zeros(shape, dtype=dtype, name=name, axis_names=axis_names,
                     backend=backend)
    np.testing.assert_allclose(actual.tensor, be.zeros(shape, dtype=dtype))
    assert actual.name == name
    for edge, expected_name in zip(actual.get_all_dangling(), axis_names):
      assert edge.name == expected_name
    assert actual.backend.name == backend
def test_ones(backend):
  """
  Checks that linalg.ones matches the backend's own ones().
  """
  shape = (5, 10, 3)
  name = "Jeffrey"
  axis_names = ["Sam", "Blinkey", "Renaldo"]
  be = backends.backend_factory.get_backend(backend)
  for dtype in dtypes[backend]["all"]:
    actual = linalg.ones(shape, dtype=dtype, name=name, axis_names=axis_names,
                    backend=backend)
    np.testing.assert_allclose(actual.tensor, be.ones(shape, dtype=dtype))
    assert actual.name == name
    for edge, expected_name in zip(actual.get_all_dangling(), axis_names):
      assert edge.name == expected_name
    assert actual.backend.name == backend
def test_randn(backend):
  """
  Checks that linalg.randn reproduces the backend's randn() for a fixed seed.
  """
  shape = (5, 10, 3, 2)
  seed = int(time.time())
  np.random.seed(seed=seed)
  name = "Jeffrey"
  axis_names = ["Sam", "Blinkey", "Renaldo", "Jarvis"]
  be = backends.backend_factory.get_backend(backend)
  for dtype in dtypes[backend]["rand"]:
    actual = linalg.randn(shape, dtype=dtype, name=name, axis_names=axis_names,
                     backend=backend, seed=seed)
    np.testing.assert_allclose(actual.tensor,
                               be.randn(shape, dtype=dtype, seed=seed))
    assert actual.name == name
    for edge, expected_name in zip(actual.get_all_dangling(), axis_names):
      assert edge.name == expected_name
    assert actual.backend.name == backend
def test_random_uniform(backend):
  """
  Checks that linalg.random_uniform reproduces the backend for a fixed seed.
  """
  shape = (5, 10, 3, 2)
  seed = int(time.time())
  np.random.seed(seed=seed)
  boundaries = (-0.3, 10.5)
  name = "Jeffrey"
  axis_names = ["Sam", "Blinkey", "Renaldo", "Jarvis"]
  be = backends.backend_factory.get_backend(backend)
  for dtype in dtypes[backend]["rand"]:
    actual = linalg.random_uniform(shape, dtype=dtype, name=name,
                                  axis_names=axis_names, backend=backend,
                                  seed=seed, boundaries=boundaries)
    expected = be.random_uniform(shape, dtype=dtype, seed=seed,
                                 boundaries=boundaries)
    np.testing.assert_allclose(actual.tensor, expected)
    assert actual.name == name
    for edge, expected_name in zip(actual.get_all_dangling(), axis_names):
      assert edge.name == expected_name
    assert actual.backend.name == backend
| [
"jax.config.update",
"tensornetwork.linalg.linalg.zeros",
"tensornetwork.linalg.linalg.randn",
"numpy.testing.assert_allclose",
"tensornetwork.backends.backend_factory.get_backend",
"tensornetwork.linalg.linalg.eye",
"numpy.random.seed",
"tensornetwork.linalg.linalg.ones",
"time.time",
"tensornetw... | [((342, 379), 'jax.config.update', 'config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (355, 379), True, 'import jax.config as config\n'), ((1806, 1851), 'tensornetwork.backends.backend_factory.get_backend', 'backends.backend_factory.get_backend', (['backend'], {}), '(backend)\n', (1842, 1851), False, 'from tensornetwork import backends\n'), ((2476, 2521), 'tensornetwork.backends.backend_factory.get_backend', 'backends.backend_factory.get_backend', (['backend'], {}), '(backend)\n', (2512, 2521), False, 'from tensornetwork import backends\n'), ((3147, 3192), 'tensornetwork.backends.backend_factory.get_backend', 'backends.backend_factory.get_backend', (['backend'], {}), '(backend)\n', (3183, 3192), False, 'from tensornetwork import backends\n'), ((3777, 3802), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'seed'}), '(seed=seed)\n', (3791, 3802), True, 'import numpy as np\n'), ((3893, 3938), 'tensornetwork.backends.backend_factory.get_backend', 'backends.backend_factory.get_backend', (['backend'], {}), '(backend)\n', (3929, 3938), False, 'from tensornetwork import backends\n'), ((4548, 4573), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'seed'}), '(seed=seed)\n', (4562, 4573), True, 'import numpy as np\n'), ((4692, 4737), 'tensornetwork.backends.backend_factory.get_backend', 'backends.backend_factory.get_backend', (['backend'], {}), '(backend)\n', (4728, 4737), False, 'from tensornetwork import backends\n'), ((1901, 1988), 'tensornetwork.linalg.linalg.eye', 'linalg.eye', (['N'], {'dtype': 'dtype', 'M': 'M', 'name': 'name', 'axis_names': 'axis_names', 'backend': 'backend'}), '(N, dtype=dtype, M=M, name=name, axis_names=axis_names, backend=\n backend)\n', (1911, 1988), False, 'from tensornetwork.linalg import linalg\n'), ((2056, 2099), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tnI.tensor', 'npI'], {}), '(tnI.tensor, npI)\n', (2082, 2099), True, 'import numpy as np\n'), ((2571, 
2659), 'tensornetwork.linalg.linalg.zeros', 'linalg.zeros', (['shape'], {'dtype': 'dtype', 'name': 'name', 'axis_names': 'axis_names', 'backend': 'backend'}), '(shape, dtype=dtype, name=name, axis_names=axis_names, backend=\n backend)\n', (2583, 2659), False, 'from tensornetwork.linalg import linalg\n'), ((2730, 2773), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tnI.tensor', 'npI'], {}), '(tnI.tensor, npI)\n', (2756, 2773), True, 'import numpy as np\n'), ((3242, 3329), 'tensornetwork.linalg.linalg.ones', 'linalg.ones', (['shape'], {'dtype': 'dtype', 'name': 'name', 'axis_names': 'axis_names', 'backend': 'backend'}), '(shape, dtype=dtype, name=name, axis_names=axis_names, backend=\n backend)\n', (3253, 3329), False, 'from tensornetwork.linalg import linalg\n'), ((3398, 3441), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tnI.tensor', 'npI'], {}), '(tnI.tensor, npI)\n', (3424, 3441), True, 'import numpy as np\n'), ((3762, 3773), 'time.time', 'time.time', ([], {}), '()\n', (3771, 3773), False, 'import time\n'), ((3989, 4088), 'tensornetwork.linalg.linalg.randn', 'linalg.randn', (['shape'], {'dtype': 'dtype', 'name': 'name', 'axis_names': 'axis_names', 'backend': 'backend', 'seed': 'seed'}), '(shape, dtype=dtype, name=name, axis_names=axis_names, backend=\n backend, seed=seed)\n', (4001, 4088), False, 'from tensornetwork.linalg import linalg\n'), ((4170, 4213), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tnI.tensor', 'npI'], {}), '(tnI.tensor, npI)\n', (4196, 4213), True, 'import numpy as np\n'), ((4533, 4544), 'time.time', 'time.time', ([], {}), '()\n', (4542, 4544), False, 'import time\n'), ((4788, 4918), 'tensornetwork.linalg.linalg.random_uniform', 'linalg.random_uniform', (['shape'], {'dtype': 'dtype', 'name': 'name', 'axis_names': 'axis_names', 'backend': 'backend', 'seed': 'seed', 'boundaries': 'boundaries'}), '(shape, dtype=dtype, name=name, axis_names=axis_names,\n backend=backend, seed=seed, 
boundaries=boundaries)\n', (4809, 4918), False, 'from tensornetwork.linalg import linalg\n'), ((5111, 5154), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tnI.tensor', 'npI'], {}), '(tnI.tensor, npI)\n', (5137, 5154), True, 'import numpy as np\n')] |
"""main.py file representingcomparison statistics for Pyrunc module"""
# Python module(s)
from timeit import timeit
# Project module(s)
from Pyrunc import Pyrunc
def main():
    """Run Pyrunc demo comparisons between C-compiled and pure-Python code.

    Example 1 times a two-number adder; Example 2 times several
    implementations of a sum-of-first-n-natural-numbers routine.
    """
    pr_c = Pyrunc()
    # --------------------------------------------------------------------------------
    # ----------------Example 1: 2 Number adder---------------------------------------
    # --------------------------------------------------------------------------------
    print("Example 1:-")
    obj_id, obj = pr_c.build(
        """int two_number_adder(int a, int b) {
        return a+b;
        }"""
    )
    print(
        "\tTwo number adder demonstrating sum of 5 and 3, result:",
        obj.two_number_adder(5, 3),
    )
    # Comparison Example 1
    psetup = """def padder(a,b):
    return a+b"""
    csetup = """
from Pyrunc import Pyrunc
pr_c = Pyrunc()
obj_id, obj = pr_c.build('''int cadder(int a, int b) {
    return a+b;
    }''')
cadder = obj.cadder
"""
    print("Comparison:-")
    # Timings are scaled by 10**5 for readability.
    print(
        "\tC code:", timeit(stmt="cadder(30, 10)", setup=csetup, number=1000) * 10 ** 5
    )
    print(
        "\tPython:", timeit(stmt="padder(30, 10)", setup=psetup, number=1000) * 10 ** 5
    )
    # ---------------------------------------------------------------------------------
    # ----------------Example 2: Sum of first n natural number calculator--------------
    # ---------------------------------------------------------------------------------
    print("\n\nExample 2:-")
    obj_id2, obj2 = pr_c.build(
        """int sum_n_natural_numbers(int a)
        {
        int i,ans=0;
        for(i=1; i<=a; ++i)
            ans += i;
        return ans;
        }"""
    )
    # BUG FIX: corrected 'nuber' -> 'number' in the user-facing message.
    print(
        "\tSum of first n natural numbers with number 30, result:",
        obj2.sum_n_natural_numbers(30),
    )
    # Comparison
    c_setup = """
from Pyrunc import Pyrunc
pr_c = Pyrunc()
obj_id, obj = pr_c.build('''int csummer(int a) {
    int i, ans=0;
    for(i=0; i<=a; ++i)
        ans += i;
    return ans;
    }''')
csummer = obj.csummer
"""
    psetup1 = """def psummer(a):
    ans = 0
    for i in range(a):
        ans += i
    return ans"""
    psetup2 = """def psummer(a):
    return sum(list(range(a)))"""
    psetup3 = """def psummer(a):
    return sum([i for i in range(a)])"""
    print("Comparison:-")
    print("\tC code:", timeit(stmt="csummer(30)", setup=c_setup, number=1000))
    print("\tPython1:", timeit(stmt="psummer(30)", setup=psetup1, number=1000))
    print("\tPython2:", timeit(stmt="psummer(30)", setup=psetup2, number=1000))
    print("\tPython3:", timeit(stmt="psummer(30)", setup=psetup3, number=1000))
if __name__ == "__main__":
main()
| [
"timeit.timeit",
"Pyrunc.Pyrunc"
] | [((214, 222), 'Pyrunc.Pyrunc', 'Pyrunc', ([], {}), '()\n', (220, 222), False, 'from Pyrunc import Pyrunc\n'), ((2360, 2414), 'timeit.timeit', 'timeit', ([], {'stmt': '"""csummer(30)"""', 'setup': 'c_setup', 'number': '(1000)'}), "(stmt='csummer(30)', setup=c_setup, number=1000)\n", (2366, 2414), False, 'from timeit import timeit\n'), ((2440, 2494), 'timeit.timeit', 'timeit', ([], {'stmt': '"""psummer(30)"""', 'setup': 'psetup1', 'number': '(1000)'}), "(stmt='psummer(30)', setup=psetup1, number=1000)\n", (2446, 2494), False, 'from timeit import timeit\n'), ((2520, 2574), 'timeit.timeit', 'timeit', ([], {'stmt': '"""psummer(30)"""', 'setup': 'psetup2', 'number': '(1000)'}), "(stmt='psummer(30)', setup=psetup2, number=1000)\n", (2526, 2574), False, 'from timeit import timeit\n'), ((2600, 2654), 'timeit.timeit', 'timeit', ([], {'stmt': '"""psummer(30)"""', 'setup': 'psetup3', 'number': '(1000)'}), "(stmt='psummer(30)', setup=psetup3, number=1000)\n", (2606, 2654), False, 'from timeit import timeit\n'), ((1045, 1101), 'timeit.timeit', 'timeit', ([], {'stmt': '"""cadder(30, 10)"""', 'setup': 'csetup', 'number': '(1000)'}), "(stmt='cadder(30, 10)', setup=csetup, number=1000)\n", (1051, 1101), False, 'from timeit import timeit\n'), ((1150, 1206), 'timeit.timeit', 'timeit', ([], {'stmt': '"""padder(30, 10)"""', 'setup': 'psetup', 'number': '(1000)'}), "(stmt='padder(30, 10)', setup=psetup, number=1000)\n", (1156, 1206), False, 'from timeit import timeit\n')] |
from django.urls import reverse_lazy, reverse
from django.views.generic import TemplateView
from exporter.applications.services import post_applications, post_open_general_licences_applications
from exporter.apply_for_a_licence.forms.open_general_licences import (
open_general_licence_forms,
open_general_licence_submit_success_page,
)
from exporter.apply_for_a_licence.forms.triage_questions import (
opening_question,
export_licence_questions,
MOD_questions,
transhipment_questions,
trade_control_licence_questions,
)
from exporter.apply_for_a_licence.validators import validate_opening_question, validate_open_general_licences
from exporter.core.constants import PERMANENT, CaseTypes
from exporter.core.services import post_open_general_licence_cases
from lite_forms.views import SingleFormView, MultiFormView
from core.auth.views import LoginRequiredMixin
class LicenceType(LoginRequiredMixin, SingleFormView):
    """Entry point: ask which licence type the exporter wants to apply for."""

    def init(self, request, **kwargs):
        self.form = opening_question()
        self.action = validate_opening_question

    def get_success_url(self):
        chosen_type = self.get_validated_data()["licence_type"]
        return reverse_lazy(f"apply_for_a_licence:{chosen_type}_questions")
class ExportLicenceQuestions(LoginRequiredMixin, MultiFormView):
    """Multi-step question flow for a standard export licence application."""

    def init(self, request, **kwargs):
        self.forms = export_licence_questions(request, None)

    def get_action(self):
        # OGEL applications are posted to a dedicated endpoint.
        if self.request.POST.get("application_type") == CaseTypes.OGEL:
            return post_open_general_licences_applications
        return post_applications

    def on_submission(self, request, **kwargs):
        posted = request.POST.copy()
        self.forms = export_licence_questions(
            request, posted.get("application_type"), posted.get("goodstype_category")
        )

    def get_success_url(self):
        if self.request.POST.get("application_type") == CaseTypes.OGEL:
            return reverse_lazy("apply_for_a_licence:ogl_questions", kwargs={"ogl": CaseTypes.OGEL})
        return reverse_lazy("applications:task_list", kwargs={"pk": self.get_validated_data()["id"]})
class TradeControlLicenceQuestions(LoginRequiredMixin, MultiFormView):
    """Multi-step question flow for a trade control licence application."""

    def init(self, request, **kwargs):
        self.forms = trade_control_licence_questions(request)
        self.action = post_applications

    def get_success_url(self):
        if self.request.POST.get("application_type") == CaseTypes.OGTCL:
            return reverse_lazy("apply_for_a_licence:ogl_questions", kwargs={"ogl": CaseTypes.OGTCL})
        return reverse_lazy("applications:task_list", kwargs={"pk": self.get_validated_data()["id"]})
class TranshipmentQuestions(LoginRequiredMixin, MultiFormView):
    """Multi-step question flow for a transhipment licence application."""

    def init(self, request, **kwargs):
        self.forms = transhipment_questions(request)
        self.action = post_applications
        # Transhipment applications are always permanent exports.
        self.data = {"export_type": PERMANENT}

    def get_success_url(self):
        if self.request.POST.get("application_type") == CaseTypes.OGTL:
            return reverse_lazy("apply_for_a_licence:ogl_questions", kwargs={"ogl": CaseTypes.OGTL})
        return reverse_lazy("applications:task_list", kwargs={"pk": self.get_validated_data()["id"]})
class MODClearanceQuestions(LoginRequiredMixin, MultiFormView):
    """Multi-step question flow for MOD clearance applications."""

    def init(self, request, **kwargs):
        self.forms = MOD_questions(None)
        self.action = post_applications

    def on_submission(self, request, **kwargs):
        application_type = request.POST.copy().get("application_type")
        self.forms = MOD_questions(application_type)

    def get_success_url(self):
        return reverse_lazy("applications:task_list", kwargs={"pk": self.get_validated_data()["id"]})
class OpenGeneralLicenceQuestions(LoginRequiredMixin, MultiFormView):
    """Question flow for registering an open general licence (OGL)."""

    def init(self, request, **kwargs):
        self.forms = open_general_licence_forms(request, **kwargs)
        self.action = validate_open_general_licences

    def get_success_url(self):
        # Register the OGL case before redirecting to the success page.
        post_open_general_licence_cases(self.request, self.get_validated_data())
        url = reverse(
            "apply_for_a_licence:ogl_submit",
            kwargs={"ogl": self.kwargs["ogl"], "pk": self.get_validated_data()["open_general_licence"]},
        )
        return url + "?animate=True"
class OpenGeneralLicenceSubmit(LoginRequiredMixin, TemplateView):
    """Render the success page shown after an OGL registration is submitted."""

    def get(self, request, *args, **kwargs):
        # Rendering is delegated entirely to the forms helper.
        return open_general_licence_submit_success_page(request, **kwargs)
| [
"exporter.apply_for_a_licence.forms.open_general_licences.open_general_licence_forms",
"exporter.apply_for_a_licence.forms.triage_questions.transhipment_questions",
"exporter.apply_for_a_licence.forms.triage_questions.MOD_questions",
"exporter.apply_for_a_licence.forms.triage_questions.trade_control_licence_q... | [((1008, 1026), 'exporter.apply_for_a_licence.forms.triage_questions.opening_question', 'opening_question', ([], {}), '()\n', (1024, 1026), False, 'from exporter.apply_for_a_licence.forms.triage_questions import opening_question, export_licence_questions, MOD_questions, transhipment_questions, trade_control_licence_questions\n'), ((1187, 1248), 'django.urls.reverse_lazy', 'reverse_lazy', (['f"""apply_for_a_licence:{licence_type}_questions"""'], {}), "(f'apply_for_a_licence:{licence_type}_questions')\n", (1199, 1248), False, 'from django.urls import reverse_lazy, reverse\n'), ((1376, 1415), 'exporter.apply_for_a_licence.forms.triage_questions.export_licence_questions', 'export_licence_questions', (['request', 'None'], {}), '(request, None)\n', (1400, 1415), False, 'from exporter.apply_for_a_licence.forms.triage_questions import opening_question, export_licence_questions, MOD_questions, transhipment_questions, trade_control_licence_questions\n'), ((2344, 2384), 'exporter.apply_for_a_licence.forms.triage_questions.trade_control_licence_questions', 'trade_control_licence_questions', (['request'], {}), '(request)\n', (2375, 2384), False, 'from exporter.apply_for_a_licence.forms.triage_questions import opening_question, export_licence_questions, MOD_questions, transhipment_questions, trade_control_licence_questions\n'), ((2898, 2929), 'exporter.apply_for_a_licence.forms.triage_questions.transhipment_questions', 'transhipment_questions', (['request'], {}), '(request)\n', (2920, 2929), False, 'from exporter.apply_for_a_licence.forms.triage_questions import opening_question, export_licence_questions, MOD_questions, transhipment_questions, trade_control_licence_questions\n'), ((3488, 3507), 'exporter.apply_for_a_licence.forms.triage_questions.MOD_questions', 'MOD_questions', (['None'], {}), '(None)\n', (3501, 3507), False, 'from exporter.apply_for_a_licence.forms.triage_questions import 
opening_question, export_licence_questions, MOD_questions, transhipment_questions, trade_control_licence_questions\n'), ((3769, 3826), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""applications:task_list"""'], {'kwargs': "{'pk': pk}"}), "('applications:task_list', kwargs={'pk': pk})\n", (3781, 3826), False, 'from django.urls import reverse_lazy, reverse\n'), ((3959, 4004), 'exporter.apply_for_a_licence.forms.open_general_licences.open_general_licence_forms', 'open_general_licence_forms', (['request'], {}), '(request, **kwargs)\n', (3985, 4004), False, 'from exporter.apply_for_a_licence.forms.open_general_licences import open_general_licence_forms, open_general_licence_submit_success_page\n'), ((4550, 4609), 'exporter.apply_for_a_licence.forms.open_general_licences.open_general_licence_submit_success_page', 'open_general_licence_submit_success_page', (['request'], {}), '(request, **kwargs)\n', (4590, 4609), False, 'from exporter.apply_for_a_licence.forms.open_general_licences import open_general_licence_forms, open_general_licence_submit_success_page\n'), ((1989, 2075), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""apply_for_a_licence:ogl_questions"""'], {'kwargs': "{'ogl': CaseTypes.OGEL}"}), "('apply_for_a_licence:ogl_questions', kwargs={'ogl': CaseTypes.\n OGEL})\n", (2001, 2075), False, 'from django.urls import reverse_lazy, reverse\n'), ((2153, 2210), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""applications:task_list"""'], {'kwargs': "{'pk': pk}"}), "('applications:task_list', kwargs={'pk': pk})\n", (2165, 2210), False, 'from django.urls import reverse_lazy, reverse\n'), ((2549, 2636), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""apply_for_a_licence:ogl_questions"""'], {'kwargs': "{'ogl': CaseTypes.OGTCL}"}), "('apply_for_a_licence:ogl_questions', kwargs={'ogl': CaseTypes.\n OGTCL})\n", (2561, 2636), False, 'from django.urls import reverse_lazy, reverse\n'), ((2714, 2771), 'django.urls.reverse_lazy', 'reverse_lazy', 
(['"""applications:task_list"""'], {'kwargs': "{'pk': pk}"}), "('applications:task_list', kwargs={'pk': pk})\n", (2726, 2771), False, 'from django.urls import reverse_lazy, reverse\n'), ((3140, 3226), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""apply_for_a_licence:ogl_questions"""'], {'kwargs': "{'ogl': CaseTypes.OGTL}"}), "('apply_for_a_licence:ogl_questions', kwargs={'ogl': CaseTypes.\n OGTL})\n", (3152, 3226), False, 'from django.urls import reverse_lazy, reverse\n'), ((3304, 3361), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""applications:task_list"""'], {'kwargs': "{'pk': pk}"}), "('applications:task_list', kwargs={'pk': pk})\n", (3316, 3361), False, 'from django.urls import reverse_lazy, reverse\n')] |
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import librosa
import os
import sys
import time
from datetime import datetime
from pathlib import Path
from src.python.audio_transforms import *
from src.python.model_predict import *
from src.python.graphics import plot_graph
# Hardcoding a few variables
max_chroma_sample = 6145  # target width for chroma feature padding
max_spectrogram_sample = 6145  # target width for spectrogram/MFCC padding
# (index, label) pairs in the order the model emits class scores
model_classes = [(0, 'artifact'), (1, 'extra'), (2, 'murmur'), (3, 'normal')]
# Directories
DIR_ROOT = Path().resolve()  # current working directory at import time
DIR_PARENT = Path().resolve().parent
def import_wav(filepath):
    '''
    Load a wav file and trim leading/trailing silence.

    Args:
        filepath: path to the .wav file.

    Returns:
        Tuple (x, sr): trimmed amplitude array and its sample rate.
        (The original docstring stated the reverse order.)

    Raises:
        FileNotFoundError: if no file exists at *filepath*.
    '''
    try:
        x, sr = librosa.load(filepath)
        x, _ = librosa.effects.trim(x)
    except FileNotFoundError as err:
        # BUG FIX: fixed message typo ('file a file') and chain the
        # original exception so the traceback keeps its context.
        raise FileNotFoundError(f'could not find a file at {filepath}') from err
    return x, sr
# ----------------------------------
# MAIN FUNCTION --------------------
# ----------------------------------
def main(wav_path,
         max_chroma_sample,
         max_spect_sample,
         dt_string):
    """Generate spectrogram/MFCC/chromagram images for one heartbeat wav.

    Args:
        wav_path: path to the input .wav recording.
        max_chroma_sample: padding width for chroma features.
            NOTE(review): currently unused -- padding uses max_spect_sample.
        max_spect_sample: width every feature array is padded to.
        dt_string: timestamp string used to build unique file names.

    Returns:
        dict mapping feature name -> path of the model-sized image file.
    """
    audio_results = {}
    base_path = Path(DIR_ROOT, 'demo_files', 'results')
    # 1. Open wav file with Librosa (trimmed amplitude + sample rate).
    x, sr = import_wav(wav_path)
    # 2. Spectrogram on a dB scale.
    audio_results['spectogram'] = amp_to_db(
        freq_array=stft_transform(amp_array=x),
        sr=sr,
        ref=np.max
    )
    # 3. MFCC.
    audio_results['mfcc'] = mfcc_spectogram(
        amp_array=x,
        sr=sr
    )
    # 4. Chromagram.
    audio_results['chromagram'] = chromagram(
        amp_array=x,
        sr=sr
    )
    # 5. Render user-facing images.
    for key, value in audio_results.items():
        plot_graph(
            audio_array=value,
            viz_type=key,
            out_file=Path(base_path, 'user_images', "_".join([dt_string, key]) + '.png'),
            user=True,
            dpi=150
        )
    # 6. Pad every feature array to a uniform width.
    # BUG FIX: previously padded with the module-level constant
    # max_spectrogram_sample, silently ignoring the max_spect_sample
    # parameter; the only visible caller passes that same constant, so
    # behaviour is unchanged for existing code.
    for key, value in audio_results.items():
        audio_results[key] = pad_along_axis(value, max_spect_sample)
    # 7. Render model-input images and collect their paths.
    img_path = {}
    for key, value in audio_results.items():
        file_path = Path(base_path, 'model_images', "_".join([key, dt_string]) + '.png')
        plot_graph(
            audio_array=value,
            viz_type=key,
            out_file=file_path,
            user=False,
            dpi=200
        )
        img_path[key] = str(file_path)
    # Return all 3 images to be pushed to the model for predictions.
    return img_path
if __name__ == '__main__':
    # CLI usage: script.py <path-to-wav>
    wav_path = sys.argv[1]
    if not Path(wav_path).is_file():
        raise FileNotFoundError()
    # Unix timestamp (seconds) used to make output file names unique.
    dt_string = str(round(datetime.now().timestamp()))
    hb_images = main(
        wav_path,
        max_chroma_sample,
        max_spectrogram_sample,
        dt_string
    )
    # Collect one class-score vector per feature image.
    results = []
    for key, value in hb_images.items():
        output, predict = predict_heartbeat(key, value, DIR_ROOT)
        results.append(output.detach().numpy()[0])
    results = np.array(results)
    # Average the per-feature scores and take the best class index.
    index = results.mean(axis=0).argmax()
    hb_predict = model_classes[index][1].title()
    if hb_predict.lower() == 'artifact':
        # NOTE(review): 'backgound' typo is in a runtime string; left as-is here.
        m = "Too much backgound noise. Try again!"
    else:
        m = f"Your heartbeat is....... {hb_predict}"
    print(m)
| [
"pathlib.Path",
"src.python.graphics.plot_graph",
"numpy.array",
"datetime.datetime.now",
"librosa.effects.trim",
"librosa.load"
] | [((1090, 1129), 'pathlib.Path', 'Path', (['DIR_ROOT', '"""demo_files"""', '"""results"""'], {}), "(DIR_ROOT, 'demo_files', 'results')\n", (1094, 1129), False, 'from pathlib import Path\n'), ((3100, 3117), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (3108, 3117), True, 'import numpy as np\n'), ((481, 487), 'pathlib.Path', 'Path', ([], {}), '()\n', (485, 487), False, 'from pathlib import Path\n'), ((666, 688), 'librosa.load', 'librosa.load', (['filepath'], {}), '(filepath)\n', (678, 688), False, 'import librosa\n'), ((700, 723), 'librosa.effects.trim', 'librosa.effects.trim', (['x'], {}), '(x)\n', (720, 723), False, 'import librosa\n'), ((2307, 2395), 'src.python.graphics.plot_graph', 'plot_graph', ([], {'audio_array': 'value', 'viz_type': 'key', 'out_file': 'file_path', 'user': '(False)', 'dpi': '(200)'}), '(audio_array=value, viz_type=key, out_file=file_path, user=False,\n dpi=200)\n', (2317, 2395), False, 'from src.python.graphics import plot_graph\n'), ((511, 517), 'pathlib.Path', 'Path', ([], {}), '()\n', (515, 517), False, 'from pathlib import Path\n'), ((2664, 2678), 'pathlib.Path', 'Path', (['wav_path'], {}), '(wav_path)\n', (2668, 2678), False, 'from pathlib import Path\n'), ((2751, 2765), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2763, 2765), False, 'from datetime import datetime\n')] |
import socket
import struct
import json
import time
import os
import platform
from optparse import OptionParser
import sys
import xml.etree.ElementTree as ET
import config
from device_config import BASE_CONST
# NOTE(review): 192.168.3.11 is a unicast address; true multicast groups live in
# 224.0.0.0/4 — confirm this matches the Shure discovery protocol in use.
MCAST_GRP = '192.168.3.11'
MCAST_PORT = 8427
# Default location of Shure's DCID map as installed by Shure Update Utility (macOS).
DEFAULT_DCID_XML = '/Applications/Shure Update Utility.app/Contents/Resources/DCIDMap.xml'
deviceList = {}  # DCID -> {'model': ..., 'model_name': ..., 'band': ...}
discovered = []  # receivers seen on the network, kept sorted by 'ip'
# https://stackoverflow.com/questions/603852/multicast-in-python
def discover():
    """Listen forever on (MCAST_GRP, MCAST_PORT) for Shure discovery packets.

    Restores the DCID device map from the app config dir first, joins the
    multicast group, then feeds each datagram to process_discovery_packet().
    Never returns; malformed packets are dropped silently (best-effort).
    """
    dcid_restore_from_file(config.app_dir('dcid.json'))
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    # sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) #mac fix
    sock.bind((MCAST_GRP, MCAST_PORT)) # use MCAST_GRP instead of '' to listen only
    # to MCAST_GRP, not all groups on MCAST_PORT
    mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    while True:
        data, (ip, _) = sock.recvfrom(1024)
        data = data.decode('UTF-8', errors="ignore")
        try:
            process_discovery_packet(ip, data)
        except:
            # Best-effort listener: unknown/garbled packets are ignored.
            pass
def process_discovery_packet(ip, data):
    """Decode one discovery datagram and record the receiver it describes."""
    device_id = dcid_find(data)
    record = dcid_get(device_id)
    rx_type, channels = dcid_model_lookup(record['model'])
    # Only print when run as a script, never when imported as a module.
    if __name__ == '__main__':
        print('RX: {} at: {} DCID: {} BAND: {} CHANNELS: {}'.format(rx_type, ip, device_id, record['band'], channels))
    add_rx_to_dlist(ip, rx_type, channels)
def dcid_find(data):
    """Extract the DCID from a discovery payload.

    The payload is a comma-separated list of parenthesised fields; the
    DCID field looks like ``(cd:<id>)``. Returns '' when no such field
    exists; if several match, the last one wins.
    """
    result = ''
    for field in data.split(','):
        field = field.strip('()')
        if 'cd:' in field:
            result = field.split('cd:')[-1]
    return result
def dcid_get(dcid):
    # Look up the device record for a DCID; raises KeyError for unknown IDs.
    return deviceList[dcid]
def dcid_model_lookup(name):
    """Map a DCID model name to ``(device_type, channel_count)``.

    Scans each device type's DCID_MODEL table in BASE_CONST; returns
    None when the model name is unknown.
    """
    for device_type, spec in BASE_CONST.items():
        models = spec['DCID_MODEL']
        if name in models:
            return (device_type, models[name])
    return None
def add_rx_to_dlist(ip, rx_type, channels):
    """Record a discovered receiver, refreshing its timestamp if already known."""
    for entry in discovered:
        if entry['ip'] == ip:
            entry['timestamp'] = time.time()
            break
    else:
        discovered.append({
            'ip': ip,
            'type': rx_type,
            'channels': channels,
            'timestamp': time.time()
        })
    # Keep the list ordered by address for stable display.
    discovered.sort(key=lambda entry: entry['ip'])
def time_filterd_discovered_list():
    """Return only the receivers seen within the last 30 seconds."""
    return [entry for entry in discovered
            if time.time() - entry['timestamp'] < 30]
def DCID_Parse(file):
    """Populate the global ``deviceList`` from a Shure DCIDMap XML document.

    Args:
        file: path or file-like object accepted by ``xml.etree.ElementTree.parse``.

    Each ``MapEntry`` element contributes one record per ``DCID`` child,
    keyed by the DCID text, with 'model', 'model_name' and 'band' fields.
    """
    tree = ET.parse(file)
    root = tree.getroot()
    for device in root.findall('./MapEntry'):
        model = device.find('Key').text
        model_name = device.find('ModelName').text
        for dcid_el in device.find('DCIDList').iter('DCID'):
            # 'band' is an optional attribute; default to '' instead of the
            # original bare try/except around the attrib lookup.
            band = dcid_el.attrib.get('band', '')
            deviceList[dcid_el.text] = {'model': model,
                                        'model_name': model_name,
                                        'band': band}
def dcid_save_to_file(file):
    """Write the global deviceList to *file* as sorted, pretty-printed JSON."""
    payload = json.dumps(deviceList, indent=2, separators=(',', ': '), sort_keys=True)
    with open(file, 'w') as fh:
        fh.write(payload + '\n')
def dcid_restore_from_file(file):
    """Replace the global deviceList with the JSON contents of *file*."""
    global deviceList
    with open(file, 'r') as fh:
        deviceList = json.load(fh)
def updateDCIDmap(inputFile, outputFile):
    # Convert a DCIDMap.xml into the JSON device map used at runtime.
    DCID_Parse(inputFile)
    dcid_save_to_file(outputFile)
def DCIDMapCheck():
    """Return the default DCIDMap.xml path on macOS if it exists, else None."""
    if platform.system() != 'Darwin':
        return None
    if not os.path.isfile(DEFAULT_DCID_XML):
        return None
    return DEFAULT_DCID_XML
def main():
    """CLI entry point: convert a DCIDMap.xml to dcid.json, or run discovery."""
    usage = "usage: %prog [options] arg"
    parser = OptionParser(usage)
    parser.add_option("-i", "--input", dest="input_file",
                      help="DCID input file")
    parser.add_option("-o", "--output", dest="output_file",
                      help="output file")
    parser.add_option("-c", "--convert", default=False,
                      action="store_true", dest="convert",
                      help="Generate dcid.json from input DCIDMap.xml file")
    # -d defaults to True, so a plain invocation falls through to discovery.
    parser.add_option("-d", "--discover", default=True,
                      action="store_true", dest="discover",
                      help="Discover Shure devices on the network")
    (options, args) = parser.parse_args()
    if options.convert:
        # Conversion mode: requires -o; input comes from -i or, on macOS,
        # the Wireless Workbench / Shure Update Utility install location.
        if not options.output_file:
            print("use -o to specify a DCID output file destination")
            sys.exit()
        if options.input_file:
            p = options.input_file
        elif DCIDMapCheck():
            p = DCIDMapCheck()
        else:
            print("Specify an input DCIDMap.xml file with -i or install Wireless Workbench")
            sys.exit()
        if p:
            updateDCIDmap(p, options.output_file)
            print("Converting {} to {}".format(p, options.output_file))
        # Conversion never falls through to discovery.
        sys.exit()
    if options.discover:
        print("lets discover some stuff")
        discover()
# Run the CLI only when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"xml.etree.ElementTree.parse",
"socket.socket",
"device_config.BASE_CONST.items",
"optparse.OptionParser",
"os.path.isfile",
"platform.system",
"config.app_dir",
"socket.inet_aton",
"sys.exit",
"json.load",
"time.time",
"json.dump"
] | [((535, 603), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM', 'socket.IPPROTO_UDP'], {}), '(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n', (548, 603), False, 'import socket\n'), ((1916, 1934), 'device_config.BASE_CONST.items', 'BASE_CONST.items', ([], {}), '()\n', (1932, 1934), False, 'from device_config import BASE_CONST\n'), ((2761, 2775), 'xml.etree.ElementTree.parse', 'ET.parse', (['file'], {}), '(file)\n', (2769, 2775), True, 'import xml.etree.ElementTree as ET\n'), ((3863, 3882), 'optparse.OptionParser', 'OptionParser', (['usage'], {}), '(usage)\n', (3875, 3882), False, 'from optparse import OptionParser\n'), ((495, 522), 'config.app_dir', 'config.app_dir', (['"""dcid.json"""'], {}), "('dcid.json')\n", (509, 522), False, 'import config\n'), ((934, 961), 'socket.inet_aton', 'socket.inet_aton', (['MCAST_GRP'], {}), '(MCAST_GRP)\n', (950, 961), False, 'import socket\n'), ((2332, 2343), 'time.time', 'time.time', ([], {}), '()\n', (2341, 2343), False, 'import time\n'), ((3331, 3405), 'json.dump', 'json.dump', (['deviceList', 'f'], {'indent': '(2)', 'separators': "(',', ': ')", 'sort_keys': '(True)'}), "(deviceList, f, indent=2, separators=(',', ': '), sort_keys=True)\n", (3340, 3405), False, 'import json\n'), ((3536, 3548), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3545, 3548), False, 'import json\n'), ((3714, 3746), 'os.path.isfile', 'os.path.isfile', (['DEFAULT_DCID_XML'], {}), '(DEFAULT_DCID_XML)\n', (3728, 3746), False, 'import os\n'), ((5067, 5077), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5075, 5077), False, 'import sys\n'), ((3680, 3697), 'platform.system', 'platform.system', ([], {}), '()\n', (3695, 3697), False, 'import platform\n'), ((4652, 4662), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4660, 4662), False, 'import sys\n'), ((2494, 2505), 'time.time', 'time.time', ([], {}), '()\n', (2503, 2505), False, 'import time\n'), ((2649, 2660), 'time.time', 'time.time', ([], {}), '()\n', (2658, 2660), False, 
'import time\n'), ((4911, 4921), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4919, 4921), False, 'import sys\n')] |
# Minimal example: log a fastai training run to Neptune via NeptuneCallback.
import fastai
from neptune.new.integrations.fastai import NeptuneCallback
from fastai.vision.all import *
import neptune.new as neptune
# Create/attach a Neptune run; the api_token placeholder must be replaced
# with a real token (or the NEPTUNE_API_TOKEN environment variable).
run = neptune.init(
    project="common/fastai-integration", api_token="<PASSWORD>", tags="basic"
)
path = untar_data(URLs.MNIST_TINY)  # download the tiny MNIST sample dataset
dls = ImageDataLoaders.from_csv(path)
# Log all training phases of the learner
learn = cnn_learner(dls, resnet18, cbs=[NeptuneCallback(run=run, base_namespace="experiment")])
learn.fit_one_cycle(2)
learn.fit_one_cycle(1)
run.stop()  # flush pending data and close the Neptune run
| [
"neptune.new.integrations.fastai.NeptuneCallback",
"neptune.new.init"
] | [((143, 234), 'neptune.new.init', 'neptune.init', ([], {'project': '"""common/fastai-integration"""', 'api_token': '"""<PASSWORD>"""', 'tags': '"""basic"""'}), "(project='common/fastai-integration', api_token='<PASSWORD>',\n tags='basic')\n", (155, 234), True, 'import neptune.new as neptune\n'), ((393, 446), 'neptune.new.integrations.fastai.NeptuneCallback', 'NeptuneCallback', ([], {'run': 'run', 'base_namespace': '"""experiment"""'}), "(run=run, base_namespace='experiment')\n", (408, 446), False, 'from neptune.new.integrations.fastai import NeptuneCallback\n')] |
import unittest
import numpy as np
from xcube.webapi.controllers.time_series import get_time_series_info, get_time_series_for_point, \
get_time_series_for_geometry, get_time_series_for_geometry_collection
from ..helpers import new_test_service_context
class TimeSeriesControllerTest(unittest.TestCase):
    """Tests for the time-series controller endpoints (point, geometry,
    geometry collection, layer info), pinned against the bundled
    'demo' / 'demo-1w' cubes from ``new_test_service_context``."""
    def test_get_time_series_for_point_invalid_lat_and_lon(self):
        """A point outside the dataset extent yields an empty result list."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_point(ctx, 'demo', 'conc_tsm',
                                                  lon=-150.0, lat=-30.0)
        expected_dict = {'results': []}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_point(self):
        """Point query returns one entry per time step, invalid steps included."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_point(ctx, 'demo', 'conc_tsm',
                                                  lon=2.1, lat=51.4,
                                                  start_date=np.datetime64('2017-01-15'),
                                                  end_date=np.datetime64('2017-01-29'))
        expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
                                      'result': {'average': 3.534773588180542,
                                                 'totalCount': 1,
                                                 'validCount': 1}},
                                     {'date': '2017-01-25T09:35:51Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                     {'date': '2017-01-26T10:50:17Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                     {'date': '2017-01-28T09:58:11Z',
                                      'result': {'average': 20.12085723876953,
                                                 'totalCount': 1,
                                                 'validCount': 1}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_point_one_valid(self):
        """max_valids=1 truncates the series after the first valid entry."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_point(ctx, 'demo', 'conc_tsm',
                                                  lon=2.1, lat=51.4,
                                                  start_date=np.datetime64('2017-01-15'),
                                                  end_date=np.datetime64('2017-01-29'),
                                                  max_valids=1)
        expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
                                      'result': {'average': 3.534773588180542,
                                                 'totalCount': 1,
                                                 'validCount': 1}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_point_only_valids(self):
        """max_valids=-1 filters out all invalid (all-NaN) time steps."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_point(ctx, 'demo', 'conc_tsm',
                                                  lon=2.1, lat=51.4,
                                                  start_date=np.datetime64('2017-01-15'),
                                                  end_date=np.datetime64('2017-01-29'),
                                                  max_valids=-1)
        expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
                                      'result': {'average': 3.534773588180542,
                                                 'totalCount': 1,
                                                 'validCount': 1}},
                                     {'date': '2017-01-28T09:58:11Z',
                                      'result': {'average': 20.12085723876953,
                                                 'totalCount': 1,
                                                 'validCount': 1}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_point_with_uncertainty(self):
        """The 'demo-1w' cube additionally reports an 'uncertainty' value."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_point(ctx, 'demo-1w', 'conc_tsm',
                                                  lon=2.1, lat=51.4,
                                                  start_date=np.datetime64('2017-01-15'),
                                                  end_date=np.datetime64('2017-01-29'))
        expected_dict = {'results': [{'date': '2017-01-22T00:00:00Z',
                                      'result': {'average': 3.534773588180542,
                                                 'uncertainty': 0.0,
                                                 'totalCount': 1,
                                                 'validCount': 1}},
                                     {'date': '2017-01-29T00:00:00Z',
                                      'result': {'average': 20.12085723876953,
                                                 'uncertainty': 0.0,
                                                 'totalCount': 1,
                                                 'validCount': 1}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_geometry_point(self):
        """A Point geometry behaves exactly like a direct point query."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_geometry(ctx, 'demo', 'conc_tsm',
                                                     dict(type="Point", coordinates=[2.1, 51.4]),
                                                     start_date=np.datetime64('2017-01-15'),
                                                     end_date=np.datetime64('2017-01-29'))
        expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
                                      'result': {'average': 3.534773588180542,
                                                 'totalCount': 1,
                                                 'validCount': 1}},
                                     {'date': '2017-01-25T09:35:51Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                     {'date': '2017-01-26T10:50:17Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                     {'date': '2017-01-28T09:58:11Z',
                                      'result': {'average': 20.12085723876953,
                                                 'totalCount': 1,
                                                 'validCount': 1}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_geometry_polygon(self):
        """A Polygon geometry aggregates over all cells inside the ring."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_geometry(ctx, 'demo', 'conc_tsm',
                                                     dict(type="Polygon", coordinates=[[
                                                         [1., 51.], [2., 51.], [2., 52.], [1., 52.], [1., 51.]
                                                     ]]))
        expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
                                      'result': {'average': 56.0228561816751,
                                                 'totalCount': 1,
                                                 'validCount': 122738}},
                                     {'date': '2017-01-25T09:35:51Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                     {'date': '2017-01-26T10:50:17Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                     {'date': '2017-01-28T09:58:11Z',
                                      'result': {'average': 49.71656646340396,
                                                 'totalCount': 1,
                                                 'validCount': 132716}},
                                     {'date': '2017-01-30T10:46:34Z',
                                      'result': {'average': None, 'totalCount': 1, 'validCount': 0}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_geometry_polygon_one_valid(self):
        """max_valids=1 also applies to polygon aggregation."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_geometry(ctx, 'demo', 'conc_tsm',
                                                     dict(type="Polygon", coordinates=[[
                                                         [1., 51.], [2., 51.], [2., 52.], [1., 52.], [1., 51.]
                                                     ]]), max_valids=1)
        expected_dict = {'results': [{'date': '2017-01-16T10:09:22Z',
                                      'result': {'average': 56.0228561816751,
                                                 'totalCount': 1,
                                                 'validCount': 122738}}]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_geometries_incl_point(self):
        """A GeometryCollection yields one result list per member geometry."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_geometry_collection(ctx,
                                                                 'demo', 'conc_tsm',
                                                                 dict(type="GeometryCollection",
                                                                      geometries=[
                                                                          dict(type="Point", coordinates=[2.1, 51.4])]),
                                                                 start_date=np.datetime64('2017-01-15'),
                                                                 end_date=np.datetime64('2017-01-29'))
        expected_dict = {'results': [[{'date': '2017-01-16T10:09:22Z',
                                       'result': {'average': 3.534773588180542,
                                                  'totalCount': 1,
                                                  'validCount': 1}},
                                      {'date': '2017-01-25T09:35:51Z',
                                       'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                      {'date': '2017-01-26T10:50:17Z',
                                       'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                      {'date': '2017-01-28T09:58:11Z',
                                       'result': {'average': 20.12085723876953,
                                                  'totalCount': 1,
                                                  'validCount': 1}}]]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_for_geometries_incl_polygon(self):
        """GeometryCollection with a Polygon member matches the polygon query."""
        ctx = new_test_service_context()
        time_series = get_time_series_for_geometry_collection(ctx,
                                                                 'demo', 'conc_tsm',
                                                                 dict(type="GeometryCollection",
                                                                      geometries=[dict(type="Polygon", coordinates=[[
                                                                          [1., 51.], [2., 51.], [2., 52.], [1., 52.],
                                                                          [1., 51.]
                                                                      ]])]))
        expected_dict = {'results': [[{'date': '2017-01-16T10:09:22Z',
                                       'result': {'average': 56.0228561816751,
                                                  'totalCount': 1,
                                                  'validCount': 122738}},
                                      {'date': '2017-01-25T09:35:51Z',
                                       'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                      {'date': '2017-01-26T10:50:17Z',
                                       'result': {'average': None, 'totalCount': 1, 'validCount': 0}},
                                      {'date': '2017-01-28T09:58:11Z',
                                       'result': {'average': 49.71656646340396,
                                                  'totalCount': 1,
                                                  'validCount': 132716}},
                                      {'date': '2017-01-30T10:46:34Z',
                                       'result': {'average': None, 'totalCount': 1, 'validCount': 0}}]]}
        self.assertEqual(expected_dict, time_series)
    def test_get_time_series_info(self):
        """The info endpoint lists every layer with its dates and bounds."""
        self.maxDiff = None
        ctx = new_test_service_context()
        info = get_time_series_info(ctx)
        expected_dict = self._get_expected_info_dict()
        self.assertEqual(expected_dict, info)
    @staticmethod
    def _get_expected_info_dict():
        """Build the expected layer-info structure for both demo cubes."""
        expected_dict = {'layers': []}
        bounds = {'xmin': 0.0, 'ymin': 50.0,
                  'xmax': 5.0, 'ymax': 52.5}
        demo_times = ['2017-01-16T10:09:22Z',
                      '2017-01-25T09:35:51Z',
                      '2017-01-26T10:50:17Z',
                      '2017-01-28T09:58:11Z',
                      '2017-01-30T10:46:34Z']
        demo_variables = ['c2rcc_flags',
                          'conc_chl',
                          'conc_tsm',
                          'kd489',
                          'quality_flags']
        for demo_variable in demo_variables:
            dict_variable = {'name': f'demo.{demo_variable}', 'dates': demo_times, 'bounds': bounds}
            expected_dict['layers'].append(dict_variable)
        demo1w_times = ['2017-01-22T00:00:00Z', '2017-01-29T00:00:00Z', '2017-02-05T00:00:00Z']
        # The weekly cube exposes each variable plus its *_stdev companion.
        for demo_variable in demo_variables:
            dict_variable = {'name': f'demo-1w.{demo_variable}', 'dates': demo1w_times, 'bounds': bounds}
            expected_dict['layers'].append(dict_variable)
            dict_variable = {'name': f'demo-1w.{demo_variable}_stdev', 'dates': demo1w_times, 'bounds': bounds}
            expected_dict['layers'].append(dict_variable)
        return expected_dict
| [
"xcube.webapi.controllers.time_series.get_time_series_for_point",
"xcube.webapi.controllers.time_series.get_time_series_info",
"numpy.datetime64"
] | [((441, 514), 'xcube.webapi.controllers.time_series.get_time_series_for_point', 'get_time_series_for_point', (['ctx', '"""demo"""', '"""conc_tsm"""'], {'lon': '(-150.0)', 'lat': '(-30.0)'}), "(ctx, 'demo', 'conc_tsm', lon=-150.0, lat=-30.0)\n", (466, 514), False, 'from xcube.webapi.controllers.time_series import get_time_series_info, get_time_series_for_point, get_time_series_for_geometry, get_time_series_for_geometry_collection\n'), ((12635, 12660), 'xcube.webapi.controllers.time_series.get_time_series_info', 'get_time_series_info', (['ctx'], {}), '(ctx)\n', (12655, 12660), False, 'from xcube.webapi.controllers.time_series import get_time_series_info, get_time_series_for_point, get_time_series_for_geometry, get_time_series_for_geometry_collection\n'), ((943, 970), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-15"""'], {}), "('2017-01-15')\n", (956, 970), True, 'import numpy as np\n'), ((1029, 1056), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-29"""'], {}), "('2017-01-29')\n", (1042, 1056), True, 'import numpy as np\n'), ((2319, 2346), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-15"""'], {}), "('2017-01-15')\n", (2332, 2346), True, 'import numpy as np\n'), ((2405, 2432), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-29"""'], {}), "('2017-01-29')\n", (2418, 2432), True, 'import numpy as np\n'), ((3132, 3159), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-15"""'], {}), "('2017-01-15')\n", (3145, 3159), True, 'import numpy as np\n'), ((3218, 3245), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-29"""'], {}), "('2017-01-29')\n", (3231, 3245), True, 'import numpy as np\n'), ((4237, 4264), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-15"""'], {}), "('2017-01-15')\n", (4250, 4264), True, 'import numpy as np\n'), ((4323, 4350), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-29"""'], {}), "('2017-01-29')\n", (4336, 4350), True, 'import numpy as np\n'), ((5441, 5468), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-15"""'], 
{}), "('2017-01-15')\n", (5454, 5468), True, 'import numpy as np\n'), ((5530, 5557), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-29"""'], {}), "('2017-01-29')\n", (5543, 5557), True, 'import numpy as np\n'), ((9507, 9534), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-15"""'], {}), "('2017-01-15')\n", (9520, 9534), True, 'import numpy as np\n'), ((9607, 9634), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-29"""'], {}), "('2017-01-29')\n", (9620, 9634), True, 'import numpy as np\n')] |
from math import sqrt
from math import atan2
from math import asin
beta = 0.1  # filter gain: weight of the gradient-descent correction step
sampleFreq = 10.0  # integration rate in Hz — assumes 10 Hz sensor updates; TODO confirm
# Cheapest pure-Python reciprocal square root (exponent form, no math.sqrt call).
def invsqrt(number):
    """Return the inverse square root of *number*, i.e. 1 / sqrt(number)."""
    return number ** -0.5
def update_IMU( gx, gy, gz, ax, ay, az, q0, q1, q2, q3):
    """One orientation-filter step using gyroscope + accelerometer only.

    gx/gy/gz are gyro rates in degrees/sec; ax/ay/az the accelerometer
    reading (any consistent unit — it is normalised); q0..q3 the current
    orientation quaternion (scalar first). Returns the updated, normalised
    quaternion. Uses the module-level ``beta`` gain and ``sampleFreq`` (Hz).
    """
    # Degrees/sec to radians/sec (0.0174533 ~= pi/180).
    gx = gx * 0.0174533
    gy = gy * 0.0174533
    gz = gz * 0.0174533
    # Rate of change of quaternion from the gyroscope.
    qDot1 = 0.5 * (-q1 * gx - q2 * gy - q3 * gz)
    qDot2 = 0.5 * (q0 * gx + q2 * gz - q3 * gy)
    qDot3 = 0.5 * (q0 * gy - q1 * gz + q3 * gx)
    qDot4 = 0.5 * (q0 * gz + q1 * gy - q2 * gx)
    # Apply the accelerometer correction only for a valid (non-zero) reading.
    if not ((ax == 0.0) and (ay == 0.0) and (az == 0.0)):
        # Normalise the accelerometer measurement.
        norm = invsqrt(ax * ax + ay * ay + az * az)
        ax = ax * norm
        ay = ay * norm
        az = az * norm
        # Auxiliary variables to avoid repeated arithmetic.
        two_q0 = 2.0 * q0
        two_q1 = 2.0 * q1
        two_q2 = 2.0 * q2
        two_q3 = 2.0 * q3
        four_q0 = 4.0 * q0
        four_q1 = 4.0 * q1
        four_q2 = 4.0 * q2
        eight_q1 = 8.0 * q1
        eight_q2 = 8.0 * q2
        q0q0 = q0 * q0
        q1q1 = q1 * q1
        q2q2 = q2 * q2
        q3q3 = q3 * q3
        # Gradient-descent corrective step (objective-function gradient).
        s0 = four_q0 * q2q2 + two_q2 * ax + four_q0 * q1q1 - two_q1 * ay
        s1 = four_q1 * q3q3 - two_q3 * ax + 4.0 * q0q0 * q1 - two_q0 * ay - four_q1 + eight_q1 * q1q1 + eight_q1 * q2q2 + four_q1 * az
        s2 = 4.0 * q0q0 * q2 + two_q0 * ax + four_q2 * q3q3 - two_q3 * ay - four_q2 + eight_q2 * q1q1 + eight_q2 * q2q2 + four_q2 * az
        s3 = 4.0 * q1q1 * q3 - two_q1 * ax + 4.0 * q2q2 * q3 - two_q2 * ay
        # Normalise the step magnitude.
        norm = invsqrt(s0 * s0 + s1 * s1 + s2 * s2 + s3 * s3)
        # print(s0," ", s1," ", s2," ", s3, " ", norm, " \n")
        s0 = s0 * norm
        s1 = s1 * norm
        s2 = s2 * norm
        s3 = s3 * norm
        # Apply the feedback step, weighted by beta.
        qDot1 = qDot1 - beta * s0
        qDot2 = qDot2 - beta * s1
        qDot3 = qDot3 - beta * s2
        qDot4 = qDot4 - beta * s3
        #print(norm ,"\n")
        #print(s0," ", s1," ", s2," ", s3, " \n")
        #print(qDot1," ", qDot2," ", qDot3," ", qDot4, " \n")
    # Integrate the rate of change over one sample period.
    q0 = q0 + qDot1 * (1.0 / sampleFreq)
    q1 = q1 + qDot2 * (1.0 / sampleFreq)
    q2 = q2 + qDot3 * (1.0 / sampleFreq)
    q3 = q3 + qDot4 * (1.0 / sampleFreq)
    # Normalise the quaternion.
    norm = invsqrt(q0 * q0 + q1 * q1 + q2 * q2 + q3 * q3)
    q0 = q0 * norm
    q1 = q1 * norm
    q2 = q2 * norm
    q3 = q3 * norm
    return q0, q1, q2, q3
def update( gx, gy, gz, ax, ay, az, mx, my, mz, q0, q1, q2, q3):
    """One orientation-filter step using gyroscope, accelerometer and
    magnetometer. Same units/conventions as update_IMU; mx/my/mz is the
    magnetometer reading. Returns the updated, normalised quaternion.
    """
    # Fall back to the IMU-only update when the magnetometer reading is
    # invalid (all zeros) to avoid NaNs in the normalisation.
    if (mx == 0.0) and (my == 0.0) and (mz == 0.0) :
        q0, q1, q2, q3 = update_IMU(gx, gy, gz, ax, ay, az, q0, q1, q2, q3)
        return q0, q1, q2, q3
    # Degrees/sec to radians/sec.
    gx = gx * 0.0174533
    gy = gy * 0.0174533
    gz = gz * 0.0174533
    # Rate of change of quaternion from the gyroscope.
    qDot1 = 0.5 * (-q1 * gx - q2 * gy - q3 * gz)
    qDot2 = 0.5 * (q0 * gx + q2 * gz - q3 * gy)
    qDot3 = 0.5 * (q0 * gy - q1 * gz + q3 * gx)
    qDot4 = 0.5 * (q0 * gz + q1 * gy - q2 * gx)
    if not ((ax == 0.0) and (ay == 0.0) and (az == 0.0)):
        # Normalise the accelerometer measurement.
        norm = invsqrt(ax * ax + ay * ay + az * az)
        ax = ax * norm
        ay = ay * norm
        az = az * norm
        # Normalise the magnetometer measurement.
        norm = invsqrt(mx * mx + my * my + mz * mz)
        mx = mx * norm
        my = my * norm
        mz = mz * norm
        # Auxiliary variables to avoid repeated arithmetic.
        two_q0mx = 2.0 * q0 * mx
        two_q0my = 2.0 * q0 * my
        two_q0mz = 2.0 * q0 * mz
        two_q1mx = 2.0 * q1 * mx
        two_q0 = 2.0 * q0
        two_q1 = 2.0 * q1
        two_q2 = 2.0 * q2
        two_q3 = 2.0 * q3
        two_q0q2 = 2.0 * q0 * q2
        two_q2q3 = 2.0 * q2 * q3
        q0q0 = q0 * q0
        q0q1 = q0 * q1
        q0q2 = q0 * q2
        q0q3 = q0 * q3
        q1q1 = q1 * q1
        q1q2 = q1 * q2
        q1q3 = q1 * q3
        q2q2 = q2 * q2
        q2q3 = q2 * q3
        q3q3 = q3 * q3
        # Reference direction of Earth's magnetic field.
        hx = mx * q0q0 - two_q0my * q3 + two_q0mz * q2 + mx * q1q1 + two_q1 * my * q2 + two_q1 * mz * q3 - mx * q2q2 - mx * q3q3
        hy = two_q0mx * q3 + my * q0q0 - two_q0mz * q1 + two_q1mx * q2 - my * q1q1 + my * q2q2 + two_q2 * mz * q3 - my * q3q3
        two_bx = sqrt(hx * hx + hy * hy)
        two_bz = -two_q0mx * q2 + two_q0my * q1 + mz * q0q0 + two_q1mx * q3 - mz * q1q1 + two_q2 * my * q3 - mz * q2q2 + mz * q3q3
        four_bx = 2.0 * two_bx
        four_bz = 2.0 * two_bz
        # Gradient-descent corrective step.
        s0 = -two_q2 * (2.0 * q1q3 - two_q0q2 - ax) + two_q1 * (2.0 * q0q1 + two_q2q3 - ay) - two_bz * q2 * (two_bx * (0.5 - q2q2 - q3q3) + two_bz * (q1q3 - q0q2) - mx) + (-two_bx * q3 + two_bz * q1) * (two_bx * (q1q2 - q0q3) + two_bz * (q0q1 + q2q3) - my) + two_bx * q2 * (two_bx * (q0q2 + q1q3) + two_bz * (0.5 - q1q1 - q2q2) - mz)
        s1 = two_q3 * (2.0 * q1q3 - two_q0q2 - ax) + two_q0 * (2.0 * q0q1 + two_q2q3 - ay) - 4.0 * q1 * (1 - 2.0 * q1q1 - 2.0 * q2q2 - az) + two_bz * q3 * (two_bx * (0.5 - q2q2 - q3q3) + two_bz * (q1q3 - q0q2) - mx) + (two_bx * q2 + two_bz * q0) * (two_bx * (q1q2 - q0q3) + two_bz * (q0q1 + q2q3) - my) + (two_bx * q3 - four_bz * q1) * (two_bx * (q0q2 + q1q3) + two_bz * (0.5 - q1q1 - q2q2) - mz)
        s2 = -two_q0 * (2.0 * q1q3 - two_q0q2 - ax) + two_q3 * (2.0 * q0q1 + two_q2q3 - ay) - 4.0 * q2 * (1 - 2.0 * q1q1 - 2.0 * q2q2 - az) + (-four_bx * q2 - two_bz * q0) * (two_bx * (0.5 - q2q2 - q3q3) + two_bz * (q1q3 - q0q2) - mx) + (two_bx * q1 + two_bz * q3) * (two_bx * (q1q2 - q0q3) + two_bz * (q0q1 + q2q3) - my) + (two_bx * q0 - four_bz * q2) * (two_bx * (q0q2 + q1q3) + two_bz * (0.5 - q1q1 - q2q2) - mz)
        s3 = two_q1 * (2.0 * q1q3 - two_q0q2 - ax) + two_q2 * (2.0 * q0q1 + two_q2q3 - ay) + (-four_bx * q3 + two_bz * q1) * (two_bx * (0.5 - q2q2 - q3q3) + two_bz * (q1q3 - q0q2) - mx) + (-two_bx * q0 + two_bz * q2) * (two_bx * (q1q2 - q0q3) + two_bz * (q0q1 + q2q3) - my) + two_bx * q1 * (two_bx * (q0q2 + q1q3) + two_bz * (0.5 - q1q1 - q2q2) - mz)
        # Normalise the step magnitude.
        norm = invsqrt(s0 * s0 + s1 * s1 + s2 * s2 + s3 * s3)
        s0 = s0 * norm
        s1 = s1 * norm
        s2 = s2 * norm
        s3 = s3 * norm
        # Apply the feedback step, weighted by beta.
        qDot1 = qDot1 - beta * s0
        qDot2 = qDot2 - beta * s1
        qDot3 = qDot3 - beta * s2
        qDot4 = qDot4 - beta * s3
    # Integrate the rate of change over one sample period.
    q0 = q0 + qDot1 * (1.0 / sampleFreq)
    q1 = q1 + qDot2 * (1.0 / sampleFreq)
    q2 = q2 + qDot3 * (1.0 / sampleFreq)
    q3 = q3 + qDot4 * (1.0 / sampleFreq)
    # Normalise the quaternion.
    norm = invsqrt(q0 * q0 + q1 * q1 + q2 * q2 + q3 * q3)
    q0 = q0 * norm
    q1 = q1 * norm
    q2 = q2 * norm
    q3 = q3 * norm
    return q0, q1, q2, q3
def compute_angles(q0, q1, q2, q3):
    """Convert a unit quaternion (scalar first) to Euler angles in degrees.

    Returns (roll, pitch, yaw); yaw is shifted by +180 so it spans 0..360.
    """
    roll_rad = atan2(q0 * q1 + q2 * q3, 0.5 - q1 * q1 - q2 * q2)
    pitch_rad = asin(-2.0 * (q1 * q3 - q0 * q2))
    yaw_rad = atan2(q1 * q2 + q0 * q3, 0.5 - q2 * q2 - q3 * q3)
    # 57.29578 ~= 180/pi; the original constant is kept so results match exactly.
    return roll_rad * 57.29578, pitch_rad * 57.29578, yaw_rad * 57.29578 + 180.0
| [
"math.asin",
"math.sqrt",
"math.atan2"
] | [((6068, 6117), 'math.atan2', 'atan2', (['(q0 * q1 + q2 * q3)', '(0.5 - q1 * q1 - q2 * q2)'], {}), '(q0 * q1 + q2 * q3, 0.5 - q1 * q1 - q2 * q2)\n', (6073, 6117), False, 'from math import atan2\n'), ((6120, 6152), 'math.asin', 'asin', (['(-2.0 * (q1 * q3 - q0 * q2))'], {}), '(-2.0 * (q1 * q3 - q0 * q2))\n', (6124, 6152), False, 'from math import asin\n'), ((6157, 6206), 'math.atan2', 'atan2', (['(q1 * q2 + q0 * q3)', '(0.5 - q2 * q2 - q3 * q3)'], {}), '(q1 * q2 + q0 * q3, 0.5 - q2 * q2 - q3 * q3)\n', (6162, 6206), False, 'from math import atan2\n'), ((3703, 3726), 'math.sqrt', 'sqrt', (['(hx * hx + hy * hy)'], {}), '(hx * hx + hy * hy)\n', (3707, 3726), False, 'from math import sqrt\n')] |
import setuptools
if __name__ == '__main__':
    # Annotated example setup script. Two keyword arguments were misspelled
    # in the original (`extras_requires`, `tests_requires`); setuptools
    # silently ignores unknown options, so those features never worked.
    setuptools.setup(
        name='Name',
        version='0.1',
        # Auto-detect packages below the current directory.
        packages=setuptools.find_packages(exclude=['tests', 'docs']),
        # Entry points extend setuptools with custom commands and expose
        # functions as standalone console scripts.
        entry_points={
            # The following would add: python setup.py command_name
            'distutils.commands': [
                'command_name = your_package:YourClass',
            ],
            # The following makes `spam` runnable from the shell.
            'console_scripts': [
                'spam = your_package:SpamClass',
            ],
        },
        # Runtime dependencies; pip requirement syntax (name, ==, >=, ranges).
        install_requires=['docutils>=0.3'],
        # FIX: the keyword is `extras_require` (not `extras_requires`);
        # with the misspelling, `pip install name[all]` silently installed
        # nothing extra.
        extras_require={
            'all': ['python-utils'],
        },
        # Packages needed to run this setup script itself; downloaded but
        # not installed.
        setup_requires=['pytest-runner'],
        # FIX: likewise `tests_require` (not `tests_requires`) — the test
        # dependencies for `python setup.py pytest` were being ignored.
        tests_require=['pytest'],
        # Non-Python files to include in the package.
        package_data={
            # include (restructured text) documentation files from any directory
            '': ['*.rst'],
            # include text files from the eggs package
            'eggs': ['*.txt'],
        },
        # Install as a real directory, never as a zipped egg, so source and
        # data files remain accessible on disk.
        zip_safe=False,
        # PyPI metadata shown on the package page.
        author='<NAME>',
        author_email='<EMAIL>',
        # Short one-line description.
        description='Description for the name package',
        # Typically the README contents.
        long_description="A very long description",
        # One of the standard OSI licenses: https://opensource.org/licenses
        license='BSD',
        # Homepage url for the package.
        url='https://wol.ph/',
    )
| [
"setuptools.find_packages"
] | [((256, 307), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'exclude': "['tests', 'docs']"}), "(exclude=['tests', 'docs'])\n", (280, 307), False, 'import setuptools\n')] |
"""
# Interaction Tracker
# @license http://www.apache.org/licenses/LICENSE-2.0
# Author @ <NAME>, Zaki
"""
from analytics.models import (Log, ActionLog)
from rest_framework import serializers
class LogSerializer(serializers.ModelSerializer):
    """Serializes a single analytics Log entry (all tracked event fields)."""
    class Meta:
        model = Log
        # NOTE(review): 'prevoius_visit_timestamp' looks misspelled but must
        # match the Log model's field name — verify before renaming anywhere.
        fields = ('app','appuser','country','screen_resolution','user_agent','action_name', 'entry_screen', 'exit_screen','visit_time', 'first_visit_timestamp' ,'prevoius_visit_timestamp','language', 'event_action','event_category','event_name','event_value')
class ActionLogSerializer(serializers.ModelSerializer):
    """Serializes an ActionLog together with its nested Log entries.

    ``action_name`` is a hidden field fixed to "Request". ``create``
    implements writable nesting: the parent ActionLog row is written
    first, then one Log row per nested entry.
    """
    logs = LogSerializer(many=True)
    action_name = serializers.HiddenField(default="Request")

    class Meta:
        model = ActionLog
        fields = ('action_name', 'logs')

    def create(self, validated_data):
        """Create the ActionLog plus its nested Log rows; return the parent."""
        logs_data = validated_data.pop('logs')
        actionlog = ActionLog.objects.create(**validated_data)
        # Readability fix: the original reused `logs_data` as the loop
        # variable, shadowing the list being iterated.
        for log_data in logs_data:
            Log.objects.create(actionlog=actionlog, **log_data)
        return actionlog
| [
"analytics.models.ActionLog.objects.create",
"rest_framework.serializers.HiddenField",
"analytics.models.Log.objects.create"
] | [((658, 700), 'rest_framework.serializers.HiddenField', 'serializers.HiddenField', ([], {'default': '"""Request"""'}), "(default='Request')\n", (681, 700), False, 'from rest_framework import serializers\n'), ((891, 933), 'analytics.models.ActionLog.objects.create', 'ActionLog.objects.create', ([], {}), '(**validated_data)\n', (915, 933), False, 'from analytics.models import Log, ActionLog\n'), ((982, 1034), 'analytics.models.Log.objects.create', 'Log.objects.create', ([], {'actionlog': 'actionlog'}), '(actionlog=actionlog, **logs_data)\n', (1000, 1034), False, 'from analytics.models import Log, ActionLog\n')] |
"""
Description:
Requirements: pySerial, wxPython Phoenix
glossary and of other descriptions:
DMM - digital multimeter
PSU - power supply
SBC - single board computer
INS - general instrument commands
GEN - general sequence instructions
"""
import json
import logging
import serial
import serialfunctions as sf
import sys
import time
import wx
from wx.lib.pubsub import setuparg1
from wx.lib.pubsub import pub
#------------------------------------------------#
# workbench
#------------------------------------------------#
class PowerSupply(wx.Panel):
    """wx panel driving a serial-controlled bench power supply (PSU).

    Builds one voltage/current control pair per channel listed in the
    device description ``data`` and opens the serial link on construction.
    """
    def __init__(self, parent, port, data):
        wx.Panel.__init__(self, parent)
        self.psu_connection = None
        self.port = port
        self.manufacturer = data["manufacturer"]
        self.send_bytes = data["sendbytes"]
        self.end_line = data["endline"]  # command terminator for this PSU
        self.channels = data["channels"]
        sizer = wx.BoxSizer(wx.VERTICAL)
        hsizer = wx.BoxSizer(wx.HORIZONTAL)
        text = wx.StaticText(self)
        text.SetLabel("Note: channel numbers do not necessarily indicate left-to-right"
                      +" on the power supply itself")
        hsizer.Add(text, 0, wx.ALL|wx.EXPAND, 5)
        hsizer2 = wx.BoxSizer(wx.HORIZONTAL)
        self.volt_channels = {}
        self.amp_channels = {}
        for n in self.channels:
            channel_box = wx.StaticBox(self, label="Channel " +str(n))
            channel_box_sizer = wx.StaticBoxSizer(channel_box, wx.HORIZONTAL)
            volt_sizer = wx.BoxSizer(wx.VERTICAL)
            self.volt_channels[n] = wx.TextCtrl(self)
            volt_set = wx.Button(self, label="Set V", size=(-1, 24))
            # TODO(review): volt_set/amp_set have no event bindings here —
            # confirm whether a parent binds them, else "Set" does nothing.
            volt_sizer.Add(self.volt_channels[n], 0, wx.ALL|wx.EXPAND, 5)
            volt_sizer.Add(volt_set, 0, wx.ALL|wx.EXPAND, 5)
            amp_sizer = wx.BoxSizer(wx.VERTICAL)
            self.amp_channels[n] = wx.TextCtrl(self)
            amp_set = wx.Button(self, label="Set A", size=(-1, 24))
            amp_sizer.Add(self.amp_channels[n], 0, wx.ALL|wx.EXPAND, 5)
            amp_sizer.Add(amp_set, 0, wx.ALL|wx.EXPAND, 5)
            channel_box_sizer.Add(volt_sizer, 1, wx.ALL|wx.EXPAND, 5)
            channel_box_sizer.Add(amp_sizer, 1, wx.ALL|wx.EXPAND, 5)
            hsizer2.Add(channel_box_sizer, 0, wx.ALL|wx.EXPAND, 5)
        sizer.Add(hsizer, 0, wx.ALL|wx.EXPAND, 5)
        sizer.Add(hsizer2, 1, wx.ALL|wx.EXPAND, 5)
        self.SetSizer(sizer)
        self.ConnectToPSU(self.port)
    def ConnectToPSU(self, port):
        """Open the serial link to the PSU and populate the channel readouts."""
        # 9600 7O2 framing — presumably device-specific; confirm against the
        # supply's manual before reusing for another instrument.
        ser = serial.Serial(port=port,
                            baudrate=9600,
                            parity=serial.PARITY_ODD,
                            stopbits=serial.STOPBITS_TWO,
                            bytesize=serial.SEVENBITS)
        print(ser)
        ser.isOpen()
        self.psu_connection = ser
        self.RefreshReadings()
    def RefreshReadings(self):
        """Query every channel's voltage and current limit and display them."""
        if not self.psu_connection:
            return
        # Voltage of each output in Volts.
        for ch in self.volt_channels:
            reading = self.SendToSerial("V" + str(ch) + "?")
            self.volt_channels[ch].SetValue(reading)
        # Current limit of each output in Amps.
        for ch in self.amp_channels:
            reading = self.SendToSerial("I" + str(ch) + "?")
            self.amp_channels[ch].SetValue(reading)
    def SendToSerial(self, input):
        """Send one command string to the PSU and return its reply as text.

        Appends the device line terminator, then drains the input buffer
        after a short settle delay.
        """
        ser = self.psu_connection
        ser.write(bytes(input + self.end_line, "utf8"))
        time.sleep(0.1)
        out = ""
        while ser.inWaiting() > 0:
            out += str(ser.read(1), "utf8")
        return out
    def UpdateChannel(self, event):
        """Event handler: refresh the channel-1 voltage display."""
        if not self.psu_connection:
            return
        # FIX: SendToSerial is a bound method taking only the command string;
        # the original passed self.psu_connection as an extra argument, which
        # raised TypeError whenever this handler fired.
        v1 = self.SendToSerial("V1?")
        # NOTE(review): display_voltage1 is never created in __init__ — verify
        # this handler is still wired to anything.
        self.display_voltage1.SetValue(v1)
    def DoStepVoltage(self):
        """Ramp the selected channel from 0 V to 14 V in 1 V steps."""
        # NOTE(review): the original comment said "available channels 0 or 1"
        # yet used 2 — confirm the intended channel number.
        channel = 2
        for v in range(0, 15):
            # FIX: same extra-argument bug as UpdateChannel; also renamed the
            # local `input`, which shadowed the builtin.
            cmd = "V" + str(channel) + " " + str(v)
            out = self.SendToSerial(cmd)
class Multimeter(wx.Panel):
def __init__(self, parent, data):
wx.Panel.__init__(self, parent)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizer(sizer)
def OnButton(self, event):
e = event.GetEventObject()
label = e.GetLabel()
name = e.GetName()
if name == "Instrument List":
if label == "Refresh Instruments":
self.DoRefreshInstruments()
| [
"wx.Button",
"wx.BoxSizer",
"time.sleep",
"wx.StaticBoxSizer",
"wx.StaticText",
"wx.TextCtrl",
"serial.Serial",
"wx.Panel.__init__"
] | [((624, 655), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self', 'parent'], {}), '(self, parent)\n', (641, 655), False, 'import wx\n'), ((932, 956), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (943, 956), False, 'import wx\n'), ((983, 1009), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (994, 1009), False, 'import wx\n'), ((1025, 1044), 'wx.StaticText', 'wx.StaticText', (['self'], {}), '(self)\n', (1038, 1044), False, 'import wx\n'), ((1265, 1291), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (1276, 1291), False, 'import wx\n'), ((2865, 2992), 'serial.Serial', 'serial.Serial', ([], {'port': 'port', 'baudrate': '(9600)', 'parity': 'serial.PARITY_ODD', 'stopbits': 'serial.STOPBITS_TWO', 'bytesize': 'serial.SEVENBITS'}), '(port=port, baudrate=9600, parity=serial.PARITY_ODD, stopbits=\n serial.STOPBITS_TWO, bytesize=serial.SEVENBITS)\n', (2878, 2992), False, 'import serial\n'), ((3989, 4004), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3999, 4004), False, 'import time\n'), ((4688, 4719), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self', 'parent'], {}), '(self, parent)\n', (4705, 4719), False, 'import wx\n'), ((4745, 4771), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (4756, 4771), False, 'import wx\n'), ((1491, 1536), 'wx.StaticBoxSizer', 'wx.StaticBoxSizer', (['channel_box', 'wx.HORIZONTAL'], {}), '(channel_box, wx.HORIZONTAL)\n', (1508, 1536), False, 'import wx\n'), ((1562, 1586), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (1573, 1586), False, 'import wx\n'), ((1635, 1652), 'wx.TextCtrl', 'wx.TextCtrl', (['self'], {}), '(self)\n', (1646, 1652), False, 'import wx\n'), ((1734, 1779), 'wx.Button', 'wx.Button', (['self'], {'label': '"""Set V"""', 'size': '(-1, 24)'}), "(self, label='Set V', size=(-1, 24))\n", (1743, 1779), False, 'import wx\n'), ((1960, 1984), 'wx.BoxSizer', 'wx.BoxSizer', 
(['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (1971, 1984), False, 'import wx\n'), ((2020, 2037), 'wx.TextCtrl', 'wx.TextCtrl', (['self'], {}), '(self)\n', (2031, 2037), False, 'import wx\n'), ((2060, 2105), 'wx.Button', 'wx.Button', (['self'], {'label': '"""Set A"""', 'size': '(-1, 24)'}), "(self, label='Set A', size=(-1, 24))\n", (2069, 2105), False, 'import wx\n')] |
# -*- coding: utf-8 -*-
"""\
Coroutine utilities
-------------------
Some code snippets inspired by http://www.dabeaz.com/coroutines/
"""
import re
import functools
def coroutine(func):
"""Prime a coroutine for send commands.
Args:
func (coroutine): A function that takes values via yield
Return:
function: Wrapped coroutine function
"""
@functools.wraps(func)
def _func(*args, **kwargs):
fn = func(*args, **kwargs)
next(fn)
return fn
return _func
@coroutine
def echo(**kwargs):
"""A simple output sink
Useful as a consumer of data from other coroutines that just print to console
"""
while True:
output = (yield)
print(output, **kwargs)
@coroutine
def grep(pattern, targets,
send_close=True,
matcher="search",
flags=0):
"""Unix grep-like utility
Feeds lines matching a target to consumer targets registered with this function
Args:
pattern (str): A regular expression as string (compiled internally)
targets (list): A list of consumer coroutines that want to act on matching lines
send_close (bool): If True, closes targets when grep exits
matcher: ``search``, ``match``, ``findall`` methods of regular expression
flags: Regexp flags used when compiling pattern
"""
pat = re.compile(pattern, flags=flags)
sfunc = getattr(pat, matcher)
try:
while True:
line = (yield)
mat = sfunc(line)
if mat:
for tgt in targets:
tgt.send(mat)
except GeneratorExit:
if send_close:
for tgt in targets:
tgt.close()
| [
"functools.wraps",
"re.compile"
] | [((381, 402), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (396, 402), False, 'import functools\n'), ((1371, 1403), 're.compile', 're.compile', (['pattern'], {'flags': 'flags'}), '(pattern, flags=flags)\n', (1381, 1403), False, 'import re\n')] |
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime
from sqlalchemy.orm import relationship
import datetime
from database import Base
class Org(Base):
__tablename__ = "orgs"
id = Column(Integer, primary_key=True, index=True)
name = Column(String, unique=True, index=True)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
buildings = relationship("Building", back_populates="org")
class Building(Base):
__tablename__ = "buildings"
id = Column(Integer, primary_key=True, index=True)
org_id = Column(Integer, ForeignKey(Org.id))
name = Column(String, unique=True, index=True)
address = Column(String)
org = relationship("Org", back_populates="buildings")
groups = relationship("Group", back_populates="building")
class Group(Base):
__tablename__ = "groups"
id = Column(Integer, primary_key=True, index=True)
building_id = Column(Integer, ForeignKey(Building.id))
name = Column(String, index=True)
building = relationship("Building", back_populates="groups")
points = relationship("Point", back_populates="building")
class Point(Base):
__tablename__ = "points"
id = Column(Integer, primary_key=True, index=True)
group_id = Column(Integer, ForeignKey(Building.id))
device_id = Column(Integer, index=True)
name = Column(String)
location = Column(String)
building = relationship("Group", back_populates="points") | [
"sqlalchemy.orm.relationship",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column"
] | [((217, 262), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'index': '(True)'}), '(Integer, primary_key=True, index=True)\n', (223, 262), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((274, 313), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(True)', 'index': '(True)'}), '(String, unique=True, index=True)\n', (280, 313), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((331, 381), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.datetime.utcnow'}), '(DateTime, default=datetime.datetime.utcnow)\n', (337, 381), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((399, 445), 'sqlalchemy.orm.relationship', 'relationship', (['"""Building"""'], {'back_populates': '"""org"""'}), "('Building', back_populates='org')\n", (411, 445), False, 'from sqlalchemy.orm import relationship\n'), ((512, 557), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'index': '(True)'}), '(Integer, primary_key=True, index=True)\n', (518, 557), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((618, 657), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(True)', 'index': '(True)'}), '(String, unique=True, index=True)\n', (624, 657), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((672, 686), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (678, 686), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((698, 745), 'sqlalchemy.orm.relationship', 'relationship', (['"""Org"""'], {'back_populates': '"""buildings"""'}), "('Org', back_populates='buildings')\n", (710, 745), False, 'from sqlalchemy.orm import relationship\n'), ((759, 807), 'sqlalchemy.orm.relationship', 'relationship', (['"""Group"""'], {'back_populates': '"""building"""'}), "('Group', 
back_populates='building')\n", (771, 807), False, 'from sqlalchemy.orm import relationship\n'), ((868, 913), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'index': '(True)'}), '(Integer, primary_key=True, index=True)\n', (874, 913), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((984, 1010), 'sqlalchemy.Column', 'Column', (['String'], {'index': '(True)'}), '(String, index=True)\n', (990, 1010), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((1027, 1076), 'sqlalchemy.orm.relationship', 'relationship', (['"""Building"""'], {'back_populates': '"""groups"""'}), "('Building', back_populates='groups')\n", (1039, 1076), False, 'from sqlalchemy.orm import relationship\n'), ((1090, 1138), 'sqlalchemy.orm.relationship', 'relationship', (['"""Point"""'], {'back_populates': '"""building"""'}), "('Point', back_populates='building')\n", (1102, 1138), False, 'from sqlalchemy.orm import relationship\n'), ((1199, 1244), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'index': '(True)'}), '(Integer, primary_key=True, index=True)\n', (1205, 1244), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((1317, 1344), 'sqlalchemy.Column', 'Column', (['Integer'], {'index': '(True)'}), '(Integer, index=True)\n', (1323, 1344), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((1356, 1370), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1362, 1370), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((1386, 1400), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1392, 1400), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((1421, 1467), 'sqlalchemy.orm.relationship', 'relationship', (['"""Group"""'], {'back_populates': '"""points"""'}), "('Group', 
back_populates='points')\n", (1433, 1467), False, 'from sqlalchemy.orm import relationship\n'), ((587, 605), 'sqlalchemy.ForeignKey', 'ForeignKey', (['Org.id'], {}), '(Org.id)\n', (597, 605), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((948, 971), 'sqlalchemy.ForeignKey', 'ForeignKey', (['Building.id'], {}), '(Building.id)\n', (958, 971), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n'), ((1276, 1299), 'sqlalchemy.ForeignKey', 'ForeignKey', (['Building.id'], {}), '(Building.id)\n', (1286, 1299), False, 'from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime\n')] |
#!/usr/bin/python3
import sys
import time
import array
import numpy as np
import pandas as pd
import statistics
import matplotlib.pyplot as plt
import seaborn as sns
# sns.set_theme(style="darkgrid")
x_b = [1, 10, 100, 1000, 10000, 100000, 1000000]
cyc_pi2 = [8379072, 8379072, 3675200, 372864, 37312, 3728, 368]
cyc_pi4 = [8376016, 8376016, 8376016, 1865072, 186752, 18664, 1864]
cyc_lap = [8372616, 8372616, 8372616, 2145304, 214464, 21376, 2072]
# print("Correlation:", np.corrcoef(x_b, cyc_pi2))
# plt.bar(cyc_pi2, x_b , align='center', alpha=0.5)
# plt.legend(['CycloneDDS Laptop', 'CycloneDDS RPi4', 'CycloneDDS RPi2', 'FastDDS Laptop', 'FastDDS RP4'])
# plt.title('CycloneDDS')
barWidth = 0.25
x_pos = np.arange(len(x_b))
r1 = np.arange(len(cyc_lap))
r2 = [x + barWidth for x in r1]
r3 = [x + barWidth for x in r2]
'''
fig, ax = plt.subplots()
rects3 = ax.bar(x_pos - 2*width/3, cyc_lap, width, label='Laptop')
rects2 = ax.bar(x_pos + width/3, cyc_pi4, width, label='RPi4')
rects3 = ax.bar(x_pos + 3*width/3, cyc_pi2, width, label='RPi2')
'''
ax = plt.gca()
ax.tick_params(axis = 'both', which = 'major', labelsize = 22)
ax.tick_params(axis = 'both', which = 'minor', labelsize = 22)
plt.bar(r1, cyc_lap, width=barWidth, label='Laptop')
plt.bar(r2, cyc_pi4, width=barWidth, label='RPi4')
plt.bar(r3, cyc_pi2, width=barWidth, label='RPi2')
# plt.bar(x_pos, cyc_pi2, align='center', alpha=0.5)
# plt.xticks(x_pos, x_b)
plt.xticks([r + barWidth for r in range(len(cyc_lap))], x_b)
plt.ylabel('Bytes', fontsize=24)
plt.xlabel('Buffer Size', fontsize=24)
plt.title('IDL size Capacity (CycloneDDS)', fontsize=26)
plt.yscale('log')
plt.grid(b=True, which='both', color='#BBBBBB', linestyle='-', axis='y')
plt.legend(fontsize=24)
'''
plt.yscale('log')
plt.xlabel('Bytes')
plt.xticks(x_b)
plt.ylabel('Samples')
plt.grid(b=True, which='both', color='#BBBBBB', linestyle='-')
'''
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.title",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1063, 1072), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1070, 1072), True, 'import matplotlib.pyplot as plt\n'), ((1200, 1252), 'matplotlib.pyplot.bar', 'plt.bar', (['r1', 'cyc_lap'], {'width': 'barWidth', 'label': '"""Laptop"""'}), "(r1, cyc_lap, width=barWidth, label='Laptop')\n", (1207, 1252), True, 'import matplotlib.pyplot as plt\n'), ((1253, 1303), 'matplotlib.pyplot.bar', 'plt.bar', (['r2', 'cyc_pi4'], {'width': 'barWidth', 'label': '"""RPi4"""'}), "(r2, cyc_pi4, width=barWidth, label='RPi4')\n", (1260, 1303), True, 'import matplotlib.pyplot as plt\n'), ((1304, 1354), 'matplotlib.pyplot.bar', 'plt.bar', (['r3', 'cyc_pi2'], {'width': 'barWidth', 'label': '"""RPi2"""'}), "(r3, cyc_pi2, width=barWidth, label='RPi2')\n", (1311, 1354), True, 'import matplotlib.pyplot as plt\n'), ((1496, 1528), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Bytes"""'], {'fontsize': '(24)'}), "('Bytes', fontsize=24)\n", (1506, 1528), True, 'import matplotlib.pyplot as plt\n'), ((1529, 1567), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Buffer Size"""'], {'fontsize': '(24)'}), "('Buffer Size', fontsize=24)\n", (1539, 1567), True, 'import matplotlib.pyplot as plt\n'), ((1568, 1624), 'matplotlib.pyplot.title', 'plt.title', (['"""IDL size Capacity (CycloneDDS)"""'], {'fontsize': '(26)'}), "('IDL size Capacity (CycloneDDS)', fontsize=26)\n", (1577, 1624), True, 'import matplotlib.pyplot as plt\n'), ((1625, 1642), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (1635, 1642), True, 'import matplotlib.pyplot as plt\n'), ((1643, 1715), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""both"""', 'color': '"""#BBBBBB"""', 'linestyle': '"""-"""', 'axis': '"""y"""'}), "(b=True, which='both', color='#BBBBBB', linestyle='-', axis='y')\n", (1651, 1715), True, 'import matplotlib.pyplot as plt\n'), ((1716, 1739), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(24)'}), '(fontsize=24)\n', (1726, 1739), True, 'import 
matplotlib.pyplot as plt\n'), ((1888, 1898), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1896, 1898), True, 'import matplotlib.pyplot as plt\n')] |
from flask_bcrypt import Bcrypt
from flask_caching import Cache
from flask_debugtoolbar import DebugToolbarExtension
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
import logging
bcrypt = Bcrypt()
login_manager = LoginManager()
db = SQLAlchemy()
migrate = Migrate()
cache = Cache()
debug_toolbar = DebugToolbarExtension()
gunicorn_error_logger = logging.getLogger('gunicorn.error') | [
"logging.getLogger",
"flask_login.LoginManager",
"flask_debugtoolbar.DebugToolbarExtension",
"flask_bcrypt.Bcrypt",
"flask_caching.Cache",
"flask_migrate.Migrate",
"flask_sqlalchemy.SQLAlchemy"
] | [((253, 261), 'flask_bcrypt.Bcrypt', 'Bcrypt', ([], {}), '()\n', (259, 261), False, 'from flask_bcrypt import Bcrypt\n'), ((278, 292), 'flask_login.LoginManager', 'LoginManager', ([], {}), '()\n', (290, 292), False, 'from flask_login import LoginManager\n'), ((298, 310), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (308, 310), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((321, 330), 'flask_migrate.Migrate', 'Migrate', ([], {}), '()\n', (328, 330), False, 'from flask_migrate import Migrate\n'), ((339, 346), 'flask_caching.Cache', 'Cache', ([], {}), '()\n', (344, 346), False, 'from flask_caching import Cache\n'), ((363, 386), 'flask_debugtoolbar.DebugToolbarExtension', 'DebugToolbarExtension', ([], {}), '()\n', (384, 386), False, 'from flask_debugtoolbar import DebugToolbarExtension\n'), ((411, 446), 'logging.getLogger', 'logging.getLogger', (['"""gunicorn.error"""'], {}), "('gunicorn.error')\n", (428, 446), False, 'import logging\n')] |
import time
def f():
[
# Must be split over multiple lines to see the error.
# https://github.com/benfred/py-spy/pull/208
time.sleep(1)
for _ in range(1000)
]
f()
| [
"time.sleep"
] | [((152, 165), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (162, 165), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-UAC-FileVirtualization
GUID : c02afc2b-e24e-4449-ad76-bcc2c2575ead
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2000_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2001, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2001_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2002_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2003, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2003_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2004, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2004_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2005, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2005_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2006, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2006_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2007, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2007_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2008, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2008_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2009, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2009_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2010, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2010_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2011, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2011_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2012, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2012_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2013, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2013_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2014, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2014_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2015, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2015_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2016, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2016_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2017, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2017_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2018, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2018_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2019, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2019_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4000_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"CreateOptions" / Int32ul,
"DesiredAccess" / Int32ul,
"IrpMajorFunction" / Int8ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4001, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4001_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"TargetFileNameLength" / Int16ul,
"TargetFileNameBuffer" / Bytes(lambda this: this.TargetFileNameLength)
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4002_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5000_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"CreateOptions" / Int32ul,
"DesiredAccess" / Int32ul,
"IrpMajorFunction" / Int8ul,
"Exclusions" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5002_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"CreateOptions" / Int32ul,
"DesiredAccess" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5003, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5003_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5004, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5004_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
)
| [
"construct.Bytes",
"construct.Struct",
"etl.parsers.etw.core.guid"
] | [((537, 562), 'construct.Struct', 'Struct', (["('Error' / Int32ul)"], {}), "('Error' / Int32ul)\n", (543, 562), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((739, 764), 'construct.Struct', 'Struct', (["('Error' / Int32ul)"], {}), "('Error' / Int32ul)\n", (745, 764), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((941, 966), 'construct.Struct', 'Struct', (["('Error' / Int32ul)"], {}), "('Error' / Int32ul)\n", (947, 966), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((1143, 1168), 'construct.Struct', 'Struct', (["('Error' / Int32ul)"], {}), "('Error' / Int32ul)\n", (1149, 1168), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((1345, 1370), 'construct.Struct', 'Struct', (["('Error' / Int32ul)"], {}), "('Error' / Int32ul)\n", (1351, 1370), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((1547, 1572), 'construct.Struct', 'Struct', (["('Error' / Int32ul)"], {}), "('Error' / Int32ul)\n", (1553, 1572), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((391, 435), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (395, 435), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((593, 637), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (597, 637), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((795, 839), 
'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (799, 839), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((997, 1041), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (1001, 1041), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((1199, 1243), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (1203, 1243), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((1401, 1445), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (1405, 1445), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((1603, 1647), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (1607, 1647), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((2147, 2191), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (2151, 2191), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((2691, 2735), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (2695, 2735), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((3235, 3279), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (3239, 3279), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((3779, 3823), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (3783, 3823), 
False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((4323, 4367), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (4327, 4367), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((4867, 4911), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (4871, 4911), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((5411, 5455), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (5415, 5455), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((5955, 5999), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (5959, 5999), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((6499, 6543), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (6503, 6543), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((7043, 7087), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (7047, 7087), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((7587, 7631), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (7591, 7631), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((8131, 8175), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (8135, 8175), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((8675, 8719), 'etl.parsers.etw.core.guid', 'guid', 
(['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (8679, 8719), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((9219, 9263), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (9223, 9263), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((9843, 9887), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (9847, 9887), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((10482, 10526), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (10486, 10526), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((10999, 11043), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (11003, 11043), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((11655, 11699), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (11659, 11699), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((12242, 12286), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (12246, 12286), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((12759, 12803), 'etl.parsers.etw.core.guid', 'guid', (['"""c02afc2b-e24e-4449-ad76-bcc2c2575ead"""'], {}), "('c02afc2b-e24e-4449-ad76-bcc2c2575ead')\n", (12763, 12803), False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((1831, 1865), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (1836, 1865), False, 'from construct import Int8sl, Int8ul, 
Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((1930, 1969), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (1935, 1969), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((2050, 2097), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (2055, 2097), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((2375, 2409), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (2380, 2409), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((2474, 2513), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (2479, 2513), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((2594, 2641), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (2599, 2641), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((2919, 2953), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (2924, 2953), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((3018, 3057), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (3023, 3057), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, 
Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((3138, 3185), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (3143, 3185), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((3463, 3497), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (3468, 3497), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((3562, 3601), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (3567, 3601), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((3682, 3729), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (3687, 3729), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((4007, 4041), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (4012, 4041), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((4106, 4145), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (4111, 4145), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((4226, 4273), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (4231, 4273), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, 
Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((4551, 4585), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (4556, 4585), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((4650, 4689), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (4655, 4689), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((4770, 4817), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (4775, 4817), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((5095, 5129), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (5100, 5129), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((5194, 5233), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (5199, 5233), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((5314, 5361), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (5319, 5361), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((5639, 5673), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (5644, 5673), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, 
Float32l, Struct\n'), ((5738, 5777), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (5743, 5777), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((5858, 5905), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (5863, 5905), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((6183, 6217), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (6188, 6217), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((6282, 6321), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (6287, 6321), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((6402, 6449), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (6407, 6449), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((6727, 6761), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (6732, 6761), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((6826, 6865), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (6831, 6865), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), 
((6946, 6993), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (6951, 6993), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((7271, 7305), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (7276, 7305), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((7370, 7409), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (7375, 7409), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((7490, 7537), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (7495, 7537), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((7815, 7849), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (7820, 7849), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((7914, 7953), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (7919, 7953), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((8034, 8081), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (8039, 8081), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((8359, 
8393), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (8364, 8393), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((8458, 8497), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (8463, 8497), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((8578, 8625), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (8583, 8625), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((8903, 8937), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (8908, 8937), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((9002, 9041), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (9007, 9041), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((9122, 9169), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (9127, 9169), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((9447, 9481), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (9452, 9481), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((9546, 9585), 'construct.Bytes', 'Bytes', 
(['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (9551, 9585), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((9666, 9713), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (9671, 9713), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((10071, 10105), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (10076, 10105), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((10170, 10209), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (10175, 10209), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((10290, 10337), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (10295, 10337), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((10414, 10459), 'construct.Bytes', 'Bytes', (['(lambda this: this.TargetFileNameLength)'], {}), '(lambda this: this.TargetFileNameLength)\n', (10419, 10459), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((10710, 10744), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (10715, 10744), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((10809, 10848), 'construct.Bytes', 
'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (10814, 10848), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((10929, 10976), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (10934, 10976), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((11227, 11261), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (11232, 11261), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((11326, 11365), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (11331, 11365), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((11446, 11493), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (11451, 11493), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((11883, 11917), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (11888, 11917), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((11982, 12021), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (11987, 12021), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((12102, 12149), 'construct.Bytes', 
'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (12107, 12149), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((12470, 12504), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (12475, 12504), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((12569, 12608), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (12574, 12608), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((12689, 12736), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (12694, 12736), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((12987, 13021), 'construct.Bytes', 'Bytes', (['(lambda this: this.SidLength)'], {}), '(lambda this: this.SidLength)\n', (12992, 13021), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((13086, 13125), 'construct.Bytes', 'Bytes', (['(lambda this: this.FileNameLength)'], {}), '(lambda this: this.FileNameLength)\n', (13091, 13125), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((13206, 13253), 'construct.Bytes', 'Bytes', (['(lambda this: this.ProcessImageNameLength)'], {}), '(lambda this: this.ProcessImageNameLength)\n', (13211, 13253), False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n')] |
"""
Processing data in win32 format.
"""
import glob
import logging
import math
import os
import subprocess
import tempfile
from fnmatch import fnmatch
from multiprocessing import Pool, cpu_count
from subprocess import DEVNULL, PIPE, Popen
# Setup the logger
FORMAT = "[%(asctime)s] %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT, datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
class Channel:
    """A win32 data channel and its instrumental response parameters."""

    def __init__(
        self,
        id=None,
        name=None,
        component=None,
        latitude=None,
        longitude=None,
        unit=None,
        gain=None,
        damping=None,
        period=None,
        preamplification=None,
        lsb_value=None,
    ):
        """Initialize a channel.

        Parameters
        ----------
        id: str
            Channel ID.
        name: str
            Station name.
        component: str
            Channel component name (``U|N|E``).
        latitude: float
            Station latitude.
        longitude: float
            Station longitude.
        unit: str
            Unit of data (``m``, ``m/s``, ``m/s/s``, ``rad``).
        gain: float
            Sensor sensitivity.
        damping: float
            Damping constant of the sensor.
        period: float
            Natural period of the seismometer.
        preamplification:
            Preamplification.
        lsb_value:
            LSB value.
        """
        # Identification.
        self.id = id
        self.name = name
        self.component = component
        # Station location.
        self.latitude = latitude
        self.longitude = longitude
        # Instrumental response.
        self.unit = unit
        self.gain = gain
        self.damping = damping
        self.period = period
        self.preamplification = preamplification
        self.lsb_value = lsb_value
def extract_sac(
    data,
    ctable,
    suffix="SAC",
    outdir=".",
    pmax=8640000,
    filter_by_id=None,
    filter_by_name=None,
    filter_by_component=None,
    with_pz=False,
    processes=0,
):
    """Extract data as SAC format files.

    Parameters
    ----------
    data: str
        win32 file to be processed.
    ctable: str
        Channel table file.
    suffix: str
        Suffix of output SAC files. Defaults to ``SAC``.
    outdir: str
        Output directory. Defaults to current directory.
    pmax: int
        Maximum number of data points. Defaults to 8640000. If the input data
        is longer than one day, you have to to increase ``pmax``.
    filter_by_id: list of str or wildcard
        Filter channels by ID.
    filter_by_name: list of str or wildcard
        Filter channels by name.
    filter_by_component: list of str or wildcard
        Filter channels by component.
    with_pz: bool
        Extract PZ files at the same time.
        PZ file has default suffix ``.SAC_PZ``.
    processes: int
        Number of parallel processes to speed up data extraction.
        Use all processes by default.

    Note
    ----
    ``win2sac`` removes sensitivity from waveform, then multiply by 1.0e9.
    Thus the extracted SAC files are velocity in nm/s, or acceleration in nm/s/s.

    Examples
    --------
    >>> extract_sac("0101_201001010000_5.cnt", "0101_20100101.ch")

    Extract all channel with specified SAC suffix and output directory:

    >>> extract_sac(
    ...     "0101_201001010000_5.cnt",
    ...     "0101_20100101.ch",
    ...     suffix="",
    ...     outdir="20100101000",
    ... )

    Extract only specified channels:

    >>> extract_sac(
    ...     "0101_201001010000_5.cnt",
    ...     "0101_20100101.ch",
    ...     filter_by_name="N.NA*",
    ...     filter_by_component="[NE]",
    ... )
    """
    # Both the win32 data file and the channel table are required.
    if not (data and ctable):
        logger.error("data or ctable is `None'. Data requests may fail. Skipped.")
        return
    channels = _get_channels(ctable)
    logger.info(f"{len(channels)} channels found in {ctable}.")
    if filter_by_id or filter_by_name or filter_by_component:
        channels = _filter_channels(
            channels, filter_by_id, filter_by_name, filter_by_component
        )
        logger.info(f"{len(channels)} channels to be extracted.")
    if not os.path.exists(outdir):
        os.makedirs(outdir, exist_ok=True)
    with Pool(processes=_get_processes(processes)) as pool:
        # NOTE: the NamedTemporaryFile must stay open for the whole starmap
        # call below -- worker processes read the parameter file from disk
        # by its name, and closing it would delete the file.
        with tempfile.NamedTemporaryFile() as ft:
            _write_winprm(ctable, ft.name)
            # One (data, channel, ...) argument tuple per channel to extract.
            args = [(data, ch, suffix, outdir, ft.name, pmax) for ch in channels]
            sacfiles = pool.starmap(_extract_channel, args)
            # _extract_channel returns None for channels with no data.
            logger.info(
                "{} SAC data successfully extracted.".format(
                    len(sacfiles) - sacfiles.count(None)
                )
            )
        if with_pz:
            # "SAC_PZ" here is hardcoded.
            args = [(ch, "SAC_PZ", outdir) for ch in channels]
            pzfiles = pool.starmap(_extract_sacpz, args)
            logger.info(
                "{} SAC PZ files successfully extracted.".format(
                    len(pzfiles) - pzfiles.count(None)
                )
            )
def _get_processes(procs):
"""Choose the best number of processes."""
cpus = cpu_count()
if cpus == 1:
return cpus
if not 0 < procs < cpus:
return cpus - 1
return procs
def extract_pz(
    ctable,
    suffix="SAC_PZ",
    outdir=".",
    keep_sensitivity=False,
    filter_by_chid=None,
    filter_by_name=None,
    filter_by_component=None,
):
    """Extract instrumental response in SAC PZ format from channel table.

    .. warning::

       Only works for instrumental responses of Hi-net network.
       RESP files of F-net network can be downloaded from
       `F-net website <http://www.fnet.bosai.go.jp/st_info/response.php?LANG=en>`_.

    Parameters
    ----------
    ctable: str
        Channel table file.
    suffix: str
        Suffix of SAC PZ files. Defaults to ``SAC_PZ``.
    outdir: str
        Output directory. Defaults to current directory.
    keep_sensitivity: bool
        win2sac automatically removes sensitivity from waveform data
        during win32 format to SAC format conversion.
        So the generated polezero file should omit the sensitivity.
    filter_by_chid: list of str or wildcard
        Filter channels by ID.

        .. NOTE(review): this parameter is named ``filter_by_chid`` here,
           unlike ``filter_by_id`` in :func:`extract_sac`; renaming it would
           break keyword callers.
    filter_by_name: list of str or wildcard
        Filter channels by name.
    filter_by_component: list of str or wildcard
        Filter channels by component.

    Examples
    --------
    >>> extract_pz("0101_20100101.ch")

    Extract all channel with specified suffix and output directory:

    >>> extract_pz("0101_20100101.ch", suffix="", outdir="20100101000")

    Extract only specified channels:

    >>> extract_pz(
    ...     "0101_20100101.ch", filter_by_name="N.NA*", filter_by_component="[NE]"
    ... )
    """
    # A channel table is mandatory: it holds all response information.
    if not ctable:
        logger.error("ctable is `None'. Data requests may fail. Skipped.")
        return
    channels = _get_channels(ctable)
    if filter_by_chid or filter_by_name or filter_by_component:
        channels = _filter_channels(
            channels, filter_by_chid, filter_by_name, filter_by_component
        )
    if not os.path.exists(outdir):
        os.makedirs(outdir, exist_ok=True)
    # Write one SAC PZ file per remaining channel.
    for channel in channels:
        _extract_sacpz(
            channel, suffix=suffix, outdir=outdir, keep_sensitivity=keep_sensitivity
        )
def _get_channels(ctable):
    """Get channel information from a channel table file.

    Blank lines, comment lines, and malformed lines are skipped.

    Parameters
    ----------
    ctable: str
        Channel table file.

    Returns
    -------
    list of Channel
        Channels successfully parsed from the table.
    """
    channels = []
    with open(ctable, "r") as f:
        for line in f:
            # skip blank lines and comment lines
            if not line.strip() or line.strip().startswith("#"):
                continue
            items = line.split()
            try:
                channel = Channel(
                    id=items[0],
                    name=items[3],
                    component=items[4],
                    latitude=float(items[13]),
                    longitude=float(items[14]),
                    unit=items[8],
                    gain=float(items[7]),
                    damping=float(items[10]),
                    period=float(items[9]),
                    preamplification=float(items[11]),
                    lsb_value=float(items[12]),
                )
                channels.append(channel)
            # ValueError: a field is not numeric. IndexError: the line has
            # too few fields -- previously uncaught, so one short line
            # crashed the whole parse (and the handler itself could raise a
            # secondary IndexError via items[3]/items[4]).
            except (ValueError, IndexError) as e:
                logger.warning(
                    "Error in parsing channel information (%s). Skipped.",
                    line.strip(),
                )
                logger.warning("Original error message: %s", e)
    return channels
def _filter_channels(
channels, filter_by_id=None, filter_by_name=None, filter_by_component=None
):
"""Filter channels by id, name and/or component.
Parameters
----------
channels: :class:`~HinetPy.win32.Channel`
Channels to be filtered.
filter_by_id: list of str or wildcard
Filter channels by ID.
filter_by_name: list of str or wildcard
Filter channels by name.
filter_by_component: list of str or wildcard
Filter channels by component.
"""
def _filter(channels, key, filters):
filtered_channels = []
if isinstance(filters, list): # filter by list
for channel in channels:
if getattr(channel, key) in filters:
filtered_channels.append(channel)
elif isinstance(filters, str): # filter by wildcard
for channel in channels:
if fnmatch(getattr(channel, key), filters):
filtered_channels.append(channel)
else:
raise ValueError("Only list and wildcard filter are supported.")
return filtered_channels
if filter_by_id:
channels = _filter(channels, "id", filter_by_id)
if filter_by_name:
channels = _filter(channels, "name", filter_by_name)
if filter_by_component:
channels = _filter(channels, "component", filter_by_component)
return channels
def _write_winprm(ctable, prmfile="win.prm"):
"""
Four line parameters file.
"""
with open(prmfile, "w") as f:
msg = ".\n" + ctable + "\n" + ".\n.\n"
f.write(msg)
def _extract_channel(
    winfile, channel, suffix="SAC", outdir=".", prmfile="win.prm", pmax=8640000
):
    """Extract one channel data from win32 file.

    Parameters
    ----------
    winfile: str
        win32 file to be processed.
    channel: str
        Channel to be extracted.
    suffix: str
        SAC file suffix.
    outdir: str
        Output directory.
    prmfile: str
        Win32 parameter file.
    pmax: int
        Maximum number of data points.
    """
    # Delegate the actual conversion to the external win2sac_32 program.
    proc = Popen(
        [
            "win2sac_32",
            winfile,
            channel.id,
            suffix,
            outdir,
            "-e",
            "-p" + prmfile,
            "-m" + str(pmax),
        ],
        stdout=DEVNULL,
        stderr=PIPE,
    )
    # Scan stderr for the two known failure modes.
    for message in proc.stderr.read().decode().split("\n"):
        if "The number of points is maximum over" in message:
            raise ValueError(
                "The number of data points is over maximum. Try to increase pmax."
            )
        if f"Data for channel {channel.id} not existed" in message:
            # return None if no data avaiable
            logger.warning(
                f"Data for {channel.name}.{channel.component} ({channel.id}) "
                + "not exists. Skipped."
            )
            return None
    sacfile = f"{channel.name}.{channel.component}.{suffix}"
    if outdir != ".":
        sacfile = os.path.join(outdir, sacfile)
    if os.path.exists(sacfile):  # some channels have no data
        if suffix == "":  # remove extra dot if suffix is empty
            os.rename(sacfile, sacfile[:-1])
            return sacfile[:-1]
        return sacfile
def _channel2pz(channel, keep_sensitivity=False):
"""Convert channel information to SAC polezero file.
Transfer function = s^2 / (s^2+2hws+w^2).
"""
# Hi-net use moving coil velocity type seismometer.
if channel.unit != "m/s":
logger.warning(
f"{channel.name}.{channel.component} ({channel.id}): Unit is not velocity."
)
try:
freq = 2.0 * math.pi / channel.period
except ZeroDivisionError:
logger.warning(
f"{channel.name}.{channel.component} ({channel.id}): "
+ "Natural period = 0. Skipped."
)
return None, None, None
# calculate poles, find roots of equation s^2+2hws+w^2=0
real = -channel.damping * freq
imaginary = freq * math.sqrt(1 - channel.damping ** 2)
# calculate constant
fn = 20 # alaways assume normalization frequency is 20 Hz
s = complex(0, 2 * math.pi * fn)
A0 = abs((s ** 2 + 2 * channel.damping * freq * s + freq ** 2) / s ** 2)
if keep_sensitivity:
factor = math.pow(10, channel.preamplification / 20.0)
constant = A0 * channel.gain * factor / channel.lsb_value
else:
constant = A0
return real, imaginary, constant
def _write_pz(pzfile, real, imaginary, constant):
"""Write SAC PZ file.
Parameters
----------
pzfile: str
SAC PoleZero filename.
real: float
Real part of poles.
imaginary: float
Imaginary part of poles
constant: float
Constant in SAC PZ.
"""
with open(pzfile, "w") as pz:
pz.write("ZEROS 3\n")
pz.write("POLES 2\n")
pz.write(f"{real:9.6f} {imaginary:9.6f}\n")
pz.write(f"{real:9.6f} {-imaginary:9.6f}\n")
pz.write(f"CONSTANT {constant:e}\n")
def _extract_sacpz(channel, suffix="SAC_PZ", outdir=".", keep_sensitivity=False):
    """Write the SAC polezero file for one channel and return its path.

    Returns None when the channel metadata cannot be converted
    (e.g. zero natural period).
    """
    real, imaginary, constant = _channel2pz(channel, keep_sensitivity=keep_sensitivity)
    # _channel2pz signals bad channel information with a triple of Nones.
    if real is None or imaginary is None or constant is None:
        return None
    pzfile = f"{channel.name}.{channel.component}"
    if suffix:
        pzfile = pzfile + "." + suffix
    pzfile = os.path.join(outdir, pzfile)
    _write_pz(pzfile, real, imaginary, constant)
    return pzfile
def merge(datas, total_data, force_sort=False):
    """Merge several win32 files into one win32 file.

    Parameters
    ----------
    datas: list of str or wildcard
        Win32 files to be merged.
    total_data: str
        Filename of output win32 file.
    force_sort: bool
        Sort all win32 files by date.

    Examples
    --------
    If win32 files are named by starttime (e.g. ``201304040203.cnt``),
    sorting win32 files in the list by name/time is preferred:

    >>> datas = sorted(glob.glob("20130404*.cnt"))
    >>> merge(datas, "outdir/final.cnt")

    If win32 files are named randomly, set ``force_sort`` to ``True`` to
    force ``catwin32`` to sort all data by time. It is time consuming;
    do NOT use it unless necessary:

    >>> datas = ["001.cnt", "002.cnt", "003.cnt"]
    >>> merge(datas, "final.cnt", force_sort=True)

    A wildcard also works:

    >>> merge("20130404*.cnt", "final.cnt")
    """
    if isinstance(datas, str):
        # A wildcard pattern was given instead of an explicit list.
        datas = sorted(glob.glob(datas))
    if not datas:
        raise FileNotFoundError("Files to be merged not found.\n")
    dirname = os.path.dirname(total_data)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    cmd = ["catwin32", "-o", total_data] + datas
    if force_sort:
        # -s forces catwin32 to sort all records by time (slow).
        cmd.append("-s")
    subprocess.call(cmd, stdout=DEVNULL, stderr=DEVNULL)
| [
"logging.basicConfig",
"os.path.exists",
"logging.getLogger",
"os.makedirs",
"math.pow",
"subprocess.Popen",
"os.rename",
"os.path.join",
"math.sqrt",
"multiprocessing.cpu_count",
"os.path.dirname",
"subprocess.call",
"tempfile.NamedTemporaryFile",
"glob.glob"
] | [((312, 400), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'FORMAT', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(level=logging.INFO, format=FORMAT, datefmt=\n '%Y-%m-%d %H:%M:%S')\n", (331, 400), False, 'import logging\n'), ((405, 432), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (422, 432), False, 'import logging\n'), ((5171, 5182), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (5180, 5182), False, 'from multiprocessing import Pool, cpu_count\n'), ((10977, 11016), 'subprocess.Popen', 'Popen', (['cmd'], {'stdout': 'DEVNULL', 'stderr': 'PIPE'}), '(cmd, stdout=DEVNULL, stderr=PIPE)\n', (10982, 11016), False, 'from subprocess import DEVNULL, PIPE, Popen\n'), ((11716, 11740), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (11730, 11740), False, 'import os\n'), ((14147, 14175), 'os.path.join', 'os.path.join', (['outdir', 'pzfile'], {}), '(outdir, pzfile)\n', (14159, 14175), False, 'import os\n'), ((15432, 15459), 'os.path.dirname', 'os.path.dirname', (['total_data'], {}), '(total_data)\n', (15447, 15459), False, 'import os\n'), ((15655, 15707), 'subprocess.call', 'subprocess.call', (['cmd'], {'stdout': 'DEVNULL', 'stderr': 'DEVNULL'}), '(cmd, stdout=DEVNULL, stderr=DEVNULL)\n', (15670, 15707), False, 'import subprocess\n'), ((4184, 4206), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (4198, 4206), False, 'import os\n'), ((4216, 4250), 'os.makedirs', 'os.makedirs', (['outdir'], {'exist_ok': '(True)'}), '(outdir, exist_ok=True)\n', (4227, 4250), False, 'import os\n'), ((7151, 7173), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (7165, 7173), False, 'import os\n'), ((7183, 7217), 'os.makedirs', 'os.makedirs', (['outdir'], {'exist_ok': '(True)'}), '(outdir, exist_ok=True)\n', (7194, 7217), False, 'import os\n'), ((11677, 11707), 'os.path.join', 'os.path.join', (['outdir', 'filename'], {}), '(outdir, filename)\n', 
(11689, 11707), False, 'import os\n'), ((12696, 12731), 'math.sqrt', 'math.sqrt', (['(1 - channel.damping ** 2)'], {}), '(1 - channel.damping ** 2)\n', (12705, 12731), False, 'import math\n'), ((12978, 13023), 'math.pow', 'math.pow', (['(10)', '(channel.preamplification / 20.0)'], {}), '(10, channel.preamplification / 20.0)\n', (12986, 13023), False, 'import math\n'), ((4325, 4354), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (4352, 4354), False, 'import tempfile\n'), ((11848, 11882), 'os.rename', 'os.rename', (['filename', 'filename[:-1]'], {}), '(filename, filename[:-1])\n', (11857, 11882), False, 'import os\n'), ((15313, 15329), 'glob.glob', 'glob.glob', (['datas'], {}), '(datas)\n', (15322, 15329), False, 'import glob\n'), ((15481, 15508), 'os.path.dirname', 'os.path.dirname', (['total_data'], {}), '(total_data)\n', (15496, 15508), False, 'import os\n')] |
import asyncio

import aiopg

# Connection string for the example database; <PASSWORD> is a placeholder.
dsn = 'dbname=aiopg user=aiopg password=<PASSWORD> host=127.0.0.1'


async def test_select():
    """Run ``SELECT 1`` against PostgreSQL through an aiopg pool.

    Rewritten as a native coroutine: ``@asyncio.coroutine`` and
    ``yield from`` were deprecated in Python 3.8 and removed in 3.11.
    Callers still receive a coroutine object, so the interface is unchanged.
    """
    pool = await aiopg.create_pool(dsn)
    with (await pool.cursor()) as cur:
        await cur.execute("SELECT 1")
        ret = await cur.fetchone()
        assert ret == (1,)
        print("ALL DONE")


loop = asyncio.get_event_loop()
loop.run_until_complete(test_select())
| [
"asyncio.get_event_loop",
"aiopg.create_pool"
] | [((366, 390), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (388, 390), False, 'import asyncio\n'), ((158, 180), 'aiopg.create_pool', 'aiopg.create_pool', (['dsn'], {}), '(dsn)\n', (175, 180), False, 'import aiopg\n')] |
from collections import defaultdict
import io
import hashlib
from datetime import date, datetime
from pyexcel_xls import get_data as xls_get
import pandas
import magic
from contextlib import closing
import csv
from django.db import connection
from io import StringIO
import uuid
from psycopg2.errors import UniqueViolation
from django.db import IntegrityError
from django.utils.encoding import force_bytes
from django.utils.timezone import make_aware
from django.conf import settings
from django.utils.datastructures import MultiValueDictKeyError
from django_filters import rest_framework as filters
from django_filters import Filter
from django_filters.filters import DateFromToRangeFilter
from djqscsv import render_to_csv_response
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import PermissionDenied, ValidationError
from rest_framework.mixins import DestroyModelMixin, ListModelMixin, RetrieveModelMixin
from rest_framework.parsers import FormParser, JSONParser, MultiPartParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from core.fernet import FernetEncryption
from care.facility.api.serializers.patient_external_test import (
PatientExternalTestSerializer, PatientExternalTestICMRDataSerializer
)
from care.facility.models import PatientExternalTest, PatientExternalTestUploadHistory
from care.users.models import User, State, District
def prettyerrors(errors):
    """Flatten serializer errors into ``{field: [message, ...]}``.

    Only fields listed in ``PatientExternalTest.HEADER_CSV_MAPPING`` are
    considered; each error is stringified.
    """
    readable = defaultdict(list)
    for field in PatientExternalTest.HEADER_CSV_MAPPING.keys():
        # Missing fields yield an empty default, so nothing is appended.
        for error in errors.get(field, ""):
            readable[field].append(str(error))
    return dict(readable)
class MFilter(Filter):
    """Comma-separated multi-value filter.

    Matches rows whose field value is one of the supplied values; rows
    with a NULL field are always excluded.
    """

    def filter(self, qs, value):
        if not value:
            # No input means "no filtering".
            return qs
        lookup = {
            self.field_name + "__in": value.split(","),
            self.field_name + "__isnull": False,
        }
        return qs.filter(**lookup)
class PatientExternalTestFilter(filters.FilterSet):
    """Filterset for external patient tests: free-text, area and date-range filters."""

    # Case-insensitive substring matches.
    name = filters.CharFilter(field_name="name", lookup_expr="icontains")
    srf_id = filters.CharFilter(field_name="srf_id", lookup_expr="icontains")
    mobile_number = filters.CharFilter(field_name="mobile_number", lookup_expr="icontains")
    # Comma-separated id lists for administrative areas (see MFilter).
    wards = MFilter(field_name="ward__id")
    districts = MFilter(field_name="district__id")
    local_bodies = MFilter(field_name="local_body__id")
    # Date-range filters.
    sample_collection_date = DateFromToRangeFilter(field_name="sample_collection_date")
    result_date = DateFromToRangeFilter(field_name="result_date")
    created_date = DateFromToRangeFilter(field_name="created_date")
class PatientExternalTestViewSet(
    RetrieveModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet,
):
    """API endpoints for externally performed patient tests.

    Provides retrieve/list/destroy plus two bulk-upload actions:
    ``bulk_upsert`` (JSON payload) and ``bulk_upsert_icmr`` (ICMR file
    upload). Visibility is narrowed to the requesting user's
    administrative area in :meth:`get_queryset`.
    """

    serializer_class = PatientExternalTestSerializer
    queryset = PatientExternalTest.objects.select_related("ward", "local_body", "district").all().order_by("-id")
    permission_classes = (IsAuthenticated,)
    filter_backends = (filters.DjangoFilterBackend,)
    filterset_class = PatientExternalTestFilter
    parser_classes = (MultiPartParser, FormParser, JSONParser)

    def get_queryset(self):
        """Restrict results to the user's jurisdiction (state/district/local body/ward)."""
        queryset = self.queryset
        if not self.request.user.is_superuser:
            # Broadest matching role wins; users below WardAdmin see nothing.
            if self.request.user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]:
                queryset = queryset.filter(district__state=self.request.user.state)
            elif self.request.user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]:
                queryset = queryset.filter(district=self.request.user.district)
            elif self.request.user.user_type >= User.TYPE_VALUE_MAP["LocalBodyAdmin"]:
                queryset = queryset.filter(local_body=self.request.user.local_body)
            elif self.request.user.user_type >= User.TYPE_VALUE_MAP["WardAdmin"]:
                queryset = queryset.filter(ward=self.request.user.ward, ward__isnull=False)
            else:
                queryset = queryset.none()
        return queryset

    def destroy(self, request, *args, **kwargs):
        """Delete a record; allowed only for DistrictLabAdmin and above."""
        if self.request.user.user_type < User.TYPE_VALUE_MAP["DistrictLabAdmin"]:
            raise PermissionDenied()
        return super().destroy(request, *args, **kwargs)

    def check_upload_permission(self):
        """Return True when the user may bulk-upload (superuser or >= DistrictLabAdmin)."""
        if (
            self.request.user.is_superuser == True
            or self.request.user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]
        ):
            return True
        return False

    def list(self, request, *args, **kwargs):
        """List records; with the CSV request parameter set, return a CSV export instead."""
        if settings.CSV_REQUEST_PARAMETER in request.GET:
            mapping = PatientExternalTest.CSV_MAPPING.copy()
            pretty_mapping = PatientExternalTest.CSV_MAKE_PRETTY.copy()
            queryset = self.filter_queryset(self.get_queryset()).values(*mapping.keys())
            return render_to_csv_response(queryset, field_header_map=mapping, field_serializer_map=pretty_mapping)
        return super(PatientExternalTestViewSet, self).list(request, *args, **kwargs)

    @action(methods=["POST"], detail=False)
    def bulk_upsert(self, request, *args, **kwargs):
        """Validate and save a list of test records posted as ``sample_tests``.

        All records are validated first; nothing is saved unless every
        record is valid (400 with per-record errors otherwise).
        """
        if not self.check_upload_permission():
            raise PermissionDenied("Permission to Endpoint Denied")
        # if len(request.FILES.keys()) != 1:
        #     raise ValidationError({"file": "Upload 1 File at a time"})
        # csv_file = request.FILES[list(request.FILES.keys())[0]]
        # csv_file.seek(0)
        # reader = csv.DictReader(io.StringIO(csv_file.read().decode("utf-8-sig")))
        if "sample_tests" not in request.data:
            raise ValidationError({"sample_tests": "No Data was provided"})
        if type(request.data["sample_tests"]) != type([]):
            raise ValidationError({"sample_tests": "Data should be provided as a list"})
        errors = {}
        counter = 0
        ser_objects = []
        invalid = False
        for sample in request.data["sample_tests"]:
            counter += 1
            serialiser_obj = PatientExternalTestSerializer(data=sample)
            valid = serialiser_obj.is_valid()
            current_error = prettyerrors(serialiser_obj._errors)
            if current_error and (not valid):
                # Errors are keyed by 1-based record position.
                errors[counter] = current_error
                invalid = True
            ser_objects.append(serialiser_obj)
        if invalid:
            return Response(errors, status=status.HTTP_400_BAD_REQUEST)
        for ser_object in ser_objects:
            ser_object.save()
        return Response(status=status.HTTP_202_ACCEPTED)

    @action(methods=["POST"], detail=False)
    def bulk_upsert_icmr(self, request, *args, **kwargs):
        """Bulk-import ICMR test results from an uploaded ``.xls`` file.

        Accepts a true Excel file or the tab-separated plaintext the ICMR
        portal exports with an ``.xls`` extension. Re-uploads of the same
        file are rejected via a blake2b hash of its contents.
        """
        if not self.check_upload_permission():
            raise PermissionDenied("Permission to Endpoint Denied")
        parsed_data = []
        states = State.objects.all().prefetch_related("districts")
        districts = District.objects.all()
        # Lookup tables for resolving state/district names (lowercased).
        states_dict = {state.name.lower(): state for state in states}
        districts_dict = {district.name.lower(): district for district in districts}
        excel_data = {}
        uploaded_file = request.FILES["file"]
        # Hash the file in chunks to detect duplicate uploads.
        file_hash = hashlib.blake2b()
        while True:
            chunk = uploaded_file.read(16384)
            if not chunk:
                break
            file_hash.update(chunk)
        existing_file_hash = PatientExternalTestUploadHistory.objects.filter(hash=file_hash.hexdigest())
        if existing_file_hash.exists():
            return Response(data="This file has already been uploaded.", status=status.HTTP_400_BAD_REQUEST)
        uploaded_file.seek(0)
        file_read = uploaded_file.read()
        # Sniff the real content type; the extension alone is unreliable.
        mime = magic.Magic(mime=True)
        mime_type = mime.from_buffer(file_read)
        extension = str(uploaded_file).split('.')[-1]
        if mime_type == "application/vnd.ms-excel":
            excel_data = xls_get(uploaded_file, column_limit=41)
            parsed_data = self.parse_excel(excel_data=excel_data, states_dict=states_dict,
                                           districts_dict=districts_dict)
        elif mime_type == "text/plain" and extension == "xls":
            # assuming the file is uploaded as is when exported from icmr portal
            # icmr portal file has an extension of .xls but actually is a tabbed csv file in plaintext format
            file_stream = io.StringIO(file_read.decode('utf-8'))
            csv_data = pandas.read_csv(file_stream, delimiter='\t').to_dict('records')
            parsed_data = self.parse_tabbed_csv(
                csv_data=csv_data, states_dict=states_dict, districts_dict=districts_dict)
        # NOTE(review): any other mime type falls through with parsed_data == []
        # and still records an upload history entry — confirm this is intended.
        try:
            self.copy_to_db(parsed_data)
        except UniqueViolation as error:
            return Response(data="Duplicate entries found.", status=status.HTTP_400_BAD_REQUEST)
        # most_recent_date_of_sample_tested_in_file is set by parse_excel /
        # parse_tabbed_csv (via parse_dictionary) before this point.
        PatientExternalTestUploadHistory.objects.create(file_name=str(
            uploaded_file), uploaded_by=request.user, hash=file_hash.hexdigest(),
            most_recent_date_of_sample_tested_in_file=self.most_recent_date_of_sample_tested_in_file)
        response_message = "Tests were successfully uploaded and saved."
        response = {"message": response_message}
        return Response(data=response, status=status.HTTP_200_OK)

    def parse_tabbed_csv(self, csv_data, states_dict, districts_dict):
        """Normalize a list of tab-separated-CSV row dicts into model field dicts."""
        parsed_data = []
        self.most_recent_date_of_sample_tested_in_file = None
        for row in csv_data:
            dictionary = {}
            for key, item in row.items():
                key, value = self.parse_dictionary(key=key.strip(), item=item,
                                                   states_dict=states_dict, districts_dict=districts_dict)
                dictionary[key] = value
            if dictionary:
                parsed_data.append(dictionary)
        return parsed_data

    def parse_excel(self, excel_data, states_dict, districts_dict):
        """Normalize rows of the first Excel sheet into model field dicts.

        The first row is treated as the header; each subsequent row is
        mapped column-by-column through :meth:`parse_dictionary`.
        """
        self.most_recent_date_of_sample_tested_in_file = None
        parsed_data = []
        file_name = list(excel_data.keys())[0]
        keys = []
        for i, row in enumerate(excel_data.get(file_name)):
            if i == 0:
                # Header row: remember the (stripped) column names.
                keys = [item.strip() for item in row]
            else:
                dictionary = {}
                for j, item in enumerate(row):
                    key, value = self.parse_dictionary(
                        key=keys[j], item=item, states_dict=states_dict, districts_dict=districts_dict)
                    dictionary[key] = value
                if dictionary:
                    parsed_data.append(dictionary)
        return parsed_data

    def parse_dictionary(self, key, item, states_dict, districts_dict):
        """Map one ICMR (header, cell) pair to a (model field, value) pair.

        Resolves state/district names to ids, coerces yes/no flags to
        booleans, parses the various date columns, and tracks the most
        recent ``date_of_sample_tested`` seen in the file.
        """
        if isinstance(item, str):
            item = item.strip()
        key = PatientExternalTest.ICMR_EXCEL_HEADER_KEY_MAPPING.get(key)
        if key == "state":
            state = states_dict.get(item.lower())
            if state:
                item = state.id
                key = "state_id"
        elif key == "district":
            district = districts_dict.get(item.lower())
            if district:
                item = district.id
                key = "district_id"
        elif key in ["is_hospitalized", "is_repeat"]:
            # Anything containing "yes" is True; everything else is False.
            if item and "yes" in item:
                item = True
            else:
                item = False
        elif key in ["hospitalization_date", "confirmation_date", "sample_received_date", "entry_date"]:
            if "N/A" in item:
                item = None
            elif item:
                item = make_aware(datetime.strptime(item, "%Y-%m-%d %H:%M:%S"))
        elif key in ["sample_collection_date"]:
            item = make_aware(datetime.strptime(item, "%Y-%m-%d %H:%M:%S")).date()
        elif key == "date_of_sample_tested":
            item = make_aware(datetime.strptime(item, "%Y-%m-%d %H:%M:%S"))
            if self.most_recent_date_of_sample_tested_in_file is None or self.most_recent_date_of_sample_tested_in_file < item:
                self.most_recent_date_of_sample_tested_in_file = item
        return key, item

    def copy_to_db(self, n_records):
        """Bulk-insert parsed records via PostgreSQL ``COPY``.

        Encrypts aadhar/passport numbers, skips records whose ``icmr_id``
        was already seen within this batch, and streams a tab-separated
        buffer into the table with ``cursor.copy_from``.
        """
        fernet = FernetEncryption()
        stream = StringIO()
        writer = csv.writer(stream, delimiter='\t')
        icmr_id_set = set()
        for i in n_records:
            if i["icmr_id"] not in icmr_id_set:
                aadhar = fernet.encrypt(i["aadhar_number"], connection)
                passport = fernet.encrypt(i["passport_number"], connection)
                # Column order here must mirror the columns tuple below.
                writer.writerow([str(uuid.uuid4()), 'false', i["name"], i["age"], i["age_in"], i["gender"], i["address"], aadhar, passport,
                                 i["mobile_number"], i["is_repeat"], i["lab_name"], i["test_type"], i["sample_type"], i["result"],
                                 i["srf_id"], i["patient_category"], i["icmr_id"], i["icmr_patient_id"], i["contact_number_of"],
                                 i["nationality"], i['pincode'], i['village_town'], i['underlying_medical_condition'], i['sample_id'],
                                 i['hospital_name'], i['hospital_state'], i['hospital_district'], i['symptom_status'], i['symptoms'],
                                 i['egene'], i['rdrp'], i['orf1b'], i['remarks'], i['state_id'], i['district_id'], i['is_hospitalized']])
                icmr_id_set.add(i["icmr_id"])
        stream.seek(0)
        with closing(connection.cursor()) as cursor:
            cursor.copy_from(
                file=stream,
                table=PatientExternalTest.objects.model._meta.db_table,
                sep='\t',
                columns=('external_id', 'deleted', 'name', 'age', 'age_in', 'gender', 'address', 'aadhar_number', 'passport_number',
                         'mobile_number', 'is_repeat', 'lab_name', 'test_type', 'sample_type', 'result', 'srf_id', 'patient_category',
                         'icmr_id', 'icmr_patient_id', 'contact_number_of', 'nationality', 'pincode', 'village_town',
                         'underlying_medical_condition', 'sample_id', 'hospital_name', 'hospital_state', 'hospital_district',
                         'symptom_status', 'symptoms', 'egene', 'rdrp', 'orf1b', 'remarks', 'state_id', 'district_id', 'is_hospitalized'),
            )
| [
"care.facility.api.serializers.patient_external_test.PatientExternalTestSerializer",
"care.facility.models.PatientExternalTest.CSV_MAKE_PRETTY.copy",
"pandas.read_csv",
"rest_framework.exceptions.ValidationError",
"rest_framework.decorators.action",
"pyexcel_xls.get_data",
"core.fernet.FernetEncryption"... | [((1570, 1587), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1581, 1587), False, 'from collections import defaultdict\n'), ((1609, 1654), 'care.facility.models.PatientExternalTest.HEADER_CSV_MAPPING.keys', 'PatientExternalTest.HEADER_CSV_MAPPING.keys', ([], {}), '()\n', (1652, 1654), False, 'from care.facility.models import PatientExternalTest, PatientExternalTestUploadHistory\n'), ((2209, 2271), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', ([], {'field_name': '"""name"""', 'lookup_expr': '"""icontains"""'}), "(field_name='name', lookup_expr='icontains')\n", (2227, 2271), True, 'from django_filters import rest_framework as filters\n'), ((2285, 2349), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', ([], {'field_name': '"""srf_id"""', 'lookup_expr': '"""icontains"""'}), "(field_name='srf_id', lookup_expr='icontains')\n", (2303, 2349), True, 'from django_filters import rest_framework as filters\n'), ((2370, 2441), 'django_filters.rest_framework.CharFilter', 'filters.CharFilter', ([], {'field_name': '"""mobile_number"""', 'lookup_expr': '"""icontains"""'}), "(field_name='mobile_number', lookup_expr='icontains')\n", (2388, 2441), True, 'from django_filters import rest_framework as filters\n'), ((2621, 2679), 'django_filters.filters.DateFromToRangeFilter', 'DateFromToRangeFilter', ([], {'field_name': '"""sample_collection_date"""'}), "(field_name='sample_collection_date')\n", (2642, 2679), False, 'from django_filters.filters import DateFromToRangeFilter\n'), ((2698, 2745), 'django_filters.filters.DateFromToRangeFilter', 'DateFromToRangeFilter', ([], {'field_name': '"""result_date"""'}), "(field_name='result_date')\n", (2719, 2745), False, 'from django_filters.filters import DateFromToRangeFilter\n'), ((2765, 2813), 'django_filters.filters.DateFromToRangeFilter', 'DateFromToRangeFilter', ([], {'field_name': '"""created_date"""'}), "(field_name='created_date')\n", (2786, 
2813), False, 'from django_filters.filters import DateFromToRangeFilter\n'), ((5185, 5223), 'rest_framework.decorators.action', 'action', ([], {'methods': "['POST']", 'detail': '(False)'}), "(methods=['POST'], detail=False)\n", (5191, 5223), False, 'from rest_framework.decorators import action\n'), ((6703, 6741), 'rest_framework.decorators.action', 'action', ([], {'methods': "['POST']", 'detail': '(False)'}), "(methods=['POST'], detail=False)\n", (6709, 6741), False, 'from rest_framework.decorators import action\n'), ((6655, 6696), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_202_ACCEPTED'}), '(status=status.HTTP_202_ACCEPTED)\n', (6663, 6696), False, 'from rest_framework.response import Response\n'), ((7028, 7050), 'care.users.models.District.objects.all', 'District.objects.all', ([], {}), '()\n', (7048, 7050), False, 'from care.users.models import User, State, District\n'), ((7298, 7315), 'hashlib.blake2b', 'hashlib.blake2b', ([], {}), '()\n', (7313, 7315), False, 'import hashlib\n'), ((7810, 7832), 'magic.Magic', 'magic.Magic', ([], {'mime': '(True)'}), '(mime=True)\n', (7821, 7832), False, 'import magic\n'), ((9352, 9402), 'rest_framework.response.Response', 'Response', ([], {'data': 'response', 'status': 'status.HTTP_200_OK'}), '(data=response, status=status.HTTP_200_OK)\n', (9360, 9402), False, 'from rest_framework.response import Response\n'), ((10918, 10976), 'care.facility.models.PatientExternalTest.ICMR_EXCEL_HEADER_KEY_MAPPING.get', 'PatientExternalTest.ICMR_EXCEL_HEADER_KEY_MAPPING.get', (['key'], {}), '(key)\n', (10971, 10976), False, 'from care.facility.models import PatientExternalTest, PatientExternalTestUploadHistory\n'), ((12291, 12309), 'core.fernet.FernetEncryption', 'FernetEncryption', ([], {}), '()\n', (12307, 12309), False, 'from core.fernet import FernetEncryption\n'), ((12327, 12337), 'io.StringIO', 'StringIO', ([], {}), '()\n', (12335, 12337), False, 'from io import StringIO\n'), ((12355, 12389), 
'csv.writer', 'csv.writer', (['stream'], {'delimiter': '"""\t"""'}), "(stream, delimiter='\\t')\n", (12365, 12389), False, 'import csv\n'), ((4329, 4347), 'rest_framework.exceptions.PermissionDenied', 'PermissionDenied', ([], {}), '()\n', (4345, 4347), False, 'from rest_framework.exceptions import PermissionDenied, ValidationError\n'), ((4778, 4816), 'care.facility.models.PatientExternalTest.CSV_MAPPING.copy', 'PatientExternalTest.CSV_MAPPING.copy', ([], {}), '()\n', (4814, 4816), False, 'from care.facility.models import PatientExternalTest, PatientExternalTestUploadHistory\n'), ((4846, 4888), 'care.facility.models.PatientExternalTest.CSV_MAKE_PRETTY.copy', 'PatientExternalTest.CSV_MAKE_PRETTY.copy', ([], {}), '()\n', (4886, 4888), False, 'from care.facility.models import PatientExternalTest, PatientExternalTestUploadHistory\n'), ((4997, 5096), 'djqscsv.render_to_csv_response', 'render_to_csv_response', (['queryset'], {'field_header_map': 'mapping', 'field_serializer_map': 'pretty_mapping'}), '(queryset, field_header_map=mapping,\n field_serializer_map=pretty_mapping)\n', (5019, 5096), False, 'from djqscsv import render_to_csv_response\n'), ((5342, 5391), 'rest_framework.exceptions.PermissionDenied', 'PermissionDenied', (['"""Permission to Endpoint Denied"""'], {}), "('Permission to Endpoint Denied')\n", (5358, 5391), False, 'from rest_framework.exceptions import PermissionDenied, ValidationError\n'), ((5752, 5809), 'rest_framework.exceptions.ValidationError', 'ValidationError', (["{'sample_tests': 'No Data was provided'}"], {}), "({'sample_tests': 'No Data was provided'})\n", (5767, 5809), False, 'from rest_framework.exceptions import PermissionDenied, ValidationError\n'), ((5887, 5957), 'rest_framework.exceptions.ValidationError', 'ValidationError', (["{'sample_tests': 'Data should be provided as a list'}"], {}), "({'sample_tests': 'Data should be provided as a list'})\n", (5902, 5957), False, 'from rest_framework.exceptions import PermissionDenied, 
ValidationError\n'), ((6153, 6195), 'care.facility.api.serializers.patient_external_test.PatientExternalTestSerializer', 'PatientExternalTestSerializer', ([], {'data': 'sample'}), '(data=sample)\n', (6182, 6195), False, 'from care.facility.api.serializers.patient_external_test import PatientExternalTestSerializer, PatientExternalTestICMRDataSerializer\n'), ((6518, 6570), 'rest_framework.response.Response', 'Response', (['errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(errors, status=status.HTTP_400_BAD_REQUEST)\n', (6526, 6570), False, 'from rest_framework.response import Response\n'), ((6865, 6914), 'rest_framework.exceptions.PermissionDenied', 'PermissionDenied', (['"""Permission to Endpoint Denied"""'], {}), "('Permission to Endpoint Denied')\n", (6881, 6914), False, 'from rest_framework.exceptions import PermissionDenied, ValidationError\n'), ((7632, 7726), 'rest_framework.response.Response', 'Response', ([], {'data': '"""This file has already been uploaded."""', 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data='This file has already been uploaded.', status=status.\n HTTP_400_BAD_REQUEST)\n", (7640, 7726), False, 'from rest_framework.response import Response\n'), ((8013, 8052), 'pyexcel_xls.get_data', 'xls_get', (['uploaded_file'], {'column_limit': '(41)'}), '(uploaded_file, column_limit=41)\n', (8020, 8052), True, 'from pyexcel_xls import get_data as xls_get\n'), ((6958, 6977), 'care.users.models.State.objects.all', 'State.objects.all', ([], {}), '()\n', (6975, 6977), False, 'from care.users.models import User, State, District\n'), ((8880, 8957), 'rest_framework.response.Response', 'Response', ([], {'data': '"""Duplicate entries found."""', 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data='Duplicate entries found.', status=status.HTTP_400_BAD_REQUEST)\n", (8888, 8957), False, 'from rest_framework.response import Response\n'), ((13545, 13564), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (13562, 13564), False, 'from django.db 
import connection\n'), ((2996, 3072), 'care.facility.models.PatientExternalTest.objects.select_related', 'PatientExternalTest.objects.select_related', (['"""ward"""', '"""local_body"""', '"""district"""'], {}), "('ward', 'local_body', 'district')\n", (3038, 3072), False, 'from care.facility.models import PatientExternalTest, PatientExternalTestUploadHistory\n'), ((8561, 8605), 'pandas.read_csv', 'pandas.read_csv', (['file_stream'], {'delimiter': '"""\t"""'}), "(file_stream, delimiter='\\t')\n", (8576, 8605), False, 'import pandas\n'), ((12682, 12694), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12692, 12694), False, 'import uuid\n'), ((11712, 11756), 'datetime.datetime.strptime', 'datetime.strptime', (['item', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(item, '%Y-%m-%d %H:%M:%S')\n", (11729, 11756), False, 'from datetime import date, datetime\n'), ((11966, 12010), 'datetime.datetime.strptime', 'datetime.strptime', (['item', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(item, '%Y-%m-%d %H:%M:%S')\n", (11983, 12010), False, 'from datetime import date, datetime\n'), ((11837, 11881), 'datetime.datetime.strptime', 'datetime.strptime', (['item', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(item, '%Y-%m-%d %H:%M:%S')\n", (11854, 11881), False, 'from datetime import date, datetime\n')] |
"""Converts synonyms into SMILES for the data from Gerber's paper."""
# data/hsd11b1_validation/get_smiles_cactus.py
from io import BytesIO
import pandas as pd
import pycurl
def getsmiles_cactus(name):
    """Resolve a molecule name to a SMILES string.

    Queries the public cactus (National Institutes of Cancer Research)
    web service for the given synonym.

    Args:
        name: any trivial or IUPAC name for a molecule

    Returns:
        Canonical smiles string for that molecule.
    """
    url = "https://cactus.nci.nih.gov/chemical/structure/" + name + "/smiles"
    response = BytesIO()
    curl = pycurl.Curl()
    curl.setopt(curl.URL, url)
    curl.setopt(curl.WRITEDATA, response)
    curl.perform()
    curl.close()
    smiles = response.getvalue().decode("UTF-8")
    print(name, smiles)
    return smiles
def main():
    """Run a batch of name-to-SMILES conversions.

    Reads a tab-separated table with an ``Iupac`` column, resolves each
    name via :func:`getsmiles_cactus`, and writes the augmented table to
    ``02-robb_data_smiles.txt``.
    """
    data = "01-robb_data.txt"
    df = pd.read_csv(data, sep="\t")
    df["SMILES"] = df.apply(lambda row: getsmiles_cactus(row["Iupac"]), axis=1)
    df.to_csv("02-robb_data_smiles.txt", sep="\t")


if __name__ == "__main__":
    # Guard the entry point: the original unconditional call ran the whole
    # batch of network lookups on import.
    main()
| [
"pycurl.Curl",
"io.BytesIO",
"pandas.read_csv"
] | [((615, 624), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (622, 624), False, 'from io import BytesIO\n'), ((633, 646), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (644, 646), False, 'import pycurl\n'), ((934, 961), 'pandas.read_csv', 'pd.read_csv', (['data'], {'sep': '"""\t"""'}), "(data, sep='\\t')\n", (945, 961), True, 'import pandas as pd\n')] |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : eval-referential.py
# Author : <NAME>, <NAME>
# Email : <EMAIL>, <EMAIL>
# Date : 30.07.2019
# Last Modified Date: 16.10.2019
# Last Modified By : Chi Han, Jiayuan Mao
#
# This file is part of the VCML codebase
# Distributed under MIT license
# -*- coding: utf-8 -*-
# File : eval-referential.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 07/30/2019
#
# This file is part of eval-clevr-instance-retrieval.
# Distributed under terms of the MIT license.
import six
import functools
import sys
from IPython.core import ultratb
import numpy as np
import jacinle.io as io
import jacinle.random as random
from jacinle.cli.argument import JacArgumentParser
from jacinle.utils.tqdm import tqdm_gofor, get_current_tqdm
from jacinle.utils.meter import GroupMeters
# Pretty-print uncaught exceptions and drop into pdb for interactive debugging.
sys.excepthook = ultratb.FormattedTB(
    mode='Plain', color_scheme='Linux', call_pdb=True)

# Command-line interface: both inputs are required
# (type='checked_file' — presumably validates existence; confirm in jacinle docs).
parser = JacArgumentParser()
parser.add_argument('--scene-json', required=True, type='checked_file')
parser.add_argument('--preds-json', required=True, type='checked_file')
# Parsed at import time: this module is a script, not a library.
args = parser.parse_args()
class Definition(object):
    """Static CLEVR vocabulary: attributes, concepts, relations and synonyms."""

    annotation_attribute_names = ['color', 'material', 'shape', 'size']
    annotation_relation_names = ['behind', 'front', 'left', 'right']
    concepts = {
        'color': ['gray', 'red', 'blue', 'green', 'brown', 'purple', 'cyan', 'yellow'],
        'material': ['rubber', 'metal'],
        'shape': ['cube', 'sphere', 'cylinder'],
        'size': ['small', 'large']
    }
    # Inverse map: concept word -> the attribute it belongs to.
    concept2attribute = {
        word: attr for attr, words in concepts.items() for word in words
    }
    relational_concepts = {
        'spatial_relation': ['left', 'right', 'front', 'behind']
    }
    synonyms = {
        "thing": ["thing", "object"],
        "sphere": ["sphere", "ball"],
        "cube": ["cube", "block"],
        "cylinder": ["cylinder"],
        "large": ["large", "big"],
        "small": ["small", "tiny"],
        "metal": ["metallic", "metal", "shiny"],
        "rubber": ["rubber", "matte"],
    }
    # Inverse map: surface word -> canonical lemma.
    word2lemma = {
        word: lemma for lemma, words in synonyms.items() for word in words
    }


# Shared singleton used by the helper functions below.
def_ = Definition()
def get_desc(obj):
    """Return the object's attribute words, substituting a random synonym where one exists."""
    words = [obj[attr] for attr in def_.annotation_attribute_names]
    for idx, word in enumerate(words):
        if word in def_.synonyms:
            words[idx] = random.choice_list(def_.synonyms[word])
    return words
def run_desc_obj(obj, desc):
    """Check whether every descriptor word in *desc* matches *obj*'s attributes."""
    for word in desc:
        lemma = def_.word2lemma.get(word, word)
        # Compare against the object's value for the lemma's attribute.
        if obj[def_.concept2attribute[lemma]] != lemma:
            return False
    return True
def run_desc_pred(all_preds, desc):
    """Combine per-concept prediction scores with an elementwise minimum.

    Starts from a large sentinel (10000), so an empty description yields
    the sentinel itself.
    """
    score = 10000
    for concept in desc:
        score = np.fmin(score, all_preds[concept])
    return score
def test(index, all_objs, all_preds, meter):
    """Evaluate retrieval of one object from a random description; update recall@k."""
    target = all_objs[index]
    # Sample the description length first so the RNG call order matches
    # the rest of the pipeline.
    size = random.randint(1, 3)
    desc = random.choice_list(get_desc(target), size=size)
    if isinstance(desc, six.string_types):
        desc = [desc]
    # Objects that the description does NOT match are the distractors.
    distractors = [i for i, o in enumerate(all_objs) if not run_desc_obj(o, desc)]
    scores = run_desc_pred(all_preds, desc)
    rank = (scores[distractors] > scores[index]).sum()
    # print(desc)
    # print(scores)
    # print(scores[index])
    for k in range(1, 6):
        meter.update('r@%02d' % k, rank <= k)
def transpose_scene(scene):
    """Turn a per-object mapping {"0": {...}, "1": {...}} into a
    per-field mapping of numpy arrays stacked over object index."""
    num_objects = len(scene)
    return {
        field: np.array([scene[str(idx)][field] for idx in range(num_objects)])
        for field in scene['0']
    }
def main():
    """Load scenes and per-scene predictions, score every object against
    random descriptors, and print aggregated recall@k meters."""
    scenes = io.load_json(args.scene_json)['scenes']
    preds = io.load(args.preds_json)
    # Predictions may be stored as a dict keyed by scene index.
    if isinstance(preds, dict):
        preds = list(preds.values())
    # NOTE(review): dead branch (literal False) -- looks like a leftover
    # toggle for the transposed per-object prediction layout.
    if False:
        preds = [transpose_scene(s) for s in preds]
    # flattened_objs = [o for s in scenes for o in s['objects']]
    # flattened_preds = {
    #     k: np.concatenate([np.array(p[k]) for p in preds], axis=0)
    #     for k in preds[0]
    # }
    meter = GroupMeters()
    '''
    for i, scene in tqdm_gofor(scenes, mininterval=0.5):
        for j in range(len(scene['objects'])):
            test(j, scene['objects'], preds[i], meter)
    '''
    for i, pred in tqdm_gofor(preds, mininterval=0.5):
        scene = scenes[i]
        for j in range(len(scene['objects'])):
            test(j, scene['objects'], pred, meter)
    print(meter.format_simple('Results:', compressed=False))
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"jacinle.io.load_json",
"IPython.core.ultratb.FormattedTB",
"jacinle.utils.meter.GroupMeters",
"jacinle.random.randint",
"jacinle.cli.argument.JacArgumentParser",
"jacinle.random.choice_list",
"jacinle.io.load",
"numpy.fmin",
"jacinle.utils.tqdm.tqdm_gofor"
] | [((890, 960), 'IPython.core.ultratb.FormattedTB', 'ultratb.FormattedTB', ([], {'mode': '"""Plain"""', 'color_scheme': '"""Linux"""', 'call_pdb': '(True)'}), "(mode='Plain', color_scheme='Linux', call_pdb=True)\n", (909, 960), False, 'from IPython.core import ultratb\n'), ((976, 995), 'jacinle.cli.argument.JacArgumentParser', 'JacArgumentParser', ([], {}), '()\n', (993, 995), False, 'from jacinle.cli.argument import JacArgumentParser\n'), ((2809, 2829), 'jacinle.random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (2823, 2829), True, 'import jacinle.random as random\n'), ((3654, 3678), 'jacinle.io.load', 'io.load', (['args.preds_json'], {}), '(args.preds_json)\n', (3661, 3678), True, 'import jacinle.io as io\n'), ((4023, 4036), 'jacinle.utils.meter.GroupMeters', 'GroupMeters', ([], {}), '()\n', (4034, 4036), False, 'from jacinle.utils.meter import GroupMeters\n'), ((4232, 4266), 'jacinle.utils.tqdm.tqdm_gofor', 'tqdm_gofor', (['preds'], {'mininterval': '(0.5)'}), '(preds, mininterval=0.5)\n', (4242, 4266), False, 'from jacinle.utils.tqdm import tqdm_gofor, get_current_tqdm\n'), ((2677, 2701), 'numpy.fmin', 'np.fmin', (['s', 'all_preds[d]'], {}), '(s, all_preds[d])\n', (2684, 2701), True, 'import numpy as np\n'), ((3602, 3631), 'jacinle.io.load_json', 'io.load_json', (['args.scene_json'], {}), '(args.scene_json)\n', (3614, 3631), True, 'import jacinle.io as io\n'), ((2360, 2396), 'jacinle.random.choice_list', 'random.choice_list', (['def_.synonyms[n]'], {}), '(def_.synonyms[n])\n', (2378, 2396), True, 'import jacinle.random as random\n')] |
import pytest
@pytest.fixture(scope="module")
def client(looper, txnPoolNodeSet, client1, client1Connected):
    """Module-scoped alias for the already-connected client.

    Requesting `looper`, `txnPoolNodeSet` and `client1` forces those
    fixtures to be set up first, so the returned client is usable.
    """
    return client1Connected
| [
"pytest.fixture"
] | [((17, 47), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (31, 47), False, 'import pytest\n')] |
#!/usr/bin/env python
#
# Copyright (c) 2018 10X Genomics, Inc. All rights reserved.
#
# Utils for feature-barcoding technology
import numpy as np
import os
import json
import tenkit.safe_json as tk_safe_json
def check_if_none_or_empty(matrix):
    """Return True if *matrix* is None or has zero rows or zero columns.

    Args:
        matrix: object exposing a scipy-style ``get_shape() -> (rows, cols)``
            method, or None.

    Returns:
        bool: True when there is no data to work with.
    """
    if matrix is None:
        return True
    # Call get_shape() once (the original called it twice) and test both dims.
    rows, cols = matrix.get_shape()[:2]
    return rows == 0 or cols == 0
def write_json_from_dict(input_dict, out_file_name):
    """Serialize *input_dict* to *out_file_name* as pretty-printed,
    key-sorted JSON.

    Values are passed through tenkit's json_sanitize first -- presumably
    to make numpy/NaN values JSON-safe (see tenkit.safe_json).
    """
    with open(out_file_name, 'w') as f:
        json.dump(tk_safe_json.json_sanitize(input_dict), f, indent=4, sort_keys=True)
def write_csv_from_dict(input_dict, out_file_name, header=None):
    """Write *input_dict* as a simple two-column CSV, one ``key,value``
    row per entry.

    Values are stringified with ``str()`` and are NOT quoted or escaped,
    so keys/values containing commas or newlines will corrupt the file.

    Args:
        input_dict: mapping to dump.
        out_file_name: destination path; overwritten if it exists.
        header: optional header line written verbatim first (caller must
            include the trailing newline).
    """
    with open(out_file_name, 'w') as f:
        if header is not None:
            f.write(header)
        # .items() instead of the Python-2-only .iteritems(): identical
        # iteration on Python 2, and keeps this helper working on Python 3.
        for (key, value) in input_dict.items():
            f.write(str(key) + ',' + str(value) + '\n')
def get_depth_string(num_reads_per_cell):
    """Format a per-cell read depth in thousands, e.g. 50000 -> "50.0k"."""
    thousands = np.round(float(num_reads_per_cell) / 1000, 1)
    return "%sk" % thousands
def all_files_present(list_file_paths):
    """Return True iff *list_file_paths* is a list of paths that all
    exist as regular files.

    Returns False when the list itself is None or contains a None entry.
    Note: an empty list vacuously returns True (unchanged behavior).
    """
    if list_file_paths is None:
        return False
    # any()/all() replace the original intermediate list-comprehension
    # temporaries; short-circuits on the first missing entry.
    if any(fpath is None for fpath in list_file_paths):
        return False
    return all(os.path.isfile(fpath) for fpath in list_file_paths)
| [
"os.path.isfile",
"tenkit.safe_json.json_sanitize"
] | [((1193, 1214), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (1207, 1214), False, 'import os\n'), ((494, 532), 'tenkit.safe_json.json_sanitize', 'tk_safe_json.json_sanitize', (['input_dict'], {}), '(input_dict)\n', (520, 532), True, 'import tenkit.safe_json as tk_safe_json\n')] |
import json
import rimu
from rimu import options
def unexpectedError(_, message):
    """Render-options callback used where no callback is expected:
    always raises, failing the calling test."""
    raise Exception('unexpected callback: {}'.format(message))
def test_render():
    # Smoke test: a bare line of Rimu source renders to an HTML paragraph.
    assert rimu.render('Hello World!') == '<p>Hello World!</p>'
def test_jsonTests():
    """Run the language-independent Rimu conformance suite.

    Each spec in rimu-tests.json supplies input, render options, expected
    output and optionally an expected callback transcript; specs marked
    unsupported for 'py' are skipped.
    """
    with open('./tests/rimu-tests.json') as f:
        data = json.load(f)
    for spec in data:
        description = spec['description']
        unsupported = 'py' in spec.get('unsupported', '')
        if unsupported:
            print(f'skipped unsupported: {description}')
            continue
        print(description)
        renderOptions = rimu.RenderOptions()
        renderOptions.safeMode = spec['options'].get('safeMode')
        renderOptions.htmlReplacement = spec['options'].get('htmlReplacement')
        renderOptions.reset = spec['options'].get('reset')
        msg = ''
        # Accumulate callback messages so they can be compared afterwards.
        def callback(message: rimu.CallbackMessage):
            nonlocal msg
            msg += f'{message.type}: {message.text}\n'
        # Captured callback message.
        if spec['expectedCallback'] or unsupported:
            renderOptions.callback = callback
        else:
            # Callback should not occur, this will throw an error.
            renderOptions.callback = unexpectedError
        input = spec['input']
        result = rimu.render(input, renderOptions)
        assert result == spec['expectedOutput'], description
        if spec['expectedCallback']:
            assert msg.strip() == spec['expectedCallback']
| [
"json.load",
"rimu.render",
"rimu.RenderOptions"
] | [((172, 199), 'rimu.render', 'rimu.render', (['"""Hello World!"""'], {}), "('Hello World!')\n", (183, 199), False, 'import rimu\n'), ((311, 323), 'json.load', 'json.load', (['f'], {}), '(f)\n', (320, 323), False, 'import json\n'), ((599, 619), 'rimu.RenderOptions', 'rimu.RenderOptions', ([], {}), '()\n', (617, 619), False, 'import rimu\n'), ((1290, 1323), 'rimu.render', 'rimu.render', (['input', 'renderOptions'], {}), '(input, renderOptions)\n', (1301, 1323), False, 'import rimu\n')] |
# -*- coding: utf-8 -*-
"""
app_test.py
Tests the tkit.App class.
Author: <NAME>; Oct 2017
License: MIT
"""
import tkit
if __name__ == "__main__":
    # Build the main application window: title, width, height.
    test_app = tkit.App("Test App", 250, 100)
    # Create and customize menubar
    menubar = tkit.Menubar()
    menubar.add_menu("File")
    #test_menubar.menus["File"].add_action("Test", app.mainloop)
    menubar.menus["File"].add_action("Close", test_app.close)
    menubar.add_menu("Help")
    # "About" dialog shown from the Help menu.
    menubar.menus["Help"].add_action(
        "About", tkit.Popup("About", "This program ...").show_info)
    # Add menubar to app
    test_app.add_widget(menubar)
    test_app.add_widget(tkit.BrowseFile())
    # Hook OK to the app's value-collection command, then enter the Tk loop.
    test_app.add_button("OK", test_app.cmd_collect_values)
    test_app.mainloop()
| [
"tkit.Popup",
"tkit.Menubar",
"tkit.BrowseFile",
"tkit.App"
] | [((183, 213), 'tkit.App', 'tkit.App', (['"""Test App"""', '(250)', '(100)'], {}), "('Test App', 250, 100)\n", (191, 213), False, 'import tkit\n'), ((263, 277), 'tkit.Menubar', 'tkit.Menubar', ([], {}), '()\n', (275, 277), False, 'import tkit\n'), ((652, 669), 'tkit.BrowseFile', 'tkit.BrowseFile', ([], {}), '()\n', (667, 669), False, 'import tkit\n'), ((518, 557), 'tkit.Popup', 'tkit.Popup', (['"""About"""', '"""This program ..."""'], {}), "('About', 'This program ...')\n", (528, 557), False, 'import tkit\n')] |
""" Database models
"""
from typing import Tuple
import attr
import sqlalchemy as sa
from .settings import DATCORE_STR, SIMCORE_S3_ID, SIMCORE_S3_STR
#FIXME: W0611:Unused UUID imported from sqlalchemy.dialects.postgresql
#from sqlalchemy.dialects.postgresql import UUID
#FIXME: R0902: Too many instance attributes (11/7) (too-many-instance-attributes)
#pylint: disable=R0902
# Shared registry that all table definitions in this module attach to.
metadata = sa.MetaData()
# File meta data
# One row per stored file; `file_uuid` is the primary key and every column
# is a plain string (field meanings are documented on the FileMetaData
# attrs class in this module).
file_meta_data = sa.Table(
    "file_meta_data", metadata,
    sa.Column("file_uuid", sa.String, primary_key=True),
    sa.Column("location_id", sa.String),
    sa.Column("location", sa.String),
    sa.Column("bucket_name", sa.String),
    sa.Column("object_name", sa.String),
    sa.Column("project_id", sa.String),
    sa.Column("project_name", sa.String),
    sa.Column("node_id", sa.String),
    sa.Column("node_name", sa.String),
    sa.Column("file_name", sa.String),
    sa.Column("user_id", sa.String),
    sa.Column("user_name", sa.String)
    # sa.Column("state", sa.String())
)
def _parse_datcore(file_uuid: str) -> Tuple[str, str]:
# we should have 12/123123123/111.txt
object_name = "invalid"
dataset_name = "invalid"
parts = file_uuid.split("/")
if len(parts) > 1:
dataset_name = parts[0]
object_name = "/".join(parts[1:])
return dataset_name, object_name
def _locations():
    """Return the list of known storage locations as {name, id} dicts."""
    # TODO: so far this is hardcoded
    return [
        {"name": SIMCORE_S3_STR, "id": 0},
        {"name": DATCORE_STR, "id": 1},
    ]
def _location_from_id(location_id: str) -> str:
    """Map a storage location id ("0"/"1") to its display string.

    Unknown ids map to "undefined".
    """
    # Dict lookup instead of an if/elif chain, per the original TODO asking
    # for a map keeping _location_from_id/_location_from_str in sync.
    id_to_location = {
        "0": SIMCORE_S3_STR,
        "1": DATCORE_STR,
    }
    return id_to_location.get(location_id, "undefined")
def _location_from_str(location: str) -> str:
    """Map a storage location display string to its id ("0"/"1").

    Unknown locations map to "undefined".
    """
    # Mirror of _location_from_id, expressed as the inverse lookup table.
    location_to_id = {
        SIMCORE_S3_STR: "0",
        DATCORE_STR: "1",
    }
    return location_to_id.get(location, "undefined")
@attr.s(auto_attribs=True)
class FileMetaData:
    """ This is a proposal, probably no everything is needed.
        It is actually an overkill

        file_name       : display name for a file
        location_id     : storage location
        location_name   : storage location display name
        project_id      : project_id
        projec_name     : project display name
        node_id         : node id
        node_name       : display_name
        bucket_name     : name of the bucket
        object_name     : s3 object name = folder/folder/filename.ending
        user_id         : user_id
        user_name       : user_name

        file_uuid       : unique identifier for a file:

            bucket_name/project_id/node_id/file_name = /bucket_name/object_name

        state:  on of OK, UPLOADING, DELETED

    """
    file_uuid: str=""
    location_id: str=""
    location: str=""
    bucket_name: str=""
    object_name: str=""
    project_id: str=""
    project_name: str=""
    node_id: str=""
    node_name: str=""
    file_name: str=""
    user_id: str=""
    user_name: str=""

    def simcore_from_uuid(self, file_uuid: str, bucket_name: str):
        """Populate the simcore-S3 fields from a "project/node/file" uuid."""
        parts = file_uuid.split("/")
        # NOTE(review): the assert duplicates the `if` below and is stripped
        # under `python -O`; consider raising ValueError instead.
        assert len(parts) == 3
        if len(parts) == 3:
            self.location = SIMCORE_S3_STR
            self.location_id = SIMCORE_S3_ID
            self.bucket_name = bucket_name
            self.object_name = "/".join(parts[:])
            self.file_name = parts[2]
            self.project_id = parts[0]
            self.node_id = parts[1]
            self.file_uuid = file_uuid
| [
"sqlalchemy.MetaData",
"attr.s",
"sqlalchemy.Column"
] | [((392, 405), 'sqlalchemy.MetaData', 'sa.MetaData', ([], {}), '()\n', (403, 405), True, 'import sqlalchemy as sa\n'), ((2050, 2075), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (2056, 2075), False, 'import attr\n'), ((487, 538), 'sqlalchemy.Column', 'sa.Column', (['"""file_uuid"""', 'sa.String'], {'primary_key': '(True)'}), "('file_uuid', sa.String, primary_key=True)\n", (496, 538), True, 'import sqlalchemy as sa\n'), ((544, 579), 'sqlalchemy.Column', 'sa.Column', (['"""location_id"""', 'sa.String'], {}), "('location_id', sa.String)\n", (553, 579), True, 'import sqlalchemy as sa\n'), ((585, 617), 'sqlalchemy.Column', 'sa.Column', (['"""location"""', 'sa.String'], {}), "('location', sa.String)\n", (594, 617), True, 'import sqlalchemy as sa\n'), ((623, 658), 'sqlalchemy.Column', 'sa.Column', (['"""bucket_name"""', 'sa.String'], {}), "('bucket_name', sa.String)\n", (632, 658), True, 'import sqlalchemy as sa\n'), ((664, 699), 'sqlalchemy.Column', 'sa.Column', (['"""object_name"""', 'sa.String'], {}), "('object_name', sa.String)\n", (673, 699), True, 'import sqlalchemy as sa\n'), ((705, 739), 'sqlalchemy.Column', 'sa.Column', (['"""project_id"""', 'sa.String'], {}), "('project_id', sa.String)\n", (714, 739), True, 'import sqlalchemy as sa\n'), ((745, 781), 'sqlalchemy.Column', 'sa.Column', (['"""project_name"""', 'sa.String'], {}), "('project_name', sa.String)\n", (754, 781), True, 'import sqlalchemy as sa\n'), ((787, 818), 'sqlalchemy.Column', 'sa.Column', (['"""node_id"""', 'sa.String'], {}), "('node_id', sa.String)\n", (796, 818), True, 'import sqlalchemy as sa\n'), ((824, 857), 'sqlalchemy.Column', 'sa.Column', (['"""node_name"""', 'sa.String'], {}), "('node_name', sa.String)\n", (833, 857), True, 'import sqlalchemy as sa\n'), ((863, 896), 'sqlalchemy.Column', 'sa.Column', (['"""file_name"""', 'sa.String'], {}), "('file_name', sa.String)\n", (872, 896), True, 'import sqlalchemy as sa\n'), ((902, 933), 'sqlalchemy.Column', 
'sa.Column', (['"""user_id"""', 'sa.String'], {}), "('user_id', sa.String)\n", (911, 933), True, 'import sqlalchemy as sa\n'), ((939, 972), 'sqlalchemy.Column', 'sa.Column', (['"""user_name"""', 'sa.String'], {}), "('user_name', sa.String)\n", (948, 972), True, 'import sqlalchemy as sa\n')] |
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import confusion_matrix, roc_auc_score
from category_encoders import MEstimateEncoder
import numpy as np
from collections import defaultdict
import os
from sklearn.metrics import roc_auc_score
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
def fit_predict(modelo, enc, data, target, test):
    """Fit an encoder+model pipeline on (data, target), then predict on *test*."""
    steps = [("encoder", enc), ("model", modelo)]
    pipeline = Pipeline(steps)
    pipeline.fit(data, target)
    return pipeline.predict(test)
def auc_group(model, data, y_true, dicc, group: str = "", min_samples: int = 50):
    """Append per-category and overall ROC-AUC scores of *model* to *dicc*.

    Categories of ``data[group]`` with more than *min_samples* rows each get
    an AUC computed on their own rows; the key "all" gets the AUC over the
    full data. When *dicc* is empty a fresh defaultdict is created (the
    caller's empty dict is NOT mutated); the populated dict is returned.
    """
    aux = data.copy()
    aux["target"] = y_true
    # Keep only categories with enough support, plus the aggregate bucket.
    cats = aux[group].value_counts()
    cats = cats[cats > min_samples].index.tolist()
    cats = cats + ["all"]
    if len(dicc) == 0:
        dicc = defaultdict(list, {k: [] for k in cats})
    for cat in cats:
        if cat != "all":
            aux2 = aux[aux[group] == cat]
            # Drop the appended target column before scoring this slice.
            preds = model.predict_proba(aux2.drop(columns="target"))[:, 1]
            truth = aux2["target"]
            dicc[cat].append(roc_auc_score(truth, preds))
        elif cat == "all":
            dicc[cat].append(roc_auc_score(y_true, model.predict_proba(data)[:, 1]))
        else:
            pass
    return dicc
def explain(xgb: bool = True):
    """
    Provide a SHAP explanation by fitting MEstimate and GBDT.

    With xgb=True returns summed |SHAP| per feature (and shows a beeswarm
    plot); otherwise returns logistic-regression coefficients sorted by value.
    """
    # NOTE(review): reads module-level X_tr / y_tr and uses `shap` and
    # `LogisticRegression`, none of which are defined or imported in this
    # module's visible code -- confirm they exist at the call site.
    if xgb:
        pipe = Pipeline(
            [("encoder", MEstimateEncoder()), ("model", GradientBoostingClassifier())]
        )
        pipe.fit(X_tr, y_tr)
        explainer = shap.Explainer(pipe[1])
        # Explain the fitted model on the encoded features (pipeline minus model).
        shap_values = explainer(pipe[:-1].transform(X_tr))
        shap.plots.beeswarm(shap_values)
        return pd.DataFrame(np.abs(shap_values.values), columns=X_tr.columns).sum()
    else:
        pipe = Pipeline(
            [("encoder", MEstimateEncoder()), ("model", LogisticRegression())]
        )
        pipe.fit(X_tr, y_tr)
        coefficients = pd.concat(
            [pd.DataFrame(X_tr.columns), pd.DataFrame(np.transpose(pipe[1].coef_))],
            axis=1,
        )
        coefficients.columns = ["feat", "val"]
        return coefficients.sort_values(by="val", ascending=False)
def calculate_cm(true, preds):
    """Return the true-positive rate (sensitivity/recall) of class 0.

    Note on argument order: the arguments are forwarded to sklearn's
    ``confusion_matrix`` as ``(preds, true)``; the callers in this module
    (see ``metric_calculator``) pass them pre-swapped, so the effective
    order matches sklearn's ``(y_true, y_pred)``.
    """
    # Obtain the confusion matrix
    cm = confusion_matrix(preds, true)
    # Per-class counts, following
    # https://stackoverflow.com/questions/31324218: only TP and FN are
    # needed for the TPR; the other rates (TNR/PPV/NPV/FPR/FNR/FDR/ACC)
    # computed by the original implementation were never used.
    FN = cm.sum(axis=1) - np.diag(cm)
    TP = np.diag(cm)
    # Sensitivity, hit rate, recall, or true positive rate (per class).
    TPR = TP / (TP + FN)
    return TPR[0]
def metric_calculator(
    modelo, data: pd.DataFrame, truth: pd.DataFrame, col: str, group1: str, group2: str
):
    """Difference in class-0 TPR between *group1* and *group2* of *col*
    under model *modelo* (a simple equal-opportunity-style gap)."""
    labeled = data.copy()
    labeled["target"] = truth

    def group_metric(group):
        # Slice features and ground truth for one group, then score it.
        features = data[data[col] == group]
        ground_truth = labeled[labeled[col] == group].target
        predictions = modelo.predict(features)
        return calculate_cm(predictions, ground_truth)

    return group_metric(group1) - group_metric(group2)
def plot_rolling(data, roll_mean: int = 5, roll_std: int = 20):
    """Plot the rolling mean of every column of *data* with a legend.

    NOTE(review): relies on `plt` (matplotlib.pyplot), which is not
    imported in this module's visible imports -- confirm it is imported
    elsewhere. `stand` (rolling 5% quantile) is computed but only used by
    the commented-out confidence band below.
    """
    aux = data.rolling(roll_mean).mean().dropna()
    stand = data.rolling(roll_std).quantile(0.05, interpolation="lower").dropna()
    plt.figure()
    for col in data.columns:
        plt.plot(aux[col], label=col)
        # plt.fill_between(aux.index,(aux[col] - stand[col]),(aux[col] + stand[col]),# color="b",alpha=0.1,)
    plt.legend()
    plt.show()
def scale_output(data):
    """Z-score every column of *data*, preserving index and column labels.

    NOTE(review): `StandardScaler` (sklearn.preprocessing) is not imported
    in this module's visible imports -- confirm it is imported elsewhere.
    """
    return pd.DataFrame(
        StandardScaler().fit_transform(data), columns=data.columns, index=data.index
    )
import numpy as np
def psi(expected, actual, buckettype="bins", buckets=10, axis=0):
    """Calculate the PSI (population stability index) across all variables.

    Args:
        expected: numpy matrix of original values
        actual: numpy matrix of new values, same size as expected
        buckettype: bucketing strategy -- "bins" splits the expected range
            into even splits, "quantiles" uses empirical quantile buckets
        buckets: number of buckets to use
        axis: axis by which variables are defined, 0 for vertical, 1 for horizontal

    Returns:
        psi_values: ndarray of psi values for each variable (a plain
        scalar for 1-D input)

    Author:
        <NAME>
        github.com/mwburke
        worksofchart.com
    """

    def _psi(expected_array, actual_array, buckets):
        """Calculate the PSI for a single variable."""

        def scale_range(input, min, max):
            # Affinely rescale `input` in place to span [min, max].
            input += -(np.min(input))
            input /= np.max(input) / (max - min)
            input += min
            return input

        # Even percentile breakpoints 0..100, mapped either onto the data
        # range (bins) or onto quantiles of the expected sample.
        breakpoints = np.arange(0, buckets + 1) / buckets * 100
        if buckettype == "bins":
            breakpoints = scale_range(
                breakpoints, np.min(expected_array), np.max(expected_array)
            )
        elif buckettype == "quantiles":
            breakpoints = np.stack(
                [np.percentile(expected_array, b) for b in breakpoints]
            )

        expected_percents = np.histogram(expected_array, breakpoints)[0] / len(
            expected_array
        )
        actual_percents = np.histogram(actual_array, breakpoints)[0] / len(actual_array)

        def sub_psi(e_perc, a_perc):
            """PSI contribution of one bucket; zero frequencies are clamped
            to 1e-4 so the log stays finite."""
            if a_perc == 0:
                a_perc = 0.0001
            if e_perc == 0:
                e_perc = 0.0001
            return (e_perc - a_perc) * np.log(e_perc / a_perc)

        # Fix: use the builtin sum -- np.sum(<generator>) is deprecated and
        # unreliable across numpy versions.
        return sum(
            sub_psi(expected_percents[i], actual_percents[i])
            for i in range(0, len(expected_percents))
        )

    if len(expected.shape) == 1:
        psi_values = np.empty(len(expected.shape))
    else:
        psi_values = np.empty(expected.shape[axis])

    for i in range(0, len(psi_values)):
        if len(psi_values) == 1:
            # 1-D input: compute a single scalar PSI over the whole array.
            psi_values = _psi(expected, actual, buckets)
        elif axis == 0:
            psi_values[i] = _psi(expected[:, i], actual[:, i], buckets)
        elif axis == 1:
            psi_values[i] = _psi(expected[i, :], actual[i, :], buckets)

    return psi_values
def loop_estimators(
    estimator_set: list,
    normal_data,
    normal_data_ood,
    shap_data,
    shap_data_ood,
    performance_ood,
    target,
    state: str,
    error_type: str,
    target_shift: bool = False,
    output_path: str = "",
):
    """
    Loop through the estimators and calculate the performance for each.

    For every estimator in `estimator_set` (a mapping name -> model), fits
    on an in-distribution split and records the in-distribution test MAE
    plus the MAE against the OOD performance targets, for the "Only Data"
    feature set and -- unless `target_shift` is set -- "Only Shap" and
    "Data + Shap" as well. Results are written to
    results/<state>_<error_type>.csv.

    NOTE(review): `output_path` is accepted but never used; output always
    goes under "results/".
    """
    res = []
    for estimator in estimator_set:
        ## ONLY DATA
        X_train, X_test, y_train, y_test = train_test_split(
            normal_data, target, test_size=0.33, random_state=42
        )
        estimator_set[estimator].fit(X_train, y_train)
        error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
        error_ood = mean_absolute_error(
            estimator_set[estimator].predict(normal_data_ood),
            np.nan_to_num(list(performance_ood.values())),
        )
        res.append([state, error_type, estimator, "Only Data", error_te, error_ood])
        if target_shift == False:
            #### ONLY SHAP
            X_train, X_test, y_train, y_test = train_test_split(
                shap_data, target, test_size=0.33, random_state=42
            )
            estimator_set[estimator].fit(X_train, y_train)
            error_te = mean_absolute_error(
                estimator_set[estimator].predict(X_test), y_test
            )
            error_ood = mean_absolute_error(
                estimator_set[estimator].predict(shap_data_ood),
                np.nan_to_num(list(performance_ood.values())),
            )
            res.append([state, error_type, estimator, "Only Shap", error_te, error_ood])
            ### SHAP + DATA
            X_train, X_test, y_train, y_test = train_test_split(
                pd.concat([shap_data, normal_data], axis=1),
                target,
                test_size=0.33,
                random_state=42,
            )
            estimator_set[estimator].fit(X_train, y_train)
            error_te = mean_absolute_error(
                estimator_set[estimator].predict(X_test), y_test
            )
            error_ood = mean_absolute_error(
                estimator_set[estimator].predict(
                    pd.concat([shap_data_ood, normal_data_ood], axis=1)
                ),
                np.nan_to_num(list(performance_ood.values())),
            )
            res.append(
                [state, error_type, estimator, "Data + Shap", error_te, error_ood]
            )
    folder = os.path.join("results", state + "_" + error_type + ".csv")
    columnas = ["state", "error_type", "estimator", "data", "error_te", "error_ood"]
    pd.DataFrame(res, columns=columnas).to_csv(folder, index=False)
def loop_estimators_fairness(
    estimator_set: list,
    normal_data,
    normal_data_ood,
    target_shift,
    target_shift_ood,
    shap_data,
    shap_data_ood,
    performance_ood,
    target,
    state: str,
    error_type: str,
    output_path: str = "",
):
    """
    Loop through the estimators and calculate the performance for each.
    Particular fairness case.

    Like `loop_estimators`, but evaluates five feature sets per estimator
    ("Only Data", "Only Shap", "Only Target", "Data+Target",
    "Data+Target+Shap") and uses `performance_ood` directly (not
    `.values()`). Results go to results/<state>_<error_type>.csv.

    NOTE(review): `output_path` is accepted but never used.
    """
    res = []
    for estimator in estimator_set:
        ## ONLY DATA
        X_train, X_test, y_train, y_test = train_test_split(
            normal_data, target, test_size=0.33, random_state=42
        )
        estimator_set[estimator].fit(X_train, y_train)
        error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
        error_ood = mean_absolute_error(
            estimator_set[estimator].predict(normal_data_ood),
            np.nan_to_num(performance_ood),
        )
        res.append([state, error_type, estimator, "Only Data", error_te, error_ood])
        #### ONLY SHAP
        X_train, X_test, y_train, y_test = train_test_split(
            shap_data, target, test_size=0.33, random_state=42
        )
        estimator_set[estimator].fit(X_train, y_train)
        error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
        error_ood = mean_absolute_error(
            estimator_set[estimator].predict(shap_data_ood),
            np.nan_to_num(performance_ood),
        )
        res.append([state, error_type, estimator, "Only Shap", error_te, error_ood])
        #### ONLY TARGET
        X_train, X_test, y_train, y_test = train_test_split(
            target_shift, target, test_size=0.33, random_state=42
        )
        estimator_set[estimator].fit(X_train, y_train)
        error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
        error_ood = mean_absolute_error(
            estimator_set[estimator].predict(target_shift_ood),
            np.nan_to_num(performance_ood),
        )
        res.append([state, error_type, estimator, "Only Target", error_te, error_ood])
        #### TARGET + DISTRIBUTION
        X_train, X_test, y_train, y_test = train_test_split(
            pd.concat([target_shift, normal_data], axis=1),
            target,
            test_size=0.33,
            random_state=42,
        )
        estimator_set[estimator].fit(X_train, y_train)
        error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
        error_ood = mean_absolute_error(
            estimator_set[estimator].predict(
                pd.concat([target_shift_ood, normal_data_ood], axis=1)
            ),
            np.nan_to_num(performance_ood),
        )
        res.append([state, error_type, estimator, "Data+Target", error_te, error_ood])
        ### SHAP + DATA
        X_train, X_test, y_train, y_test = train_test_split(
            pd.concat([shap_data, normal_data, target_shift], axis=1),
            target,
            test_size=0.33,
            random_state=42,
        )
        estimator_set[estimator].fit(X_train, y_train)
        error_te = mean_absolute_error(estimator_set[estimator].predict(X_test), y_test)
        error_ood = mean_absolute_error(
            estimator_set[estimator].predict(
                pd.concat([shap_data_ood, normal_data_ood, target_shift_ood], axis=1)
            ),
            np.nan_to_num(performance_ood),
        )
        res.append(
            [state, error_type, estimator, "Data+Target+Shap", error_te, error_ood]
        )
    folder = os.path.join("results", state + "_" + error_type + ".csv")
    columnas = ["state", "error_type", "estimator", "data", "error_te", "error_ood"]
    pd.DataFrame(res, columns=columnas).to_csv(folder, index=False)
| [
"numpy.log",
"sklearn.metrics.roc_auc_score",
"numpy.arange",
"numpy.histogram",
"numpy.max",
"numpy.empty",
"numpy.min",
"pandas.DataFrame",
"sklearn.metrics.confusion_matrix",
"numpy.abs",
"sklearn.model_selection.train_test_split",
"sklearn.pipeline.Pipeline",
"sklearn.ensemble.GradientBo... | [((492, 539), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('encoder', enc), ('model', modelo)]"], {}), "([('encoder', enc), ('model', modelo)])\n", (500, 539), False, 'from sklearn.pipeline import Pipeline\n'), ((2336, 2365), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['preds', 'true'], {}), '(preds, true)\n', (2352, 2365), False, 'from sklearn.metrics import confusion_matrix, roc_auc_score\n'), ((2582, 2593), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (2589, 2593), True, 'import numpy as np\n'), ((9750, 9808), 'os.path.join', 'os.path.join', (['"""results"""', "(state + '_' + error_type + '.csv')"], {}), "('results', state + '_' + error_type + '.csv')\n", (9762, 9808), False, 'import os\n'), ((13496, 13554), 'os.path.join', 'os.path.join', (['"""results"""', "(state + '_' + error_type + '.csv')"], {}), "('results', state + '_' + error_type + '.csv')\n", (13508, 13554), False, 'import os\n'), ((884, 924), 'collections.defaultdict', 'defaultdict', (['list', '{k: [] for k in cats}'], {}), '(list, {k: [] for k in cats})\n', (895, 924), False, 'from collections import defaultdict\n'), ((2523, 2534), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (2530, 2534), True, 'import numpy as np\n'), ((2561, 2572), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (2568, 2572), True, 'import numpy as np\n'), ((6920, 6950), 'numpy.empty', 'np.empty', (['expected.shape[axis]'], {}), '(expected.shape[axis])\n', (6928, 6950), True, 'import numpy as np\n'), ((7750, 7820), 'sklearn.model_selection.train_test_split', 'train_test_split', (['normal_data', 'target'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(normal_data, target, test_size=0.33, random_state=42)\n', (7766, 7820), False, 'from sklearn.model_selection import train_test_split\n'), ((10461, 10531), 'sklearn.model_selection.train_test_split', 'train_test_split', (['normal_data', 'target'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(normal_data, target, 
test_size=0.33, random_state=42)\n', (10477, 10531), False, 'from sklearn.model_selection import train_test_split\n'), ((11009, 11077), 'sklearn.model_selection.train_test_split', 'train_test_split', (['shap_data', 'target'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(shap_data, target, test_size=0.33, random_state=42)\n', (11025, 11077), False, 'from sklearn.model_selection import train_test_split\n'), ((11553, 11624), 'sklearn.model_selection.train_test_split', 'train_test_split', (['target_shift', 'target'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(target_shift, target, test_size=0.33, random_state=42)\n', (11569, 11624), False, 'from sklearn.model_selection import train_test_split\n'), ((8354, 8422), 'sklearn.model_selection.train_test_split', 'train_test_split', (['shap_data', 'target'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(shap_data, target, test_size=0.33, random_state=42)\n', (8370, 8422), False, 'from sklearn.model_selection import train_test_split\n'), ((9898, 9933), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'columns': 'columnas'}), '(res, columns=columnas)\n', (9910, 9933), True, 'import pandas as pd\n'), ((10814, 10844), 'numpy.nan_to_num', 'np.nan_to_num', (['performance_ood'], {}), '(performance_ood)\n', (10827, 10844), True, 'import numpy as np\n'), ((11358, 11388), 'numpy.nan_to_num', 'np.nan_to_num', (['performance_ood'], {}), '(performance_ood)\n', (11371, 11388), True, 'import numpy as np\n'), ((11908, 11938), 'numpy.nan_to_num', 'np.nan_to_num', (['performance_ood'], {}), '(performance_ood)\n', (11921, 11938), True, 'import numpy as np\n'), ((12146, 12192), 'pandas.concat', 'pd.concat', (['[target_shift, normal_data]'], {'axis': '(1)'}), '([target_shift, normal_data], axis=1)\n', (12155, 12192), True, 'import pandas as pd\n'), ((12610, 12640), 'numpy.nan_to_num', 'np.nan_to_num', (['performance_ood'], {}), '(performance_ood)\n', (12623, 12640), True, 'import numpy as np\n'), ((12836, 12893), 
'pandas.concat', 'pd.concat', (['[shap_data, normal_data, target_shift]'], {'axis': '(1)'}), '([shap_data, normal_data, target_shift], axis=1)\n', (12845, 12893), True, 'import pandas as pd\n'), ((13326, 13356), 'numpy.nan_to_num', 'np.nan_to_num', (['performance_ood'], {}), '(performance_ood)\n', (13339, 13356), True, 'import numpy as np\n'), ((13644, 13679), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'columns': 'columnas'}), '(res, columns=columnas)\n', (13656, 13679), True, 'import pandas as pd\n'), ((1153, 1180), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['truth', 'preds'], {}), '(truth, preds)\n', (1166, 1180), False, 'from sklearn.metrics import roc_auc_score\n'), ((2043, 2069), 'pandas.DataFrame', 'pd.DataFrame', (['X_tr.columns'], {}), '(X_tr.columns)\n', (2055, 2069), True, 'import pandas as pd\n'), ((5497, 5510), 'numpy.min', 'np.min', (['input'], {}), '(input)\n', (5503, 5510), True, 'import numpy as np\n'), ((5533, 5546), 'numpy.max', 'np.max', (['input'], {}), '(input)\n', (5539, 5546), True, 'import numpy as np\n'), ((5634, 5659), 'numpy.arange', 'np.arange', (['(0)', '(buckets + 1)'], {}), '(0, buckets + 1)\n', (5643, 5659), True, 'import numpy as np\n'), ((5780, 5802), 'numpy.min', 'np.min', (['expected_array'], {}), '(expected_array)\n', (5786, 5802), True, 'import numpy as np\n'), ((5804, 5826), 'numpy.max', 'np.max', (['expected_array'], {}), '(expected_array)\n', (5810, 5826), True, 'import numpy as np\n'), ((6032, 6073), 'numpy.histogram', 'np.histogram', (['expected_array', 'breakpoints'], {}), '(expected_array, breakpoints)\n', (6044, 6073), True, 'import numpy as np\n'), ((6147, 6186), 'numpy.histogram', 'np.histogram', (['actual_array', 'breakpoints'], {}), '(actual_array, breakpoints)\n', (6159, 6186), True, 'import numpy as np\n'), ((6574, 6597), 'numpy.log', 'np.log', (['(e_perc / a_perc)'], {}), '(e_perc / a_perc)\n', (6580, 6597), True, 'import numpy as np\n'), ((9022, 9065), 'pandas.concat', 'pd.concat', (['[shap_data, 
normal_data]'], {'axis': '(1)'}), '([shap_data, normal_data], axis=1)\n', (9031, 9065), True, 'import pandas as pd\n'), ((12528, 12582), 'pandas.concat', 'pd.concat', (['[target_shift_ood, normal_data_ood]'], {'axis': '(1)'}), '([target_shift_ood, normal_data_ood], axis=1)\n', (12537, 12582), True, 'import pandas as pd\n'), ((13229, 13298), 'pandas.concat', 'pd.concat', (['[shap_data_ood, normal_data_ood, target_shift_ood]'], {'axis': '(1)'}), '([shap_data_ood, normal_data_ood, target_shift_ood], axis=1)\n', (13238, 13298), True, 'import pandas as pd\n'), ((1514, 1532), 'category_encoders.MEstimateEncoder', 'MEstimateEncoder', ([], {}), '()\n', (1530, 1532), False, 'from category_encoders import MEstimateEncoder\n'), ((1545, 1573), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (1571, 1573), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((1787, 1813), 'numpy.abs', 'np.abs', (['shap_values.values'], {}), '(shap_values.values)\n', (1793, 1813), True, 'import numpy as np\n'), ((1903, 1921), 'category_encoders.MEstimateEncoder', 'MEstimateEncoder', ([], {}), '()\n', (1919, 1921), False, 'from category_encoders import MEstimateEncoder\n'), ((2084, 2111), 'numpy.transpose', 'np.transpose', (['pipe[1].coef_'], {}), '(pipe[1].coef_)\n', (2096, 2111), True, 'import numpy as np\n'), ((9467, 9518), 'pandas.concat', 'pd.concat', (['[shap_data_ood, normal_data_ood]'], {'axis': '(1)'}), '([shap_data_ood, normal_data_ood], axis=1)\n', (9476, 9518), True, 'import pandas as pd\n'), ((5934, 5966), 'numpy.percentile', 'np.percentile', (['expected_array', 'b'], {}), '(expected_array, b)\n', (5947, 5966), True, 'import numpy as np\n')] |
# Solution of;
# Project Euler Problem 527: Randomized Binary Search
# https://projecteuler.net/problem=527
#
# A secret integer t is selected at random within the range 1 ≤ t ≤ n. The
# goal is to guess the value of t by making repeated guesses, via integer g.
# After a guess is made, there are three possible outcomes, in which it will
# be revealed that either g < t, g = t, or g > t. Then the process can repeat
# as necessary. Normally, the number of guesses required on average can be
# minimized with a binary search: Given a lower bound L and upper bound H
# (initialized to L = 1 and H = n), let g = ⌊(L+H)/2⌋ where ⌊⋅⌋ is the integer
# floor function. If g = t, the process ends. Otherwise, if g < t, set L =
# g+1, but if g > t instead, set H = g−1. After setting the new bounds, the
# search process repeats, and ultimately ends once t is found. Even if t can
# be deduced without searching, assume that a search will be required anyway
# to confirm the value. Your friend Bob believes that the standard binary
# search is not that much better than his randomized variant: Instead of
# setting g = ⌊(L+H)/2⌋, simply let g be a random integer between L and H,
# inclusive. The rest of the algorithm is the same as the standard binary
# search. This new search routine will be referred to as a random binary
# search. Given that 1 ≤ t ≤ n for random t, let B(n) be the expected number
# of guesses needed to find t using the standard binary search, and let R(n)
# be the expected number of guesses needed to find t using the random binary
# search. For example, B(6) = 2. 33333333 and R(6) = 2. 71666667 when rounded
# to 8 decimal places. Find R(1010) − B(1010) rounded to 8 decimal places.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 527
timed.caller(dummy, n, i, prob_id)
| [
"timed.caller"
] | [((1894, 1928), 'timed.caller', 'timed.caller', (['dummy', 'n', 'i', 'prob_id'], {}), '(dummy, n, i, prob_id)\n', (1906, 1928), False, 'import timed\n')] |
import asyncio
from aiohttp import web
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from multiprocessing import Queue, Process
import os
from time import sleep
async def handle(request):
index = open("index.html", 'rb')
content = index.read()
return web.Response(body=content, content_type='text/html')
tick = asyncio.Condition()
async def wshandler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
recv_task = None
tick_task = None
while 1:
if not recv_task:
recv_task = asyncio.ensure_future(ws.receive())
if not tick_task:
await tick.acquire()
tick_task = asyncio.ensure_future(tick.wait())
done, pending = await asyncio.wait(
[recv_task,
tick_task],
return_when=asyncio.FIRST_COMPLETED)
if recv_task in done:
msg = recv_task.result()
if msg.tp == web.MsgType.text:
print("Got message %s" % msg.data)
ws.send_str("Pressed key code: {}".format(msg.data))
elif msg.tp == web.MsgType.close or\
msg.tp == web.MsgType.error:
break
recv_task = None
if tick_task in done:
ws.send_str("game loop ticks")
tick.release()
tick_task = None
return ws
def game_loop(asyncio_loop):
# coroutine to run in main thread
async def notify():
await tick.acquire()
tick.notify_all()
tick.release()
queue = Queue()
# function to run in a different process
def worker():
while 1:
print("doing heavy calculation in process {}".format(os.getpid()))
sleep(1)
queue.put("calculation result")
Process(target=worker).start()
while 1:
# blocks this thread but not main thread with event loop
result = queue.get()
print("getting {} in process {}".format(result, os.getpid()))
task = asyncio.run_coroutine_threadsafe(notify(), asyncio_loop)
task.result()
asyncio_loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(max_workers=1)
asyncio_loop.run_in_executor(executor, game_loop, asyncio_loop)
app = web.Application()
app.router.add_route('GET', '/connect', wshandler)
app.router.add_route('GET', '/', handle)
web.run_app(app)
| [
"aiohttp.web.run_app",
"concurrent.futures.ThreadPoolExecutor",
"multiprocessing.Process",
"aiohttp.web.Response",
"aiohttp.web.Application",
"asyncio.wait",
"time.sleep",
"asyncio.Condition",
"os.getpid",
"multiprocessing.Queue",
"asyncio.get_event_loop",
"aiohttp.web.WebSocketResponse"
] | [((353, 372), 'asyncio.Condition', 'asyncio.Condition', ([], {}), '()\n', (370, 372), False, 'import asyncio\n'), ((2132, 2156), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2154, 2156), False, 'import asyncio\n'), ((2168, 2201), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(1)'}), '(max_workers=1)\n', (2186, 2201), False, 'from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor\n'), ((2273, 2290), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (2288, 2290), False, 'from aiohttp import web\n'), ((2385, 2401), 'aiohttp.web.run_app', 'web.run_app', (['app'], {}), '(app)\n', (2396, 2401), False, 'from aiohttp import web\n'), ((291, 343), 'aiohttp.web.Response', 'web.Response', ([], {'body': 'content', 'content_type': '"""text/html"""'}), "(body=content, content_type='text/html')\n", (303, 343), False, 'from aiohttp import web\n'), ((413, 436), 'aiohttp.web.WebSocketResponse', 'web.WebSocketResponse', ([], {}), '()\n', (434, 436), False, 'from aiohttp import web\n'), ((1575, 1582), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (1580, 1582), False, 'from multiprocessing import Queue, Process\n'), ((758, 831), 'asyncio.wait', 'asyncio.wait', (['[recv_task, tick_task]'], {'return_when': 'asyncio.FIRST_COMPLETED'}), '([recv_task, tick_task], return_when=asyncio.FIRST_COMPLETED)\n', (770, 831), False, 'import asyncio\n'), ((1755, 1763), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1760, 1763), False, 'from time import sleep\n'), ((1813, 1835), 'multiprocessing.Process', 'Process', ([], {'target': 'worker'}), '(target=worker)\n', (1820, 1835), False, 'from multiprocessing import Queue, Process\n'), ((2008, 2019), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2017, 2019), False, 'import os\n'), ((1729, 1740), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1738, 1740), False, 'import os\n')] |
import pytest
from fixtures import world
from wecs.core import UID
from wecs.core import NoSuchUID
from wecs.core import Component
@Component()
class Reference:
uid: UID
def test_user_defined_names(world):
entity = world.create_entity(name="foo")
assert entity._uid.name == "foo"
def test_automatic_names(world):
entity = world.create_entity()
assert entity._uid.name
def test_automatic_unique_names(world):
entity_1 = world.create_entity()
entity_2 = world.create_entity()
assert entity_1._uid.name != entity_2._uid.name
# This test feels silly... More on it when serialization comes knocking.
def test_uid():
uid_1 = UID()
uid_2 = UID()
assert uid_1 is not uid_2
assert uid_1 != uid_2
def test_reference():
c = Reference(uid=UID())
def test_resolving_reference(world):
to_entity = world.create_entity()
from_entity = world.create_entity()
from_entity.add_component(Reference(uid=to_entity._uid))
world.flush_component_updates()
reference = world.get_entity(from_entity.get_component(Reference).uid)
assert reference is to_entity
def test_resolving_dangling_reference(world):
to_entity = world.create_entity()
from_entity = world.create_entity()
from_entity.add_component(Reference(uid=to_entity._uid))
to_entity.destroy()
world.flush_component_updates()
with pytest.raises(NoSuchUID):
world.get_entity(from_entity.get_component(Reference).uid)
| [
"wecs.core.Component",
"fixtures.world.create_entity",
"fixtures.world.flush_component_updates",
"wecs.core.UID",
"pytest.raises"
] | [((136, 147), 'wecs.core.Component', 'Component', ([], {}), '()\n', (145, 147), False, 'from wecs.core import Component\n'), ((229, 260), 'fixtures.world.create_entity', 'world.create_entity', ([], {'name': '"""foo"""'}), "(name='foo')\n", (248, 260), False, 'from fixtures import world\n'), ((346, 367), 'fixtures.world.create_entity', 'world.create_entity', ([], {}), '()\n', (365, 367), False, 'from fixtures import world\n'), ((453, 474), 'fixtures.world.create_entity', 'world.create_entity', ([], {}), '()\n', (472, 474), False, 'from fixtures import world\n'), ((490, 511), 'fixtures.world.create_entity', 'world.create_entity', ([], {}), '()\n', (509, 511), False, 'from fixtures import world\n'), ((667, 672), 'wecs.core.UID', 'UID', ([], {}), '()\n', (670, 672), False, 'from wecs.core import UID\n'), ((685, 690), 'wecs.core.UID', 'UID', ([], {}), '()\n', (688, 690), False, 'from wecs.core import UID\n'), ((855, 876), 'fixtures.world.create_entity', 'world.create_entity', ([], {}), '()\n', (874, 876), False, 'from fixtures import world\n'), ((895, 916), 'fixtures.world.create_entity', 'world.create_entity', ([], {}), '()\n', (914, 916), False, 'from fixtures import world\n'), ((982, 1013), 'fixtures.world.flush_component_updates', 'world.flush_component_updates', ([], {}), '()\n', (1011, 1013), False, 'from fixtures import world\n'), ((1187, 1208), 'fixtures.world.create_entity', 'world.create_entity', ([], {}), '()\n', (1206, 1208), False, 'from fixtures import world\n'), ((1227, 1248), 'fixtures.world.create_entity', 'world.create_entity', ([], {}), '()\n', (1246, 1248), False, 'from fixtures import world\n'), ((1338, 1369), 'fixtures.world.flush_component_updates', 'world.flush_component_updates', ([], {}), '()\n', (1367, 1369), False, 'from fixtures import world\n'), ((1379, 1403), 'pytest.raises', 'pytest.raises', (['NoSuchUID'], {}), '(NoSuchUID)\n', (1392, 1403), False, 'import pytest\n'), ((793, 798), 'wecs.core.UID', 'UID', ([], {}), '()\n', (796, 798), 
False, 'from wecs.core import UID\n')] |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
from pages.home_page import HomePage
from pages.profile_page import ProfilePage
from pages.login_page import LoginPage
from pages.registration_page import RegistrationPage
from pages.article_page import ArticlePage
from pages.new_article_page import NewArticlePage
from pages.navigation_bar import NavigationBar
import pytest
import csv
browser_options = Options()
browser_options.add_experimental_option("excludeSwitches", ["enable-logging"])
browser_options.headless = True
URL = 'http://localhost:1667'
class Test_Conduit_Logged_In:
def setup_method(self, method):
self.browser = webdriver.Chrome(ChromeDriverManager().install(), options=browser_options)
self.browser.maximize_window()
self.browser.get(URL)
self.homepage = HomePage(driver=self.browser)
self.homepage.login_button.click()
login_page = LoginPage(driver=self.browser)
login_page.fill_login_details('<EMAIL>', 'Teszt1teszt')
login_page.signin_button.click()
def teardown_method(self, method):
self.browser.close()
def test_one_article(self):
self.homepage = HomePage(driver=self.browser)
self.homepage.logout_button.find()
self.homepage.article_button.click()
new_article_page = NewArticlePage(driver=self.browser)
new_article_page.title_input.send_text_to_input("Title")
new_article_page.summary_input.send_text_to_input("Summary")
new_article_page.main_body_input.send_text_to_input("Main article")
new_article_page.tags_input.send_text_to_input("nonsense")
new_article_page.publish_button.click()
article_page = ArticlePage(driver=self.browser)
assert article_page.main_textfield.text() == "Main article"
def test_new_articles(self):
number_of_paginator = len(self.homepage.page_list_buttons)
reader = csv.reader(open('./vizsgaremek/articles.csv', 'r'), delimiter=';')
for row in reader:
navigation_bar = NavigationBar(driver=self.browser)
navigation_bar.logout_button.find()
navigation_bar.article_button.click()
new_article_page = NewArticlePage(driver=self.browser)
new_article_page.title_input.send_text_to_input(row[0])
new_article_page.summary_input.send_text_to_input(row[1])
new_article_page.main_body_input.send_text_to_input(row[2])
new_article_page.tags_input.send_text_to_input(row[3])
new_article_page.publish_button.click()
navigation_bar.home_button.click()
assert len(self.homepage.page_list_buttons) > number_of_paginator
def test_page_list(self):
self.homepage = HomePage(driver=self.browser)
for x in self.homepage.page_list_buttons:
x.click()
self.homepage = HomePage(driver=self.browser)
assert self.homepage.is_last_page_active()
def test_list_articles(self):
assert len(self.homepage.article_list) > 0
def test_change_article(self):
article_page = self.create_article()
txt_to_change = article_page.main_textfield.text()
article_page.edit_button.find()
article_page.edit_button.click()
article_edit_page = NewArticlePage(self.browser)
article_edit_page.main_body_input.send_text_to_input(txt_to_change[:len(txt_to_change)//2].strip() + "changed")
article_edit_page.publish_button.click()
assert article_page.main_textfield.text() == txt_to_change[:len(txt_to_change)//2].strip() + "changed"
def test_save_to_file(self):
self.homepage.profile_button.click()
profile_page = ProfilePage(self.browser)
self.homepage.article_list[0].click()
article_page = ArticlePage(self.browser)
txt_to_save = article_page.main_textfield.text()
txt_file = open("./vizsgaremek/test.txt", "w")
txt_file.write(txt_to_save)
txt_file.close()
txt_file = open("./vizsgaremek/test.txt", "r")
assert txt_file.read() == txt_to_save
txt_file.close()
def test_delete_article(self):
article_page = self.create_article()
article_page.delete_button.find()
article_page.delete_button.click()
assert (article_page.delete_popup.text() == "Deleted the article. Going home...")
def test_logout(self):
self.homepage.logout_button.click()
assert self.homepage.login_button.text().strip() == "Sign in"
def create_article(self):
self.homepage.logout_button.find()
self.homepage.article_button.click()
new_article_page = NewArticlePage(driver=self.browser)
new_article_page.title_input.send_text_to_input("Test article title")
new_article_page.summary_input.send_text_to_input("Test article summary")
new_article_page.main_body_input.send_text_to_input("Test article main text")
new_article_page.tags_input.send_text_to_input("test, article, tags")
new_article_page.publish_button.click()
return ArticlePage(driver=self.browser) | [
"selenium.webdriver.chrome.options.Options",
"pages.profile_page.ProfilePage",
"pages.home_page.HomePage",
"pages.login_page.LoginPage",
"pages.navigation_bar.NavigationBar",
"pages.article_page.ArticlePage",
"pages.new_article_page.NewArticlePage",
"webdriver_manager.chrome.ChromeDriverManager"
] | [((498, 507), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (505, 507), False, 'from selenium.webdriver.chrome.options import Options\n'), ((908, 937), 'pages.home_page.HomePage', 'HomePage', ([], {'driver': 'self.browser'}), '(driver=self.browser)\n', (916, 937), False, 'from pages.home_page import HomePage\n'), ((1002, 1032), 'pages.login_page.LoginPage', 'LoginPage', ([], {'driver': 'self.browser'}), '(driver=self.browser)\n', (1011, 1032), False, 'from pages.login_page import LoginPage\n'), ((1272, 1301), 'pages.home_page.HomePage', 'HomePage', ([], {'driver': 'self.browser'}), '(driver=self.browser)\n', (1280, 1301), False, 'from pages.home_page import HomePage\n'), ((1417, 1452), 'pages.new_article_page.NewArticlePage', 'NewArticlePage', ([], {'driver': 'self.browser'}), '(driver=self.browser)\n', (1431, 1452), False, 'from pages.new_article_page import NewArticlePage\n'), ((1801, 1833), 'pages.article_page.ArticlePage', 'ArticlePage', ([], {'driver': 'self.browser'}), '(driver=self.browser)\n', (1812, 1833), False, 'from pages.article_page import ArticlePage\n'), ((2844, 2873), 'pages.home_page.HomePage', 'HomePage', ([], {'driver': 'self.browser'}), '(driver=self.browser)\n', (2852, 2873), False, 'from pages.home_page import HomePage\n'), ((3406, 3434), 'pages.new_article_page.NewArticlePage', 'NewArticlePage', (['self.browser'], {}), '(self.browser)\n', (3420, 3434), False, 'from pages.new_article_page import NewArticlePage\n'), ((3825, 3850), 'pages.profile_page.ProfilePage', 'ProfilePage', (['self.browser'], {}), '(self.browser)\n', (3836, 3850), False, 'from pages.profile_page import ProfilePage\n'), ((3920, 3945), 'pages.article_page.ArticlePage', 'ArticlePage', (['self.browser'], {}), '(self.browser)\n', (3931, 3945), False, 'from pages.article_page import ArticlePage\n'), ((4805, 4840), 'pages.new_article_page.NewArticlePage', 'NewArticlePage', ([], {'driver': 'self.browser'}), '(driver=self.browser)\n', (4819, 4840), 
False, 'from pages.new_article_page import NewArticlePage\n'), ((5228, 5260), 'pages.article_page.ArticlePage', 'ArticlePage', ([], {'driver': 'self.browser'}), '(driver=self.browser)\n', (5239, 5260), False, 'from pages.article_page import ArticlePage\n'), ((2143, 2177), 'pages.navigation_bar.NavigationBar', 'NavigationBar', ([], {'driver': 'self.browser'}), '(driver=self.browser)\n', (2156, 2177), False, 'from pages.navigation_bar import NavigationBar\n'), ((2307, 2342), 'pages.new_article_page.NewArticlePage', 'NewArticlePage', ([], {'driver': 'self.browser'}), '(driver=self.browser)\n', (2321, 2342), False, 'from pages.new_article_page import NewArticlePage\n'), ((2974, 3003), 'pages.home_page.HomePage', 'HomePage', ([], {'driver': 'self.browser'}), '(driver=self.browser)\n', (2982, 3003), False, 'from pages.home_page import HomePage\n'), ((757, 778), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (776, 778), False, 'from webdriver_manager.chrome import ChromeDriverManager\n')] |
import numpy as np
import numpy.linalg as LA
from .solve_R1 import problem_R1, Classo_R1, pathlasso_R1
from .solve_R2 import problem_R2, Classo_R2, pathlasso_R2
from .solve_R3 import problem_R3, Classo_R3, pathlasso_R3
from .solve_R4 import problem_R4, Classo_R4, pathlasso_R4
from .path_alg import solve_path, pathalgo_general, h_lambdamax
"""
Classo and pathlasso are the main functions,
they can call every algorithm acording
to the method and formulation required
"""
# can be 'Path-Alg', 'P-PDS' , 'PF-PDS' or 'DR'
def Classo(
matrix,
lam,
typ="R1",
meth="DR",
rho=1.345,
get_lambdamax=False,
true_lam=False,
e=None,
rho_classification=-1.0,
w=None,
intercept=False,
return_sigm=True,
):
if w is not None:
matrices = (matrix[0] / w, matrix[1] / w, matrix[2])
else:
matrices = matrix
X, C, y = matrices
if typ == "R3":
if intercept:
# here we use the fact that for R1 and R3,
# the intercept is simple beta0 = ybar-Xbar .vdot(beta)
# so by changing the X to X-Xbar and y to y-ybar
# we can solve standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
if meth not in ["Path-Alg", "DR"]:
meth = "DR"
if e is None or e == len(matrices[0]) / 2:
r = 1.0
pb = problem_R3(matrices, meth)
e = len(matrices[0]) / 2
else:
r = np.sqrt(2 * e / len(matrices[0]))
pb = problem_R3((matrices[0] * r, matrices[1], matrices[2] * r), meth)
lambdamax = pb.lambdamax
if true_lam:
beta, s = Classo_R3(pb, lam / lambdamax)
else:
beta, s = Classo_R3(pb, lam)
if intercept:
betaO = ybar - np.vdot(Xbar, beta)
beta = np.array([betaO] + list(beta))
elif typ == "R4":
if meth not in ["Path-Alg", "DR"]:
meth = "DR"
if e is None or e == len(matrices[0]):
r = 1.0
pb = problem_R4(matrices, meth, rho, intercept=intercept)
e = len(matrices[0])
else:
r = np.sqrt(e / len(matrices[0]))
pb = problem_R4(
(matrices[0] * r, matrices[1], matrices[2] * r),
meth,
rho / r,
intercept=intercept,
)
lambdamax = pb.lambdamax
if true_lam:
beta, s = Classo_R4(pb, lam / lambdamax)
else:
beta, s = Classo_R4(pb, lam)
elif typ == "R2":
if meth not in ["Path-Alg", "P-PDS", "PF-PDS", "DR"]:
meth = "ODE"
pb = problem_R2(matrices, meth, rho, intercept=intercept)
lambdamax = pb.lambdamax
if true_lam:
beta = Classo_R2(pb, lam / lambdamax)
else:
beta = Classo_R2(pb, lam)
elif typ == "C2":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(
matrices, rho_classification, typ="C2", intercept=intercept
)
if true_lam:
out = solve_path(
matrices,
lam / lambdamax,
False,
rho_classification,
"C2",
intercept=intercept,
)
else:
out = solve_path(
matrices, lam, False, rho_classification, "C2", intercept=intercept
)
if intercept:
beta0, beta = out[0][-1], out[1][-1]
beta = np.array([beta0] + list(beta))
else:
beta = out[0][-1]
elif typ == "C1":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(matrices, 0, typ="C1", intercept=intercept)
if true_lam:
out = solve_path(
matrices, lam / lambdamax, False, 0, "C1", intercept=intercept
)
else:
out = solve_path(matrices, lam, False, 0, "C1", intercept=intercept)
if intercept:
beta0, beta = out[0][-1], out[1][-1]
beta = np.array([beta0] + list(beta))
else:
beta = out[0][-1]
else: # LS
if intercept:
# here we use the fact that for R1 and R3,
# the intercept is simple beta0 = ybar-Xbar .vdot(beta)
# so by changing the X to X-Xbar and y to y-ybar
# we can solve standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
if meth not in ["Path-Alg", "P-PDS", "PF-PDS", "DR"]:
meth = "DR"
pb = problem_R1(matrices, meth)
lambdamax = pb.lambdamax
if true_lam:
beta = Classo_R1(pb, lam / lambdamax)
else:
beta = Classo_R1(pb, lam)
if intercept:
betaO = ybar - np.vdot(Xbar, beta)
beta = np.array([betaO] + list(beta))
if w is not None:
if intercept:
beta[1:] = beta[1:] / w
else:
beta = beta / w
if typ in ["R3", "R4"] and return_sigm:
if get_lambdamax:
return (lambdamax, beta, s)
else:
return (beta, s)
if get_lambdamax:
return (lambdamax, beta)
else:
return beta
def pathlasso(
matrix,
lambdas=False,
n_active=0,
lamin=1e-2,
typ="R1",
meth="Path-Alg",
rho=1.345,
true_lam=False,
e=None,
return_sigm=False,
rho_classification=-1.0,
w=None,
intercept=False,
):
Nactive = n_active
if Nactive == 0:
Nactive = False
if type(lambdas) is bool:
lambdas = lamin ** (np.linspace(0.0, 1, 100))
if lambdas[0] < lambdas[-1]:
lambdass = [
lambdas[i] for i in range(len(lambdas) - 1, -1, -1)
] # reverse the list if needed
else:
lambdass = [lambdas[i] for i in range(len(lambdas))]
if w is not None:
matrices = (matrix[0] / w, matrix[1] / w, matrix[2])
else:
matrices = matrix
X, C, y = matrices
if typ == "R2":
pb = problem_R2(matrices, meth, rho, intercept=intercept)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA = pathlasso_R2(pb, lambdass, n_active=Nactive)
elif typ == "R3":
if intercept:
# here we use the fact that for R1 and R3, the intercept is simple beta0 = ybar-Xbar .vdot(beta) so by changing the X to X-Xbar and y to y-ybar we can solve standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
if e is None or e == len(matrices[0]) / 2:
r = 1.0
pb = problem_R3(matrices, meth)
else:
r = np.sqrt(2 * e / len(matrices[0]))
pb = problem_R3((matrices[0] * r, matrices[1], matrices[2] * r), meth)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA, S = pathlasso_R3(pb, lambdass, n_active=Nactive)
S = np.array(S) / r ** 2
BETA = np.array(BETA)
if intercept:
BETA = np.array([[ybar - Xbar.dot(beta)] + list(beta) for beta in BETA])
elif typ == "R4":
if e is None or e == len(matrices[0]):
r = 1.0
pb = problem_R4(matrices, meth, rho, intercept=intercept)
else:
r = np.sqrt(e / len(matrices[0]))
pb = problem_R4(
(matrices[0] * r, matrices[1], matrices[2] * r),
meth,
rho / r,
intercept=intercept,
)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA, S = pathlasso_R4(pb, lambdass, n_active=Nactive)
S = np.array(S) / r ** 2
BETA = np.array(BETA)
elif typ == "C2":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(
matrices, rho_classification, typ="C2", intercept=intercept
)
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA = pathalgo_general(
matrices,
lambdass,
"C2",
n_active=Nactive,
rho=rho_classification,
intercept=intercept,
)
elif typ == "C1":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(matrices, 0, typ="C1", intercept=intercept)
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA = pathalgo_general(
matrices, lambdass, "C1", n_active=Nactive, intercept=intercept
)
else: # R1
if intercept:
# here we use the fact that for R1 and R3,
# the intercept is simple beta0 = ybar-Xbar .vdot(beta)
# so by changing the X to X-Xbar and y to y-ybar
# we can solve standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
pb = problem_R1(matrices, meth)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA = pathlasso_R1(pb, lambdass, n_active=n_active)
if intercept:
BETA = np.array([[ybar - Xbar.dot(beta)] + list(beta) for beta in BETA])
real_path = [lam * lambdamax for lam in lambdass]
if w is not None:
if intercept:
ww = np.array([1] + list(w))
else:
ww = w
BETA = np.array([beta / ww for beta in BETA])
if typ in ["R3", "R4"] and return_sigm:
return (np.array(BETA), real_path, S)
return (np.array(BETA), real_path)
| [
"numpy.vdot",
"numpy.array",
"numpy.linspace",
"numpy.mean"
] | [((9722, 9762), 'numpy.array', 'np.array', (['[(beta / ww) for beta in BETA]'], {}), '([(beta / ww) for beta in BETA])\n', (9730, 9762), True, 'import numpy as np\n'), ((9864, 9878), 'numpy.array', 'np.array', (['BETA'], {}), '(BETA)\n', (9872, 9878), True, 'import numpy as np\n'), ((5733, 5757), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1)', '(100)'], {}), '(0.0, 1, 100)\n', (5744, 5757), True, 'import numpy as np\n'), ((7222, 7236), 'numpy.array', 'np.array', (['BETA'], {}), '(BETA)\n', (7230, 7236), True, 'import numpy as np\n'), ((9822, 9836), 'numpy.array', 'np.array', (['BETA'], {}), '(BETA)\n', (9830, 9836), True, 'import numpy as np\n'), ((1188, 1206), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1195, 1206), True, 'import numpy as np\n'), ((1208, 1218), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1215, 1218), True, 'import numpy as np\n'), ((1845, 1864), 'numpy.vdot', 'np.vdot', (['Xbar', 'beta'], {}), '(Xbar, beta)\n', (1852, 1864), True, 'import numpy as np\n'), ((7186, 7197), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (7194, 7197), True, 'import numpy as np\n'), ((7986, 8000), 'numpy.array', 'np.array', (['BETA'], {}), '(BETA)\n', (7994, 8000), True, 'import numpy as np\n'), ((6654, 6672), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (6661, 6672), True, 'import numpy as np\n'), ((6674, 6684), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (6681, 6684), True, 'import numpy as np\n'), ((7950, 7961), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (7958, 7961), True, 'import numpy as np\n'), ((4513, 4531), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (4520, 4531), True, 'import numpy as np\n'), ((4533, 4543), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (4540, 4543), True, 'import numpy as np\n'), ((4924, 4943), 'numpy.vdot', 'np.vdot', (['Xbar', 'beta'], {}), '(Xbar, beta)\n', (4931, 4943), True, 'import numpy as np\n'), ((9128, 9146), 'numpy.mean', 'np.mean', 
(['X'], {'axis': '(0)'}), '(X, axis=0)\n', (9135, 9146), True, 'import numpy as np\n'), ((9148, 9158), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (9155, 9158), True, 'import numpy as np\n')] |
from tensorpy import image_base
classifications = image_base.classify_folder_images('./images')
print("*** Displaying Image Classification Results as a list: ***")
for classification in classifications:
print(classification)
| [
"tensorpy.image_base.classify_folder_images"
] | [((51, 96), 'tensorpy.image_base.classify_folder_images', 'image_base.classify_folder_images', (['"""./images"""'], {}), "('./images')\n", (84, 96), False, 'from tensorpy import image_base\n')] |
import argparse
import torch
from torch.utils.data import DataLoader
import sys, os
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../../"))
from deep_audio_features.dataloading.dataloading import FeatureExtractorDataset
from deep_audio_features.models.cnn import load_cnn
from deep_audio_features.lib.training import test
from deep_audio_features.utils.model_editing import drop_layers
import deep_audio_features.bin.config
import numpy
def test_model(modelpath, ifile, layers_dropped,
test_segmentation=False, verbose=True):
"""Loads a model and predicts each classes probability
Arguments:
modelpath {str} : A path where the model was stored.
ifile {str} : A path of a given wav file,
which will be tested.
test_segmentation {bool}: If True extracts segment level
predictions of a sequence
verbose {bool}: If True prints the predictions
Returns:
y_pred {np.array} : An array with the probability of each class
that the model predicts.
posteriors {np.array}: An array containing the unormalized
posteriors of each class.
"""
device = "cuda" if torch.cuda.is_available() else "cpu"
# Restore model
model, hop_length, window_length = load_cnn(modelpath)
model = model.to(device)
class_names = model.classes_mapping
max_seq_length = model.max_sequence_length
zero_pad = model.zero_pad
spec_size = model.spec_size
fuse = model.fuse
# Apply layer drop
model = drop_layers(model, layers_dropped)
model.max_sequence_length = max_seq_length
# print('Model:\n{}'.format(model))
# Move to device
model.to(device)
# Create test set
test_set = FeatureExtractorDataset(X=[ifile],
# Random class -- does not matter at all
y=[0],
fe_method="MEL_SPECTROGRAM",
oversampling=False,
max_sequence_length=max_seq_length,
zero_pad=zero_pad,
forced_size=spec_size,
fuse=fuse, show_hist=False,
test_segmentation=test_segmentation,
hop_length=hop_length, window_length=window_length)
# Create test dataloader
test_loader = DataLoader(dataset=test_set, batch_size=1,
num_workers=4, drop_last=False,
shuffle=False)
# Forward a sample
posteriors, y_pred, _ = test(model=model, dataloader=test_loader,
cnn=True,
classifier=True if layers_dropped == 0
else False)
if verbose:
print("--> Unormalized posteriors:\n {}\n".format(posteriors))
print("--> Predictions:\n {}".format([class_names[yy] for yy in y_pred]))
return y_pred, numpy.array(posteriors)
if __name__ == '__main__':
# Read arguments -- model
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', required=True,
type=str, help='Model')
parser.add_argument('-i', '--input', required=True,
type=str, help='Input file for testing')
parser.add_argument('-s', '--segmentation', required=False,
action='store_true',
help='Return segment predictions')
parser.add_argument('-L', '--layers', required=False, default=0,
help='Number of final layers to cut. Default is 0.')
args = parser.parse_args()
# Get arguments
model = args.model
ifile = args.input
layers_dropped = int(args.layers)
segmentation = args.segmentation
# Test the model
d, p = test_model(modelpath=model, ifile=ifile,
layers_dropped=layers_dropped,
test_segmentation=segmentation)
| [
"deep_audio_features.models.cnn.load_cnn",
"deep_audio_features.utils.model_editing.drop_layers",
"argparse.ArgumentParser",
"deep_audio_features.dataloading.dataloading.FeatureExtractorDataset",
"deep_audio_features.lib.training.test",
"os.path.realpath",
"numpy.array",
"torch.cuda.is_available",
"... | [((1363, 1382), 'deep_audio_features.models.cnn.load_cnn', 'load_cnn', (['modelpath'], {}), '(modelpath)\n', (1371, 1382), False, 'from deep_audio_features.models.cnn import load_cnn\n'), ((1621, 1655), 'deep_audio_features.utils.model_editing.drop_layers', 'drop_layers', (['model', 'layers_dropped'], {}), '(model, layers_dropped)\n', (1632, 1655), False, 'from deep_audio_features.utils.model_editing import drop_layers\n'), ((1825, 2127), 'deep_audio_features.dataloading.dataloading.FeatureExtractorDataset', 'FeatureExtractorDataset', ([], {'X': '[ifile]', 'y': '[0]', 'fe_method': '"""MEL_SPECTROGRAM"""', 'oversampling': '(False)', 'max_sequence_length': 'max_seq_length', 'zero_pad': 'zero_pad', 'forced_size': 'spec_size', 'fuse': 'fuse', 'show_hist': '(False)', 'test_segmentation': 'test_segmentation', 'hop_length': 'hop_length', 'window_length': 'window_length'}), "(X=[ifile], y=[0], fe_method='MEL_SPECTROGRAM',\n oversampling=False, max_sequence_length=max_seq_length, zero_pad=\n zero_pad, forced_size=spec_size, fuse=fuse, show_hist=False,\n test_segmentation=test_segmentation, hop_length=hop_length,\n window_length=window_length)\n", (1848, 2127), False, 'from deep_audio_features.dataloading.dataloading import FeatureExtractorDataset\n'), ((2590, 2683), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_set', 'batch_size': '(1)', 'num_workers': '(4)', 'drop_last': '(False)', 'shuffle': '(False)'}), '(dataset=test_set, batch_size=1, num_workers=4, drop_last=False,\n shuffle=False)\n', (2600, 2683), False, 'from torch.utils.data import DataLoader\n'), ((2790, 2897), 'deep_audio_features.lib.training.test', 'test', ([], {'model': 'model', 'dataloader': 'test_loader', 'cnn': '(True)', 'classifier': '(True if layers_dropped == 0 else False)'}), '(model=model, dataloader=test_loader, cnn=True, classifier=True if \n layers_dropped == 0 else False)\n', (2794, 2897), False, 'from deep_audio_features.lib.training import test\n'), ((3279, 3304), 
'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3302, 3304), False, 'import argparse\n'), ((1267, 1292), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1290, 1292), False, 'import torch\n'), ((3182, 3205), 'numpy.array', 'numpy.array', (['posteriors'], {}), '(posteriors)\n', (3193, 3205), False, 'import numpy\n'), ((137, 163), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (153, 163), False, 'import sys, os\n')] |
import re
import traceback
from textwrap import dedent
def camel_to_snake(value):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', value)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def snake_to_camel(value):
camel = ''
words = value.split('_')
for w in words:
camel += w.title()
return camel
def multireplace(string, replacements, ignore_case=False):
"""
Given a string and a dict, replaces occurrences of the dict keys found in the
string, with their corresponding values. The replacements will occur in "one pass",
i.e. there should be no clashes.
:param str string: string to perform replacements on
:param dict replacements: replacement dictionary {str_to_find: str_to_replace_with}
:param bool ignore_case: whether to ignore case when looking for matches
:rtype: str the replaced string
"""
rep_sorted = sorted(replacements, key=lambda s: len(s[0]), reverse=True)
rep_escaped = [re.escape(replacement) for replacement in rep_sorted]
pattern = re.compile("|".join(rep_escaped), re.I if ignore_case else 0)
return pattern.sub(lambda match: replacements[match.group(0)], string)
def printvar(var):
print(traceback.extract_stack(limit=2)[0][3][9:][:-1],"=", var)
if __name__ == '__main__':
print(camel_to_snake('CamelToSnake'))
print(snake_to_camel('snake_to_camel'))
printvar('test')
| [
"re.sub",
"re.escape",
"traceback.extract_stack"
] | [((93, 137), 're.sub', 're.sub', (['"""(.)([A-Z][a-z]+)"""', '"""\\\\1_\\\\2"""', 'value'], {}), "('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', value)\n", (99, 137), False, 'import re\n'), ((973, 995), 're.escape', 're.escape', (['replacement'], {}), '(replacement)\n', (982, 995), False, 'import re\n'), ((148, 190), 're.sub', 're.sub', (['"""([a-z0-9])([A-Z])"""', '"""\\\\1_\\\\2"""', 's1'], {}), "('([a-z0-9])([A-Z])', '\\\\1_\\\\2', s1)\n", (154, 190), False, 'import re\n'), ((1209, 1241), 'traceback.extract_stack', 'traceback.extract_stack', ([], {'limit': '(2)'}), '(limit=2)\n', (1232, 1241), False, 'import traceback\n')] |
import sys
import os
import random
klasorAdi = os.path.dirname(sys.argv[0])
dosyaIsmi = klasorAdi + "/test.txt"
soruSayisi = 40
ogrenciSayisi = 60
d = {}
dogruSayisi = {}
yalisSayisi = {}
bosSayisi = {}
puan = {}
def sinavHazirla():
for j in range(1, soruSayisi + 1):
r1 = random.randint(1, 5)
d[0, j] = chr(64 + r1)
for i in range(1, ogrenciSayisi + 1):
for j in range(1, soruSayisi + 1):
r1 = random.randint(1, 5)
r2 = random.randint(0, 99)
d[i, j] = chr(64 + r1)
if r2 in range(41, 61):
d[i, j] = chr(32)
if r2 in range(61, 100):
d[i, j] = d[0, j]
def sinavDegerlendir():
for i in range(1, ogrenciSayisi + 1):
dogruSayisi[i] = 0
yalisSayisi[i] = 0
bosSayisi[i] = 0
puan[i] = 0
soruBasinaDusenPuan = 100 / soruSayisi
for i in range(1, ogrenciSayisi + 1):
for j in range(1, soruSayisi + 1):
if d[i, j] != chr(32):
if d[i, j] == d[0, j]:
dogruSayisi[i] += 1
else:
d[i, j] = chr(ord(d[i, j]) + 32)
yalisSayisi[i] += 1
bosSayisi[i] = soruSayisi - (dogruSayisi[i] + yalisSayisi[i])
puan[i] = soruBasinaDusenPuan * dogruSayisi[i]
def sinavSirala():
for i in range(1, ogrenciSayisi):
for j in range(i + 1, ogrenciSayisi + 1):
if puan[i] < puan[j]:
for k in range(1, soruSayisi + 1):
g = d[i, k]
d[i, k] = d[j, k]
d[j, k] = g
g = dogruSayisi[i] ; dogruSayisi[i] = dogruSayisi[j] ; dogruSayisi[j] = g
g = yalisSayisi[i] ; yalisSayisi[i] = yalisSayisi[j] ; yalisSayisi[j] = g
g = bosSayisi[i] ; bosSayisi[i] = bosSayisi[j] ; bosSayisi[j] = g
g = puan[i] ; puan[i] = puan[j] ; puan[j] = g
def sinavYaz():
dosya = open(dosyaIsmi, "w")
s = ' '
for j in range(1, soruSayisi + 1):
s += d[0 ,j]
print(s, file=dosya)
for i in range(1, ogrenciSayisi + 1):
s = '%3d.' % i
for j in range(1, soruSayisi + 1):
s += d[i, j]
s += ' ** Doğru Sayısı:%3d Yanlış Sayısı:%3d Boş Sayısı:%3d Puan:%6.2f' %\
(dogruSayisi[i], yalisSayisi[i], bosSayisi[i], puan[i])
print(s, file=dosya)
dosya.close()
def sinavOku():
if os.path.isfile(dosyaIsmi)==False:
print("dosya diskte mevcut değil")
else:
dosya = open(dosyaIsmi, "r")
for s in dosya:
print(s, end="")
dosya.close()
sinavHazirla()
sinavDegerlendir()
sinavSirala()
sinavYaz()
sinavOku()
| [
"os.path.isfile",
"os.path.dirname",
"random.randint"
] | [((47, 75), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (62, 75), False, 'import os\n'), ((281, 301), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (295, 301), False, 'import random\n'), ((2184, 2209), 'os.path.isfile', 'os.path.isfile', (['dosyaIsmi'], {}), '(dosyaIsmi)\n', (2198, 2209), False, 'import os\n'), ((420, 440), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (434, 440), False, 'import random\n'), ((452, 473), 'random.randint', 'random.randint', (['(0)', '(99)'], {}), '(0, 99)\n', (466, 473), False, 'import random\n')] |
from twisted.plugin import IPlugin
from heufybot.moduleinterface import IBotModule
from heufybot.modules.commandinterface import BotCommand
from heufybot.utils.timeutils import now, timestamp
from zope.interface import implements
from datetime import datetime
class TimeCommand(BotCommand):
implements(IPlugin, IBotModule)
name = "Time"
timeBaseURL = "https://maps.googleapis.com/maps/api/timezone/json?"
def triggers(self):
return ["time"]
def load(self):
self.help = "Commands: time <lat> <lon>, time <place>, time <nickname> | Get the current local time for the " \
"given latlon, place or user."
self.commandHelp = {}
self.googleKey = None
if "api-keys" not in self.bot.storage:
self.bot.storage["api-keys"] = {}
if "google" in self.bot.storage["api-keys"]:
self.googleKey = self.bot.storage["api-keys"]["google"]
def execute(self, server, source, command, params, data):
if not self.googleKey:
self.replyPRIVMSG(server, source, "No API key found.")
return
# Use the user's nickname as a parameter if none were given
if len(params) == 0:
params.append(data["user"].nick)
selfSearch = True
else:
selfSearch = False
# Try using latlon to get the location
try:
lat = float(params[0])
lon = float(params[1])
location = self.bot.moduleHandler.runActionUntilValue("geolocation-latlon", lat, lon)
if not location:
self.replyPRIVMSG(server, source, "I can't determine locations at the moment. Try again later.")
return
if not location["success"]:
self.replyPRIVMSG(server, source, "I don't think that's even a location in this multiverse...")
return
self._handleCommandWithLocation(server, source, location)
return
except (IndexError, ValueError):
pass # The user did not give a latlon, so continue using other methods
# Try to determine the user's location from a nickname
if self.bot.config.serverItemWithDefault(server, "use_userlocation", False):
userLoc = self.bot.moduleHandler.runActionUntilValue("userlocation", server, source, params[0], selfSearch)
if selfSearch:
if not userLoc:
return
elif not userLoc["success"]:
return
if userLoc and userLoc["success"]:
if "lat" in userLoc:
location = self.bot.moduleHandler.runActionUntilValue("geolocation-latlon", userLoc["lat"],
userLoc["lon"])
else:
location = self.bot.moduleHandler.runActionUntilValue("geolocation-place", userLoc["place"])
if not location:
self.replyPRIVMSG(server, source, "I can't determine locations at the moment. Try again later.")
return
if not location["success"]:
self.replyPRIVMSG(server, source, "I don't think that's even a location in this multiverse...")
return
self._handleCommandWithLocation(server, source, location)
return
# Try to determine the location by the name of the place
location = self.bot.moduleHandler.runActionUntilValue("geolocation-place", " ".join(params))
if not location:
self.replyPRIVMSG(server, source, "I can't determine locations at the moment. Try again later.")
return
if not location["success"]:
self.replyPRIVMSG(server, source, "I don't think that's even a location in this multiverse...")
return
self._handleCommandWithLocation(server, source, location)
def _handleCommandWithLocation(self, server, source, location):
formattedTime = self._getTime(location["latitude"], location["longitude"])
self.replyPRIVMSG(server, source, "Location: {} | {}".format(location["locality"], formattedTime))
def _getTime(self, lat, lon):
currentTime = timestamp(now())
params = {
"location": "{},{}".format(lat, lon),
"timestamp": currentTime,
"key": self.googleKey
}
result = self.bot.moduleHandler.runActionUntilValue("fetch-url", self.timeBaseURL, params)
if not result:
return "No time for this location could be found at this moment. Try again later."
timeJSON = result.json()
if timeJSON["status"] != "OK":
if "error_message" in timeJSON:
return timeJSON["error_message"]
else:
return "An unknown error occurred while requesting the time."
resultDate = datetime.fromtimestamp(currentTime + int(timeJSON["dstOffset"]) + int(timeJSON["rawOffset"]))
properDay = self._getProperDay(resultDate.day)
formattedTime = resultDate.strftime("%H:%M (%I:%M %p) on %A, " + properDay + " of %B, %Y")
return "Timezone: {} | Local time is {}".format(timeJSON["timeZoneName"], formattedTime)
def _getProperDay(self, day):
if day in [1, 21, 31]:
return "{}st".format(day)
elif day in [2, 22]:
return "{}nd".format(day)
elif day in [3, 33]:
return "{}rd".format(day)
else:
return "{}th".format(day)
timeCommand = TimeCommand()
| [
"heufybot.utils.timeutils.now",
"zope.interface.implements"
] | [((297, 328), 'zope.interface.implements', 'implements', (['IPlugin', 'IBotModule'], {}), '(IPlugin, IBotModule)\n', (307, 328), False, 'from zope.interface import implements\n'), ((4317, 4322), 'heufybot.utils.timeutils.now', 'now', ([], {}), '()\n', (4320, 4322), False, 'from heufybot.utils.timeutils import now, timestamp\n')] |
import pandas as pd
import click
import collections
def kmer_suffix(kmer):
return kmer[1:]
def kmer_prefix(kmer):
return kmer[:-1]
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def build_graph(kmers):
graph = collections.defaultdict(list)
for kmer in kmers:
prefix = kmer_prefix(kmer)
suffix = kmer_suffix(kmer)
graph[prefix].append(suffix)
return graph
def find_start_vertex(graph):
counter = collections.defaultdict(lambda: 0)
for key, value in graph.items():
counter[key] += 0
if len(value) == 0:
return key
for node in value:
counter[node] += 1
counter_sort = sorted(counter.items(), key=lambda x: x[1])
return counter_sort[0][0]
def find_eulerian_tour(graph):
"""
stack St;
в St кладём любую вершину (стартовая вершина);
пока St не пустой
пусть V - значение на вершине St;
если степень(V) = 0, то
добавляем V к ответу;
снимаем V с вершины St;
иначе
находим любое ребро, выходящее из V;
удаляем его из графа;
второй конец этого ребра кладём в St;
"""
ans = []
stack = [find_start_vertex(graph)]
while stack:
curr_v = stack[-1]
if len(graph[curr_v]) == 0:
ans.append(curr_v)
stack.pop()
else:
next_v = graph[curr_v].pop()
stack.append(next_v)
return list(reversed(ans))
def dna_reconstruction(k, dna):
kmers = [x for x in chunks(dna, k)]
graph = build_graph(kmers)
path = find_eulerian_tour(graph)
result = [x[0] for x in path] + [path[-1][1:]]
return "".join(result)
@click.command()
@click.option(
"--fin",
type=str,
default="problem11_input.tsv")
def main(fin):
df = pd.read_csv(fin, sep="\t")
assert all(x in df.columns.values.tolist() for x in ["k", "dna"])
for i, row in df.iterrows():
print(dna_reconstruction(row["k"], row["dna"]))
if __name__ == '__main__':
main()
| [
"click.option",
"click.command",
"collections.defaultdict",
"pandas.read_csv"
] | [((1789, 1804), 'click.command', 'click.command', ([], {}), '()\n', (1802, 1804), False, 'import click\n'), ((1806, 1868), 'click.option', 'click.option', (['"""--fin"""'], {'type': 'str', 'default': '"""problem11_input.tsv"""'}), "('--fin', type=str, default='problem11_input.tsv')\n", (1818, 1868), False, 'import click\n'), ((310, 339), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (333, 339), False, 'import collections\n'), ((533, 568), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (556, 568), False, 'import collections\n'), ((1906, 1932), 'pandas.read_csv', 'pd.read_csv', (['fin'], {'sep': '"""\t"""'}), "(fin, sep='\\t')\n", (1917, 1932), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
import sys
import re
import time
import datetime
import os
for module in sorted(sys.modules):
print("%-20s : %s" % (module, sys.modules[module]))
print('USER : ', os.environ['USER'])
print('PWD : ', os.environ['PWD'])
print('PYTHONPATH: ', os.environ.get('PYTHONPATH'))
print(sys.path)
| [
"os.environ.get"
] | [((281, 309), 'os.environ.get', 'os.environ.get', (['"""PYTHONPATH"""'], {}), "('PYTHONPATH')\n", (295, 309), False, 'import os\n')] |
# SPDX-FileCopyrightText: 2020 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: Apache-2.0
from unittest import mock
import pytest
from codeprep.bpepkg.bpe_config import BpeConfig, BpeParam, BpeConfigNotSupported
from codeprep.pipeline.bpelearner import run
@mock.patch('codeprep.pipeline.bpelearner.Dataset', autospec=True)
def test_run_word_end(mocked_dataset):
bpe_config = BpeConfig({
BpeParam.BASE: 'code',
BpeParam.WORD_END: True,
BpeParam.UNICODE: 'yes',
BpeParam.CASE: 'yes'
})
with pytest.raises(BpeConfigNotSupported):
run(mocked_dataset, 1, bpe_config)
@mock.patch('codeprep.pipeline.bpelearner.Dataset', autospec=True)
def test_run_bytes_bpe(mocked_dataset):
bpe_config = BpeConfig({
BpeParam.BASE: 'code',
BpeParam.WORD_END: False,
BpeParam.UNICODE: 'bytes',
BpeParam.CASE: 'yes'
})
with pytest.raises(BpeConfigNotSupported):
run(mocked_dataset, 1, bpe_config) | [
"codeprep.pipeline.bpelearner.run",
"codeprep.bpepkg.bpe_config.BpeConfig",
"unittest.mock.patch",
"pytest.raises"
] | [((261, 326), 'unittest.mock.patch', 'mock.patch', (['"""codeprep.pipeline.bpelearner.Dataset"""'], {'autospec': '(True)'}), "('codeprep.pipeline.bpelearner.Dataset', autospec=True)\n", (271, 326), False, 'from unittest import mock\n'), ((621, 686), 'unittest.mock.patch', 'mock.patch', (['"""codeprep.pipeline.bpelearner.Dataset"""'], {'autospec': '(True)'}), "('codeprep.pipeline.bpelearner.Dataset', autospec=True)\n", (631, 686), False, 'from unittest import mock\n'), ((383, 493), 'codeprep.bpepkg.bpe_config.BpeConfig', 'BpeConfig', (["{BpeParam.BASE: 'code', BpeParam.WORD_END: True, BpeParam.UNICODE: 'yes',\n BpeParam.CASE: 'yes'}"], {}), "({BpeParam.BASE: 'code', BpeParam.WORD_END: True, BpeParam.UNICODE:\n 'yes', BpeParam.CASE: 'yes'})\n", (392, 493), False, 'from codeprep.bpepkg.bpe_config import BpeConfig, BpeParam, BpeConfigNotSupported\n'), ((744, 858), 'codeprep.bpepkg.bpe_config.BpeConfig', 'BpeConfig', (["{BpeParam.BASE: 'code', BpeParam.WORD_END: False, BpeParam.UNICODE: 'bytes',\n BpeParam.CASE: 'yes'}"], {}), "({BpeParam.BASE: 'code', BpeParam.WORD_END: False, BpeParam.\n UNICODE: 'bytes', BpeParam.CASE: 'yes'})\n", (753, 858), False, 'from codeprep.bpepkg.bpe_config import BpeConfig, BpeParam, BpeConfigNotSupported\n'), ((537, 573), 'pytest.raises', 'pytest.raises', (['BpeConfigNotSupported'], {}), '(BpeConfigNotSupported)\n', (550, 573), False, 'import pytest\n'), ((583, 617), 'codeprep.pipeline.bpelearner.run', 'run', (['mocked_dataset', '(1)', 'bpe_config'], {}), '(mocked_dataset, 1, bpe_config)\n', (586, 617), False, 'from codeprep.pipeline.bpelearner import run\n'), ((901, 937), 'pytest.raises', 'pytest.raises', (['BpeConfigNotSupported'], {}), '(BpeConfigNotSupported)\n', (914, 937), False, 'import pytest\n'), ((947, 981), 'codeprep.pipeline.bpelearner.run', 'run', (['mocked_dataset', '(1)', 'bpe_config'], {}), '(mocked_dataset, 1, bpe_config)\n', (950, 981), False, 'from codeprep.pipeline.bpelearner import run\n')] |
#!/usr/bin/env python3
import torrent_parser as tp
import asyncio
import contextlib
import pathlib
import argparse
import pprint
import hashlib
import concurrent.futures
import os.path
import logging
import tqdm
class TorrentChecker(object):
def __init__(self, datadir=pathlib.Path('.'), data_file_globs=["**"],
checkers=None, pieces=None):
self._data_file_globs = data_file_globs
self._datadir = datadir
self._checkers = checkers
self._pieces = pieces
self._logger = logging.getLogger("TorrentChecker")
self._cancelled = False
def _IsWantedDataFile(self, paths):
for glob in self._data_file_globs:
for path in paths:
if path.match(glob):
return True
return False
def _RaiseIfCancelled(self):
if self._cancelled:
raise asyncio.CancelledError()
def _GetPieceHash(self, datadir, piece_index, piece_len, paths, offset):
first_time = True
bytes_remaining = piece_len
hasher = hashlib.sha1()
for path in paths:
full_path = datadir.joinpath(path)
#logging.debug("Hashing piece %d in file %s", piece_index, path)
if bytes_remaining == 0:
raise ValueError(
"Too many paths passed into Check for piece size {}: {!r}".format(
piece_len, paths))
with open(full_path, "rb") as fobj:
if first_time:
fobj.seek(offset)
first_time = False
while bytes_remaining != 0:
self._RaiseIfCancelled()
data = fobj.read(bytes_remaining)
if not data:
break
hasher.update(data)
bytes_remaining -= len(data)
return hasher.hexdigest()
def _Check(self, datadir, piece_index, piece_sha1, piece_len, paths, offset):
if self._pieces and piece_index not in self._pieces:
#self._logger.warning('skipped %d', piece_index)
return
sha1 = self._GetPieceHash(datadir, piece_index, piece_len, paths, offset)
if piece_sha1 == sha1:
#logging.info(
# ("Piece %d (len %d) verifies correctly with hash %r, containing files\n"
# "%s"),
# piece_index, piece_len, sha1, paths)
pass
else:
self._logger.warning(
("Piece %d (len %d) containing files %r (offset %d) does not verify."
"\n expected: %r != actual: %r"),
piece_index, piece_len, paths, offset, piece_sha1, sha1)
def _CollectPieces(self, piece_len, pieces, file_infos):
file_infos_iter = iter(file_infos)
cur_file_info = next(file_infos_iter)
prev_offset = 0
#logging.debug("piece_len = %d", piece_len)
for piece_index, piece_sha1 in enumerate(pieces):
offset = prev_offset
bytes_covered_total = 0
piece_paths = []
while bytes_covered_total < piece_len:
#path = os.path.join(datadir, *cur_file_info['path'])
path = pathlib.PurePath(*cur_file_info['path'])
piece_paths.append(path)
size = cur_file_info['length']
effective_size = size - offset
newly_covered_bytes = min(piece_len - bytes_covered_total, effective_size)
bytes_covered_total += newly_covered_bytes
offset += newly_covered_bytes
#logging.debug("piece = %d, offset = %d, bct = %d, size = %d",
#piece_index, offset,
#bytes_covered_total, size)
if offset == size:
#logging.debug("resetting offset")
offset = 0
try:
cur_file_info = next(file_infos_iter)
except StopIteration:
break
#logging.debug("bct = %d", bytes_covered_total)
#logging.debug(
# "yielding (%d, %r, %r, %d)", piece_index, piece_sha1, piece_paths,
# prev_offset)
yield (piece_index, piece_sha1, piece_paths, prev_offset)
prev_offset = offset
def CheckTorrent(self, torrent_file):
parsed = tp.parse_torrent_file(torrent_file)
info = parsed['info']
piece_len = info['piece length']
pieces = info['pieces']
file_infos = None
torrent_name = info['name']
if 'files' in info:
file_infos = info['files']
else:
file_infos = [info]
info['path'] = [f'{self._datadir}/{torrent_name}']
datadir = pathlib.Path(self._datadir, torrent_name)
with concurrent.futures.ThreadPoolExecutor(
max_workers=self._checkers) as executor:
futures = []
try:
for piece_index, piece_sha1, piece_paths, offset in self._CollectPieces(
piece_len, pieces, file_infos):
if not self._IsWantedDataFile(piece_paths):
#logging.debug(
# "Skipping files which matched no data_file_globs: %r",
# piece_paths)
continue
futures.append(
executor.submit(
TorrentChecker._Check, self, datadir, piece_index, piece_sha1,
piece_len, piece_paths, offset))
for future in tqdm.tqdm(
concurrent.futures.as_completed(futures), total=len(futures),
unit='piece', dynamic_ncols=True, leave=False):
future.result()
except:
self._logger.warning("Cancelling pending work")
for future in futures:
future.cancel()
self._cancelled = True
raise
def main():
parser = argparse.ArgumentParser(description='Verify downloaded torrents')
parser.add_argument('torrent_file', type=str)
parser.add_argument('data_file_globs', nargs='+', type=str, default=["**"])
parser.add_argument('--checkers', default=None, type=int)
parser.add_argument('--loglevel', default=None, type=str)
parser.add_argument('--datadir', default=pathlib.Path('.'), type=pathlib.Path)
parser.add_argument('--pieces', default=None, type=str)
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.loglevel.upper()))
pieces = None
if args.pieces:
pieces = args.pieces.split('-')
if len(pieces) == 1:
pieces = int(pieces[0])
pieces = range(pieces, pieces + 1)
else:
pieces = range(int(pieces[0]), int(pieces[1]))
checker = TorrentChecker(
data_file_globs=args.data_file_globs,
datadir=args.datadir,
checkers=args.checkers,
pieces=pieces)
checker.CheckTorrent(args.torrent_file)
if __name__ == '__main__':
main()
# vim: set et ts=2 sw=2 sts=2
| [
"logging.getLogger",
"asyncio.CancelledError",
"torrent_parser.parse_torrent_file",
"argparse.ArgumentParser",
"pathlib.Path",
"pathlib.PurePath",
"hashlib.sha1"
] | [((5286, 5351), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Verify downloaded torrents"""'}), "(description='Verify downloaded torrents')\n", (5309, 5351), False, 'import argparse\n'), ((272, 289), 'pathlib.Path', 'pathlib.Path', (['"""."""'], {}), "('.')\n", (284, 289), False, 'import pathlib\n'), ((506, 541), 'logging.getLogger', 'logging.getLogger', (['"""TorrentChecker"""'], {}), "('TorrentChecker')\n", (523, 541), False, 'import logging\n'), ((977, 991), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (989, 991), False, 'import hashlib\n'), ((3866, 3901), 'torrent_parser.parse_torrent_file', 'tp.parse_torrent_file', (['torrent_file'], {}), '(torrent_file)\n', (3887, 3901), True, 'import torrent_parser as tp\n'), ((4212, 4253), 'pathlib.Path', 'pathlib.Path', (['self._datadir', 'torrent_name'], {}), '(self._datadir, torrent_name)\n', (4224, 4253), False, 'import pathlib\n'), ((809, 833), 'asyncio.CancelledError', 'asyncio.CancelledError', ([], {}), '()\n', (831, 833), False, 'import asyncio\n'), ((5641, 5658), 'pathlib.Path', 'pathlib.Path', (['"""."""'], {}), "('.')\n", (5653, 5658), False, 'import pathlib\n'), ((2864, 2904), 'pathlib.PurePath', 'pathlib.PurePath', (["*cur_file_info['path']"], {}), "(*cur_file_info['path'])\n", (2880, 2904), False, 'import pathlib\n')] |
import os
_rootdir = os.getcwd()
def find_rootdir(filenames = ('__main__.py', 'main.ipynb')):
path = os.getcwd()
while os.path.isdir(path):
ls = os.listdir(path)
if any([f in ls for f in filenames]):
return os.path.abspath(path)
else:
path += '/..'
# nothing found: using the current working dir
return os.getcwd()
def set_rootdir(path=None):
global _rootdir
if path and os.path.isdir(path):
_rootdir = os.path.abspath(path)
else:
_rootdir = find_rootdir()
return _rootdir
def rootdir():
return _rootdir
| [
"os.path.abspath",
"os.listdir",
"os.path.isdir",
"os.getcwd"
] | [((22, 33), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (31, 33), False, 'import os\n'), ((107, 118), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (116, 118), False, 'import os\n'), ((130, 149), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (143, 149), False, 'import os\n'), ((371, 382), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (380, 382), False, 'import os\n'), ((164, 180), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (174, 180), False, 'import os\n'), ((449, 468), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (462, 468), False, 'import os\n'), ((489, 510), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (504, 510), False, 'import os\n'), ((246, 267), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (261, 267), False, 'import os\n')] |
import math
import random
import numpy as np
# 先生成一个随机的信源
def random_sources():
random_sources = random.randint(0, 16)
print('这个随机数是', random_sources)
return hanming(random_sources)
# return bin(int(random_sources))
# 进行编码,使用异或规则生成有校验位的(7,4)汉明码字
# def hanming(code_0):
# # 把十进制的数字转变成二进制
# code1 = bin(int(code_0))
# code = str(code1)[2:]
# print('{0}变成二进制'.format(code_0), code)
# # # 判断待验证位数是否达到4位,不足位数前面补0
# while len(code) < 4:
# code = '0' + code
# # 将码字转变成列表格式,方便后面进行操作
# # print '补齐4位之后',code
# code_list = list(code)
# # 编码结构即码字,对于(7,4)线性分组码汉明码而言
# code_1 = int(code_list[0]) ^ int(code_list[2]) ^ int(code_list[3])
# code_2 = int(code_list[0]) ^ int(code_list[1]) ^ int(code_list[2])
# code_4 = int(code_list[1]) ^ int(code_list[2]) ^ int(code_list[3])
# code_list.insert(0, str(code_1))
# code_list.insert(1, str(code_2))
# code_list.insert(2, str(code_4))
# hanming_code = ''.join(code_list)
# print('生成的(7,4)汉明码字:' + hanming_code)
# return code_list
def hanming(code_0):
# 把十进制的数字转变成二进制
code1 = bin(int(code_0))
code = str(code1)[2:]
print('{0}变成二进制'.format(code_0), code)
# # 判断待验证位数是否达到4位,不足位数前面补0
while len(code) < 4:
code = '0' + code
# 将码字转变成列表格式,方便后面进行操作
# print '补齐4位之后',code
code_list = list(code)
# 编码结构即码字,对于(7,4)线性分组码汉明码而言
code_1 = int(code_list[0]) ^ int(code_list[1]) ^ int(code_list[3]) ^ 1
code_2 = int(code_list[0]) ^ int(code_list[2]) ^ int(code_list[3]) ^ 1
code_4 = int(code_list[1]) ^ int(code_list[2]) ^ int(code_list[3]) ^ 1
code_list.insert(0, str(code_1))
code_list.insert(1, str(code_2))
code_list.insert(3, str(code_4))
hanming_code = ''.join(code_list)
print('生成的(7,4)汉明码字:' + hanming_code)
return code_list
if __name__ == '__main__':
# x是原始信号,生成的(7,4)汉明码
# x1 = random_sources()
x1 = hanming(3)
print(x1)
| [
"random.randint"
] | [((191, 212), 'random.randint', 'random.randint', (['(0)', '(16)'], {}), '(0, 16)\n', (205, 212), False, 'import random\n')] |
from django.db import models
from users.models import User
class Assignment(models.Model):
title = models.CharField(max_length=50)
teacher = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.title
class GradedAssignment(models.Model):
student = models.ForeignKey(User, on_delete=models.CASCADE)
assignment = models.ForeignKey(Assignment,
on_delete=models.SET_NULL,
blank=True,
null=True)
grade = models.FloatField()
def __str__(self):
return self.student.username
class Choice(models.Model):
title = models.CharField(max_length=50)
def __str__(self):
return self.title
class Question(models.Model):
question = models.CharField(max_length=200)
choices = models.ManyToManyField(Choice)
answer = models.ForeignKey(Choice,
on_delete=models.CASCADE,
related_name='answer',
blank=True,
null=True)
assignment = models.ForeignKey(Assignment,
on_delete=models.CASCADE,
related_name='questions',
blank=True,
null=True)
order = models.SmallIntegerField()
def __str__(self):
return self.question
| [
"django.db.models.FloatField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.SmallIntegerField",
"django.db.models.CharField"
] | [((105, 136), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (121, 136), False, 'from django.db import models\n'), ((151, 200), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (168, 200), False, 'from django.db import models\n'), ((305, 354), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (322, 354), False, 'from django.db import models\n'), ((372, 451), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Assignment'], {'on_delete': 'models.SET_NULL', 'blank': '(True)', 'null': '(True)'}), '(Assignment, on_delete=models.SET_NULL, blank=True, null=True)\n', (389, 451), False, 'from django.db import models\n'), ((569, 588), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (586, 588), False, 'from django.db import models\n'), ((692, 723), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (708, 723), False, 'from django.db import models\n'), ((821, 853), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (837, 853), False, 'from django.db import models\n'), ((868, 898), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Choice'], {}), '(Choice)\n', (890, 898), False, 'from django.db import models\n'), ((912, 1013), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Choice'], {'on_delete': 'models.CASCADE', 'related_name': '"""answer"""', 'blank': '(True)', 'null': '(True)'}), "(Choice, on_delete=models.CASCADE, related_name='answer',\n blank=True, null=True)\n", (929, 1013), False, 'from django.db import models\n'), ((1151, 1260), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Assignment'], {'on_delete': 'models.CASCADE', 'related_name': '"""questions"""', 'blank': '(True)', 'null': 
'(True)'}), "(Assignment, on_delete=models.CASCADE, related_name=\n 'questions', blank=True, null=True)\n", (1168, 1260), False, 'from django.db import models\n'), ((1408, 1434), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {}), '()\n', (1432, 1434), False, 'from django.db import models\n')] |
from fastapi import FastAPI
from . import api
app = FastAPI(debug=True)
app.include_router(api.router) | [
"fastapi.FastAPI"
] | [((55, 74), 'fastapi.FastAPI', 'FastAPI', ([], {'debug': '(True)'}), '(debug=True)\n', (62, 74), False, 'from fastapi import FastAPI\n')] |
#!/usr/bin/env python
"""
Perform compressed sensing analysis on a dax file using the
homotopy approach. Return the results in hres image format and
as a list of object locations.
Hazen 09/12
"""
import numpy
import storm_analysis.sa_library.datareader as datareader
import storm_analysis.sa_library.parameters as parameters
import storm_analysis.sa_library.readinsight3 as readinsight3
import storm_analysis.sa_library.writeinsight3 as writeinsight3
import storm_analysis.L1H.setup_A_matrix as setup_A_matrix
import storm_analysis.L1H.homotopy_imagea_c as homotopy_imagea_c
def analyze(movie_name, settings_name, hres_name, bin_name):
movie_data = datareader.inferReader(movie_name)
#
# FIXME:
#
# This should also start at the same frame as hres in the event of a restart.
#
i3_file = writeinsight3.I3Writer(bin_name)
params = parameters.ParametersL1H().initFromFile(settings_name)
#
# Load the a matrix and setup the homotopy image analysis class.
#
a_mat_file = params.getAttr("a_matrix")
print("Using A matrix file:", a_mat_file)
a_mat = setup_A_matrix.loadAMatrix(a_mat_file)
image = movie_data.loadAFrame(0)
htia = homotopy_imagea_c.HomotopyIA(a_mat,
params.getAttr("epsilon"),
image.shape)
#
# This opens the file. If it already exists, then it sets the file pointer
# to the end of the file & returns the number of the last frame analyzed.
#
curf = htia.openHRDataFile(hres_name)
#
# Figure out which frame to start & stop at.
#
[dax_x,dax_y,dax_l] = movie_data.filmSize()
if params.hasAttr("start_frame"):
if (params.getAttr("start_frame") >= curf) and (params.getAttr("start_frame") < dax_l):
curf = params.getAttr("start_frame")
if params.hasAttr("max_frame"):
if (params.getAttr("max_frame") > 0) and (params.getAttr("max_frame") < dax_l):
dax_l = params.getAttr("max_frame")
print("Starting analysis at frame", curf)
#
# Analyze the dax data.
#
total_peaks = 0
try:
while(curf<dax_l):
# Load image, subtract baseline & remove negative values.
image = movie_data.loadAFrame(curf).astype(numpy.float)
# Convert to photo-electrons.
image -= params.getAttr("camera_offset")
image = image/params.getAttr("camera_gain")
# Remove negative values.
mask = (image < 0)
image[mask] = 0
# Analyze image.
hres_image = htia.analyzeImage(image)
peaks = htia.saveHRFrame(hres_image, curf + 1)
[cs_x,cs_y,cs_a,cs_i] = htia.getPeaks(hres_image)
i3_file.addMoleculesWithXYAItersFrame(cs_x, cs_y, cs_a, cs_i, curf+1)
peaks = cs_x.size
total_peaks += peaks
print("Frame:", curf, peaks, total_peaks)
curf += 1
except KeyboardInterrupt:
print("Analysis stopped.")
# cleanup
htia.closeHRDataFile()
i3_file.close()
if __name__ == "__main__":
    import argparse

    # (flag, dest, help) for each of the four required command line options.
    _arg_specs = [
        ('--movie', 'movie',
         "The name of the movie to analyze, can be .dax, .tiff or .spe format."),
        ('--xml', 'settings',
         "The name of the settings xml file."),
        ('--hres', 'hres',
         "The name of 'high resolution' output file. This a compressed version of the final image."),
        ('--bin', 'mlist',
         "The name of the localizations output file. This is a binary file in Insight3 format."),
    ]
    parser = argparse.ArgumentParser(description='L1H analysis - Babcock, Optics Express, 2013')
    for flag, dest, help_text in _arg_specs:
        parser.add_argument(flag, dest=dest, type=str, required=True, help=help_text)
    args = parser.parse_args()
    analyze(args.movie, args.settings, args.hres, args.mlist)
#
# The MIT License
#
# Copyright (c) 2012 <NAME>, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| [
"storm_analysis.sa_library.parameters.ParametersL1H",
"storm_analysis.sa_library.datareader.inferReader",
"argparse.ArgumentParser",
"storm_analysis.L1H.setup_A_matrix.loadAMatrix",
"storm_analysis.sa_library.writeinsight3.I3Writer"
] | [((660, 694), 'storm_analysis.sa_library.datareader.inferReader', 'datareader.inferReader', (['movie_name'], {}), '(movie_name)\n', (682, 694), True, 'import storm_analysis.sa_library.datareader as datareader\n'), ((823, 855), 'storm_analysis.sa_library.writeinsight3.I3Writer', 'writeinsight3.I3Writer', (['bin_name'], {}), '(bin_name)\n', (845, 855), True, 'import storm_analysis.sa_library.writeinsight3 as writeinsight3\n'), ((1114, 1152), 'storm_analysis.L1H.setup_A_matrix.loadAMatrix', 'setup_A_matrix.loadAMatrix', (['a_mat_file'], {}), '(a_mat_file)\n', (1140, 1152), True, 'import storm_analysis.L1H.setup_A_matrix as setup_A_matrix\n'), ((3192, 3280), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""L1H analysis - Babcock, Optics Express, 2013"""'}), "(description=\n 'L1H analysis - Babcock, Optics Express, 2013')\n", (3215, 3280), False, 'import argparse\n'), ((874, 900), 'storm_analysis.sa_library.parameters.ParametersL1H', 'parameters.ParametersL1H', ([], {}), '()\n', (898, 900), True, 'import storm_analysis.sa_library.parameters as parameters\n')] |
"""
Methods for handling DB creation and CRUD operations in Sqlite3.
"""
# Standard library imports
import logging
import sqlite3
# Local application imports
from ism.exceptions.exceptions import UnrecognisedParameterisationCharacter
from ism.interfaces.dao_interface import DAOInterface
class Sqlite3DAO(DAOInterface):
    """Implements methods for handling DB creation and CRUD operations against SQLITE3"""

    def __init__(self, *args):
        """Initialise from a config dict.

        args[0] must contain ['database']['db_path']; the optional
        ['database']['raise_on_sql_error'] flag (default False) controls
        whether SQL errors are re-raised after being logged.
        """
        self.db_path = args[0]['database']['db_path']
        self.raise_on_sql_error = args[0].get('database', {}).get('raise_on_sql_error', False)
        self.logger = logging.getLogger('ism.sqlite3_dao.Sqlite3DAO')
        self.logger.info('Initialising Sqlite3DAO.')
        self.cnx = None

    def close_connection(self):
        """Close the current connection, if one is open."""
        if self.cnx:
            self.cnx.close()

    def create_database(self, *args):
        """Calling open_connection creates the database in SQLITE3

        Seems redundant but is useful to honour the interface.
        """
        self.open_connection(*args)
        self.close_connection()

    def execute_sql_query(self, sql, params=()):
        """Execute a SQL query and return the fetched rows.

        Returns None on error unless raise_on_sql_error is set.

        @:param sql. Parameterised SQL string, e.g. 'SELECT ...'
        @:param params. Parameter tuple for the placeholders in sql.
        """
        try:
            self.open_connection()
            cursor = self.cnx.cursor()
            cursor.execute(sql, params)
            return cursor.fetchall()
        except sqlite3.Error as e:
            # Use the instance logger rather than the root logger so the
            # message carries this module's logger name.
            self.logger.error(f'Error executing sql query ({sql}) ({params}): {e}')
            if self.raise_on_sql_error:
                raise e
        finally:
            # Close even when the statement failed; the previous version
            # leaked the connection on the error path.
            self.close_connection()

    def execute_sql_statement(self, sql, params=()):
        """Execute a SQL statement and commit.

        @:param sql. Parameterised SQL string.
        @:param params. Parameter tuple for the placeholders in sql.
        """
        try:
            self.open_connection()
            cursor = self.cnx.cursor()
            cursor.execute(sql, params)
            self.cnx.commit()
        except sqlite3.Error as e:
            self.logger.error(f'Error executing sql query ({sql}) ({params}): {e}')
            if self.raise_on_sql_error:
                raise e
        finally:
            self.close_connection()

    def open_connection(self, *args) -> sqlite3.Connection:
        """Creates a database connection.

        Opens a SQLITE3 database connection and returns a connector.
        Creates the database file if it does not exist yet.
        """
        try:
            self.cnx = sqlite3.connect(self.db_path)
            return self.cnx
        except sqlite3.Error as error:
            # Lazy %-formatting; previously `error` was passed as a stray
            # positional argument with no placeholder, which breaks the
            # record formatting when the message is emitted.
            self.logger.error("Error while connecting to Sqlite3 database: %s", error)

    @staticmethod
    def prepare_parameterised_statement(sql: str) -> str:
        """Prepare a parameterised sql statement for this RDBMS.

        Third party developers will want to use the DAO to run CRUD
        operations against the DB, but we support multiple RDBMS. e.g.

        MySql: INSERT INTO Employee
            (id, Name, Joining_date, salary) VALUES (%s,%s,%s,%s)
        Sqlite3: INSERT INTO Employee
            (id, Name, Joining_date, salary) VALUES (?,?,?,?)

        This method ensures that the parameterisation is set correctly
        for the RDBMS in use. Method doesn't use very vigorous checking but
        as this should only be an issue while developing a new action pack
        it should be sufficient for now.
        """
        if '%s' in sql:
            return sql.replace('%s', '?')
        elif '?' in sql:
            return sql
        else:
            raise UnrecognisedParameterisationCharacter(
                f'Parameterisation character not recognised / found in SQL string ({sql})'
            )
| [
"logging.getLogger",
"logging.error",
"sqlite3.connect",
"ism.exceptions.exceptions.UnrecognisedParameterisationCharacter"
] | [((617, 664), 'logging.getLogger', 'logging.getLogger', (['"""ism.sqlite3_dao.Sqlite3DAO"""'], {}), "('ism.sqlite3_dao.Sqlite3DAO')\n", (634, 664), False, 'import logging\n'), ((2365, 2394), 'sqlite3.connect', 'sqlite3.connect', (['self.db_path'], {}), '(self.db_path)\n', (2380, 2394), False, 'import sqlite3\n'), ((1523, 1590), 'logging.error', 'logging.error', (['f"""Error executing sql query ({sql}) ({params}): {e}"""'], {}), "(f'Error executing sql query ({sql}) ({params}): {e}')\n", (1536, 1590), False, 'import logging\n'), ((2012, 2079), 'logging.error', 'logging.error', (['f"""Error executing sql query ({sql}) ({params}): {e}"""'], {}), "(f'Error executing sql query ({sql}) ({params}): {e}')\n", (2025, 2079), False, 'import logging\n'), ((3476, 3594), 'ism.exceptions.exceptions.UnrecognisedParameterisationCharacter', 'UnrecognisedParameterisationCharacter', (['f"""Parameterisation character not recognised / found in SQL string ({sql})"""'], {}), "(\n f'Parameterisation character not recognised / found in SQL string ({sql})')\n", (3513, 3594), False, 'from ism.exceptions.exceptions import UnrecognisedParameterisationCharacter\n')] |
# Author: btjanaka (<NAME>)
# Problem: (UVa) 247
import sys
from collections import defaultdict
def kosaraju(g, g_rev):
    """Return the strongly connected components of a directed graph.

    Args:
        g: adjacency mapping vertex -> set of successors; every vertex
           appears as a key.
        g_rev: the reversed graph in the same format.

    Returns:
        A list of components, each a list of vertices.
    """
    finish_order = []
    seen = set()

    def dfs_forward(start):
        # First pass: record vertices in order of DFS completion.
        seen.add(start)
        for nxt in g[start]:
            if nxt not in seen:
                dfs_forward(nxt)
        finish_order.append(start)

    for vertex in g:
        if vertex not in seen:
            dfs_forward(vertex)

    seen.clear()
    components = []

    def dfs_reverse(start):
        # Second pass: everything reachable in g_rev from `start` belongs
        # to the component currently being built.
        components[-1].append(start)
        seen.add(start)
        for nxt in g_rev[start]:
            if nxt not in seen:
                dfs_reverse(nxt)

    for vertex in reversed(finish_order):
        if vertex not in seen:
            components.append([])
            dfs_reverse(vertex)
    return components
def main():
    """Read datasets from stdin; print each one's calling circles (SCCs)."""
    dataset = 0
    while True:
        # A "0 0" dataset terminates the input.
        num_people, num_calls = (int(tok) for tok in input().split())
        if num_people == 0 and num_calls == 0:
            break
        dataset += 1
        fwd = defaultdict(set)
        rev = defaultdict(set)
        for _ in range(num_calls):
            caller, callee = input().strip().split()
            fwd[caller].add(callee)
            fwd[callee]           # ensure callee exists as a vertex
            rev[callee].add(caller)
            rev[caller]
        # Blank line between consecutive datasets.
        if dataset > 1:
            print()
        print(f"Calling circles for data set {dataset}:")
        for circle in kosaraju(fwd, rev):
            print(", ".join(circle))
main()
| [
"collections.defaultdict"
] | [((865, 881), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (876, 881), False, 'from collections import defaultdict\n'), ((883, 899), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (894, 899), False, 'from collections import defaultdict\n')] |
"""Print the current mouse cursor position after a short delay."""
import time

import pyautogui

# Seconds to wait so the user can move the cursor to the point of interest.
DELAY_SECONDS = 3

time.sleep(DELAY_SECONDS)
print(pyautogui.position())
"pyautogui.position",
"time.sleep"
] | [((30, 43), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (40, 43), False, 'import time\n'), ((50, 70), 'pyautogui.position', 'pyautogui.position', ([], {}), '()\n', (68, 70), False, 'import pyautogui\n')] |
from jmap.account.imap.imap_utf7 import imap_utf7_decode, imap_utf7_encode
# IMAP LIST flags that carry no role information (compared lower-cased).
KNOWN_SPECIALS = {
    '\\haschildren',
    '\\hasnochildren',
    '\\noselect',
    '\\noinferiors',
    '\\unmarked',
    '\\subscribed',
}

# Lower-cased special-use flags and well-known mailbox names -> JMAP role.
ROLE_MAP = {
    # Name magic: common mailbox display names.
    'inbox': 'inbox',
    'drafts': 'drafts',
    'draft': 'drafts',
    'draft messages': 'drafts',
    'bulk': 'junk',
    'bulk mail': 'junk',
    'junk': 'junk',
    'junk mail': 'junk',
    'spam mail': 'junk',
    'spam messages': 'junk',
    'archive': 'archive',
    'sent': 'sent',
    'sent items': 'sent',
    'sent messages': 'sent',
    'deleted messages': 'trash',
    'trash': 'trash',
    # Special-use flags.
    '\\inbox': 'inbox',
    '\\trash': 'trash',
    '\\sent': 'sent',
    '\\junk': 'junk',
    '\\spam': 'junk',
    '\\archive': 'archive',
    '\\drafts': 'drafts',
    '\\all': 'all',
}
class ImapMailbox(dict):
    """JMAP Mailbox object computed lazily from IMAP LIST data.

    Expected preset keys: 'imapname', 'sep', 'flags', 'uidvalidity' and
    'uidnext'; the 'db' attribute is a backref to the owning account.  Any
    missing key is computed on first access by __missing__, which calls the
    method of the same name.  Some methods cache their result back into the
    dict, others recompute on every access.
    """
    __slots__ = ('db',)

    def __missing__(self, key):
        # Lazily compute the value by calling the method named after the key.
        return getattr(self, key)()

    def name(self):
        """Decode the display name (last path segment) from the IMAP name."""
        try:
            _, name = self['imapname'].rsplit(self['sep'], maxsplit=1)
        except ValueError:
            # No separator: the mailbox lives at the top level.
            name = self['imapname']
        self['name'] = imap_utf7_decode(name.encode())
        return self['name']

    def parentId(self):
        """Return the id of the parent mailbox, or None at the top level."""
        try:
            parentname, _ = self['imapname'].rsplit(self['sep'], maxsplit=1)
            self['parentId'] = self.db.byimapname[parentname]['id']
        except ValueError:
            self['parentId'] = None
        return self['parentId']

    def role(self):
        """Derive the JMAP role from special-use flags or the mailbox name."""
        for f in self['flags']:
            if f not in KNOWN_SPECIALS:
                self['role'] = ROLE_MAP.get(f, None)
                break
        else:
            # No special-use flag; fall back to name magic.
            self['role'] = ROLE_MAP.get(self['imapname'].lower(), None)
        return self['role']

    def sortOrder(self):
        """Inbox sorts first (1), other special mailboxes next (2), rest last (3).

        Bug fix: the previous expression
        `2 if role else (1 if role == 'inbox' else 3)` tested for 'inbox'
        only in the branch where role was falsy, so 1 was unreachable and
        the inbox sorted together with the other special mailboxes.
        """
        role = self['role']
        if role == 'inbox':
            return 1
        return 2 if role else 3

    def isSubscribed(self):
        return '\\subscribed' in self['flags']

    def totalEmails(self):
        # Placeholder; real counts are presumably filled in elsewhere.
        return 0

    def unreadEmails(self):
        return 0

    def totalThreads(self):
        return self['totalEmails']

    def unreadThreads(self):
        return self['unreadEmails']

    def myRights(self):
        """ACL summary: most rights require the mailbox to be selectable."""
        can_select = '\\noselect' not in self['flags']
        self['myRights'] = {
            'mayReadItems': can_select,
            'mayAddItems': can_select,
            'mayRemoveItems': can_select,
            'maySetSeen': can_select,
            'maySetKeywords': can_select,
            'mayCreateChild': True,
            # Mailboxes with a role must not be renamed or deleted.
            'mayRename': False if self['role'] else True,
            'mayDelete': False if self['role'] else True,
            'maySubmit': can_select,
        }
        return self['myRights']

    def imapname(self):
        """Re-encode the full IMAP name from the display name + parent chain."""
        encname = imap_utf7_encode(self['name']).decode()
        if self['parentId']:
            parent = self.db.mailboxes[self['parentId']]
            self['imapname'] = parent['imapname'] + parent['sep'] + encname
        else:
            self['imapname'] = encname
        return self['imapname']

    def created(self):
        return self['uidvalidity']

    def updated(self):
        # Monotonically increasing per mailbox state change.
        return self['uidvalidity'] * self['uidnext']

    def deleted(self):
        return None
| [
"jmap.account.imap.imap_utf7.imap_utf7_encode"
] | [((2719, 2749), 'jmap.account.imap.imap_utf7.imap_utf7_encode', 'imap_utf7_encode', (["self['name']"], {}), "(self['name'])\n", (2735, 2749), False, 'from jmap.account.imap.imap_utf7 import imap_utf7_decode, imap_utf7_encode\n')] |
from pyieee1905.ieee1905_tlv import IEEE1905_TLV
from scapy.packet import Packet, bind_layers
from scapy.fields import BitField, XByteField, XShortField, XShortEnumField
from scapy.layers.l2 import Ether
# IEEE 1905.1 frames are multicast to this reserved group MAC address.
IEEE1905_MCAST = "01:80:c2:00:00:13"

# Message type codes: 0x0000-0x0012 are IEEE 1905.1 message types,
# 0x8000-0x801A are the Wi-Fi Alliance Multi-AP extensions.
ieee1905_msg_type = {
    0x0000: "TOPOLOGY_DISCOVERY_MESSAGE",
    0x0001: "TOPOLOGY_NOTIFICATION_MESSAGE",
    0x0002: "TOPOLOGY_QUERY_MESSAGE",
    0x0003: "TOPOLOGY_RESPONSE_MESSAGE",
    0x0004: "VENDOR_SPECIFIC_MESSAGE",
    0x0005: "LINK_METRIC_QUERY_MESSAGE",
    0x0006: "LINK_METRIC_RESPONSE_MESSAGE",
    0x0007: "AP_AUTOCONFIGURATION_SEARCH_MESSAGE",
    0x0008: "AP_AUTOCONFIGURATION_RESPONSE_MESSAGE",
    0x0009: "AP_AUTOCONFIGURATION_WSC_MESSAGE",
    0x000A: "AP_AUTOCONFIGURATION_RENEW_MESSAGE",
    0x000B: "IEEE1905_PUSH_BUTTON_EVENT_NOTIFICATION_MESSAGE",
    0x000C: "IEEE1905_PUSH_BUTTON_JOIN_NOTIFICATION_MESSAGE",
    0x000D: "HIGHER_LAYER_QUERY_MESSAGE",
    0x000E: "HIGHER_LAYER_RESPONSE_MESSAGE",
    0x000F: "INTERFACE_POWER_CHANGE_REQUEST_MESSAGE",
    0x0010: "INTERFACE_POWER_CHANGE_RESPONSE_MESSAGE",
    0x0011: "GENERIC_PHY_QUERY_MESSAGE",
    0x0012: "GENERIC_PHY_RESPONSE_MESSAGE",
    0x8000: "IEEE1905_ACK_MESSAGE",
    0x8001: "AP_CAPABILITY_QUERY_MESSAGE",
    0x8002: "AP_CAPABILITY_REPORT_MESSAGE",
    0x8003: "MULTI_AP_POLICY_CONFIG_REQUEST_MESSAGE",
    0x8004: "CHANNEL_PREFERENCE_QUERY_MESSAGE",
    0x8005: "CHANNEL_PREFERENCE_REPORT_MESSAGE",
    0x8006: "CHANNEL_SELECTION_REQUEST_MESSAGE",
    0x8007: "CHANNEL_SELECTION_RESPONSE_MESSAGE",
    0x8008: "OPERATING_CHANNEL_REPORT_MESSAGE",
    0x8009: "CLIENT_CAPABILITIES_QUERY_MESSAGE",
    0x800A: "CLIENT_CAPABILITIES_REPORT_MESSAGE",
    0x800B: "AP_METRICS_QUERY_MESSAGE",
    0x800C: "AP_METRICS_RESPONSE_MESSAGE",
    0x800D: "ASSOCIATED_STA_LINK_METRICS_QUERY_MESSAGE",
    0x800E: "ASSOCIATED_STA_LINK_METRICS_RESPONSE_MESSAGE",
    0x800F: "UNASSOCIATED_STA_LINK_METRICS_QUERY_MESSAGE",
    0x8010: "UNASSOCIATED_STA_LINK_METRICS_RESPONSE_MESSAGE",
    0x8011: "BEACON_METRICS_QUERY_MESSAGE",
    # Fixed typo: was "BEACON_METRICS_REPONSE_METRICS", which broke the
    # "*_RESPONSE_MESSAGE" naming pattern used by every other entry.
    0x8012: "BEACON_METRICS_RESPONSE_MESSAGE",
    0x8013: "COMBINED_INFRASTRUCTURE_METRICS_MESSAGE",
    0x8014: "CLIENT_STEERING_REQUEST_MESSAGE",
    0x8015: "CLIENT_STEERING_BTM_REPORT_MESSAGE",
    0x8016: "CLIENT_ASSOCIATION_CONTROL_REQUEST_MESSAGE",
    0x8017: "STEERING_COMPLETED_MESSAGE",
    0x8018: "HIGHER_LAYER_DATA_MESSAGE",
    0x8019: "BACKHAUL_STEERING_REQUEST_MESSAGE",
    0x801A: "BACKHAUL_STEERING_RESPONSE_MESSAGE"
}
class MultiAP_Message(Packet):
    """Scapy layer for the IEEE 1905 / Multi-AP message header.

    Layout (presumably the 1905.1 CMDU header - verify against the spec):
    one byte each for version and reserved, a 16-bit message type drawn
    from ieee1905_msg_type, a 16-bit message id, a fragment id byte and a
    flags byte (last-fragment bit, relay bit, 6 reserved bits).
    """
    name = "IEEE 1905 MultiAP Message"
    fields_desc = [
        XByteField("msg_version", None),                      # message version
        XByteField("msg_reserved", None),                     # reserved byte
        XShortEnumField("msg_type", None, ieee1905_msg_type), # 16-bit message type
        XShortField("msg_id", None),                          # 16-bit message id
        XByteField("frag_id", None),                          # fragment id
        BitField("flag_last_frag_ind", 0, 1),                 # last fragment indicator
        BitField("flag_relay_ind", 0, 1),                     # relay indicator
        BitField("flag_reserved", 0, 6)                       # reserved flag bits
    ]
# Dispatch: Ethernet frames with EtherType 0x893a decode as MultiAP_Message,
# and a MultiAP_Message payload is parsed as IEEE 1905 TLVs.
bind_layers(Ether, MultiAP_Message, type=0x893a)
bind_layers(MultiAP_Message, IEEE1905_TLV, )
| [
"scapy.packet.bind_layers",
"scapy.fields.BitField",
"scapy.fields.XShortField",
"scapy.fields.XByteField",
"scapy.fields.XShortEnumField"
] | [((2890, 2937), 'scapy.packet.bind_layers', 'bind_layers', (['Ether', 'MultiAP_Message'], {'type': '(35130)'}), '(Ether, MultiAP_Message, type=35130)\n', (2901, 2937), False, 'from scapy.packet import Packet, bind_layers\n'), ((2939, 2981), 'scapy.packet.bind_layers', 'bind_layers', (['MultiAP_Message', 'IEEE1905_TLV'], {}), '(MultiAP_Message, IEEE1905_TLV)\n', (2950, 2981), False, 'from scapy.packet import Packet, bind_layers\n'), ((2543, 2574), 'scapy.fields.XByteField', 'XByteField', (['"""msg_version"""', 'None'], {}), "('msg_version', None)\n", (2553, 2574), False, 'from scapy.fields import BitField, XByteField, XShortField, XShortEnumField\n'), ((2584, 2616), 'scapy.fields.XByteField', 'XByteField', (['"""msg_reserved"""', 'None'], {}), "('msg_reserved', None)\n", (2594, 2616), False, 'from scapy.fields import BitField, XByteField, XShortField, XShortEnumField\n'), ((2626, 2678), 'scapy.fields.XShortEnumField', 'XShortEnumField', (['"""msg_type"""', 'None', 'ieee1905_msg_type'], {}), "('msg_type', None, ieee1905_msg_type)\n", (2641, 2678), False, 'from scapy.fields import BitField, XByteField, XShortField, XShortEnumField\n'), ((2688, 2715), 'scapy.fields.XShortField', 'XShortField', (['"""msg_id"""', 'None'], {}), "('msg_id', None)\n", (2699, 2715), False, 'from scapy.fields import BitField, XByteField, XShortField, XShortEnumField\n'), ((2725, 2752), 'scapy.fields.XByteField', 'XByteField', (['"""frag_id"""', 'None'], {}), "('frag_id', None)\n", (2735, 2752), False, 'from scapy.fields import BitField, XByteField, XShortField, XShortEnumField\n'), ((2762, 2798), 'scapy.fields.BitField', 'BitField', (['"""flag_last_frag_ind"""', '(0)', '(1)'], {}), "('flag_last_frag_ind', 0, 1)\n", (2770, 2798), False, 'from scapy.fields import BitField, XByteField, XShortField, XShortEnumField\n'), ((2808, 2840), 'scapy.fields.BitField', 'BitField', (['"""flag_relay_ind"""', '(0)', '(1)'], {}), "('flag_relay_ind', 0, 1)\n", (2816, 2840), False, 'from scapy.fields import 
BitField, XByteField, XShortField, XShortEnumField\n'), ((2850, 2881), 'scapy.fields.BitField', 'BitField', (['"""flag_reserved"""', '(0)', '(6)'], {}), "('flag_reserved', 0, 6)\n", (2858, 2881), False, 'from scapy.fields import BitField, XByteField, XShortField, XShortEnumField\n')] |
"""
Command line management utilities for ITEEBot. This module's command line
interface will act as the bot's entry point when installed.
"""
import click
from . import configurator as conf
from . import database as db
from .bot import ITEEBot
@click.group()
def cli():
    # Root command group; subcommands are attached below via cli.add_command.
    # (Intentionally no docstring, which click would surface as help text.)
    pass
@click.command("init-config")
@click.argument(
"config_path",
default="instance/config.json",
type=click.Path()
)
def create_config(config_path):
"""
Command for writing or updating a configuration file. Configuration file
path will default to instance/config.json.
* config_path (str) - Path to the configuration file
Example:
iteebot init-config /home/donkey/.iteebot/config.json
"""
conf.write_config_file(config_path)
@click.command("init-db")
@click.argument(
"config_path",
default="instance/config.json",
type=click.Path(exists=True)
)
def init_db(config_path):
"""
Initializes a database to the location defined in the configuration's DB
option.
* config_path (str) - Path to the configuration file
Example:
iteebot init-db /home/donkey/.iteebot/config.json
"""
config = conf.load_config(config_path)
db.init_db(config["DB"])
@click.command("run")
@click.option("--debug", default=False, help="Run in debug mode")
@click.argument(
"config_path",
default="instance/config.json",
type=click.Path(exists=True)
)
def run(debug, config_path):
"""
Runs the bot using configuration frome the specific location (or default
of instance/config.json). Optional debug flag can be set to run in debug
mode, which will print logs to stdout instead of using log files.
* config_path (str) - Path to the configuration file
* debug (bool) - Run in debug mode
Example:
iteebot run --debug /home/donkey/.iteebot/config.json
"""
config = conf.load_config(config_path)
bot = ITEEBot(config, debug)
bot.run()
# Register all subcommands with the CLI group.
for _command in (create_config, init_db, run):
    cli.add_command(_command)

if __name__ == "__main__":
    cli()
| [
"click.group",
"click.option",
"click.command",
"click.Path"
] | [((247, 260), 'click.group', 'click.group', ([], {}), '()\n', (258, 260), False, 'import click\n'), ((283, 311), 'click.command', 'click.command', (['"""init-config"""'], {}), "('init-config')\n", (296, 311), False, 'import click\n'), ((770, 794), 'click.command', 'click.command', (['"""init-db"""'], {}), "('init-db')\n", (783, 794), False, 'import click\n'), ((1239, 1259), 'click.command', 'click.command', (['"""run"""'], {}), "('run')\n", (1252, 1259), False, 'import click\n'), ((1261, 1325), 'click.option', 'click.option', (['"""--debug"""'], {'default': '(False)', 'help': '"""Run in debug mode"""'}), "('--debug', default=False, help='Run in debug mode')\n", (1273, 1325), False, 'import click\n'), ((393, 405), 'click.Path', 'click.Path', ([], {}), '()\n', (403, 405), False, 'import click\n'), ((876, 899), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (886, 899), False, 'import click\n'), ((1407, 1430), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (1417, 1430), False, 'import click\n')] |