max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
setup.py | chadoneba/django-planfix | 1 | 6622751 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Prefer an installed setuptools; fall back to bootstrapping it with
# ez_setup (legacy pattern for very old environments).
try:
    from setuptools import setup
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup
setup(
name='django-planfix',
version='0.4',
description='Add contanct and task to Planfix',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/chadoneba/django-planfix',
long_description=open('README.rst', 'r').read(),
packages=[
'planfix','planfix.management.commands','planfix.migrations','planfix.management'
],
zip_safe=False,
requires=[
'requests',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Utilities'
],
) | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Use setuptools if present; otherwise pull it in via the ez_setup
# bootstrap before importing setup().
try:
    from setuptools import setup
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup
setup(
name='django-planfix',
version='0.4',
description='Add contanct and task to Planfix',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/chadoneba/django-planfix',
long_description=open('README.rst', 'r').read(),
packages=[
'planfix','planfix.management.commands','planfix.migrations','planfix.management'
],
zip_safe=False,
requires=[
'requests',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Utilities'
],
) | en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 1.353011 | 1 |
tests/test_relative_strength_index.py | dibyajyotidash/https-github.com-kylejusticemagnuson-pyti | 635 | 6622752 | from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import relative_strength_index
class TestRelativeStrengthIndex(unittest.TestCase):
def setUp(self):
"""Create data to use for testing."""
self.data = SampleData().get_sample_close_data()
self.rsi_period_6_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, 91.128696376509808, 92.285403839188717, 80.894025017461274,
70.592511652751313, 77.951770109884649, 80.239605929360266,
70.277707266920117, 48.985014949691646, 52.57705771185794,
29.56369140946228, 26.022225467384374, 16.324760280103618,
21.03611935582866, 14.450447899471271, 14.399340568284714,
37.838548007732264, 55.142917715980929, 50.59361566108123,
43.932577726393127, 42.491462993376658, 51.948772620890892,
51.609285637335049, 38.336269106131539, 54.530400068283633,
45.780486043398241, 39.802974309817188, 23.233126351199488,
46.011558572428697, 53.622238465968238, 69.105150494992742,
71.943927752634877, 61.401768306657985, 44.82267986085872,
45.738292422095121, 51.282952118549289, 63.529113661498116,
66.172797931315301, 71.576348726382349, 68.569307930673602,
71.64205694415736, 75.538772783985877, 79.336902977248528,
60.902733049554506, 57.563547365591383, 63.029070885613358,
54.405071613499608, 38.057877659724475, 36.069340676148251,
35.551867201277034, 48.630430960266096, 45.463148398801508,
53.123523689152051, 35.576818846625244, 39.779600801796533,
37.488732721794584, 40.930916165630222, 37.139626791998928,
46.260584259310058, 43.348151988222661, 59.382590313669397,
60.591197175338664, 42.3532081852956, 62.815591971052257,
64.047199117793127, 44.526399707555605, 37.867766944276163,
32.926883858681308, 38.578727318762738, 45.537296891112497,
31.423697245000028, 29.642545839858357, 49.557967472745197,
36.771050674724215, 55.783922272827709, 60.850265479188977,
60.881597670697779, 44.866787790759361, 38.850530452564023,
37.156348420849575, 41.261293848032722, 48.819610310127324,
44.263098553227302, 40.881844554211057, 46.731306039053081,
53.854133357002794, 54.728745404768361, 61.325753528491738,
67.792188074305614, 72.537957869691667, 56.664633742766149,
66.56520901272998, 68.536344606136481, 70.721673114559167,
71.324374049055464, 65.674281500057276, 54.197082940262945,
60.461845412582441, 31.25312253767251, 37.503538994748723,
42.046913819284043, 45.307280972084463, 25.306147643535695,
10.512959368525003, 9.5014336746420298, 8.4166453804807304,
8.3820873579604438, 14.111318591298641, 11.685025961279209,
20.65996515862561, 17.694039794502245, 17.521324009591709,
8.8490257823457767, 9.3698586066620919, 7.4665597738277256,
17.148873648403978, 14.055923198144484, 10.156378730298115,
7.4037283218177805, 12.053100590713569, 10.53811332066924,
25.333373455819356]
self.rsi_period_8_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, 83.74284752608537, 75.96946564885495,
80.490137054219588, 81.983526674546667, 74.55846637012155,
57.264837834932379, 59.501394660820722, 38.362259492388972,
34.710619925495479, 23.945731338271358, 27.379877500246252,
20.310320855359052, 20.253855087073887, 37.809698498673697,
51.990703125851546, 48.582765106851404, 43.582541362843514,
42.50916707089258, 49.601916414104501, 49.369883919182122,
39.942200405447721, 52.024905041825861, 45.658135197528608,
41.167452613422348, 27.233397029554069, 45.152340120725469,
51.449547166103386, 65.039593582662647, 67.653150302178176,
59.709613911194225, 46.565572793708569, 47.256610740681815,
51.365100917086565, 60.897490376517084, 63.042323966087459,
67.495584787282723, 65.434078964944788, 67.846384422099959,
70.988436326409783, 74.200314568068876, 61.549503024941693,
59.151584620311134, 62.866539861761467, 56.81042956236201,
44.106947788205488, 42.448288369054524, 42.028967758173984,
50.322846355738633, 48.074072207880782, 53.083971350819432,
40.134731954707448, 42.93589935749435, 41.215459072106583,
43.401066237392961, 40.727556785126616, 46.480811088913121,
44.572874835377384, 55.484893235337545, 56.37126957702457,
44.232367090010179, 59.439912681746065, 60.431643962144456,
46.193449615658309, 40.898881381607907, 36.861327848228875,
40.787483517743432, 45.666472255959192, 34.830534792309251,
33.368585479985455, 47.880807749553838, 38.15728800508743,
53.231675938448284, 57.479746680396758, 57.505443490007785,
45.759020349263054, 41.056963100268781, 39.734701257792807,
42.558210405522132, 47.821966983512596, 44.701022921450722,
42.367504358881497, 46.211656127421485, 51.061411550381976,
51.661135467303595, 56.255989673528305, 61.107043819371263,
64.934729239492214, 55.11391303791622, 62.435681938638261,
63.970519220630983, 65.66221548791998, 66.12066484831152,
62.784212880382157, 55.636595393895853, 59.60733748851144,
37.667545161264741, 41.885033057845654, 44.944174880919768,
47.118093459179086, 30.8301816428866, 15.043559705306762,
13.816478713123985, 12.514236683060346, 12.473802863884089,
16.641092414477271, 14.463409950597409, 20.945479375484567,
18.717292910371967, 18.587980971319979, 11.193518363714105,
11.582457766777452, 9.7653340846728298, 16.90800205206591,
14.594269077494857, 11.424620834757548, 8.945086851889954,
12.473974212697996, 11.281727789650006, 22.415284637880248]
self.rsi_period_10_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, 80.382399161864811,
81.58297360799952, 75.721434246783815, 61.416457310356634,
63.033041416747935, 44.113161156152088, 40.604396885470429,
29.742114212280526, 32.422102432320955, 25.21394366994187,
25.155236117171285, 39.143302960913552, 51.071910062341495,
48.243694050081899, 44.089016517558669, 43.201607970756832,
48.914054247164799, 48.732527235700935, 41.227053752801318,
50.919606399372469, 45.826854315089811, 42.171708191280061,
30.049752911709177, 44.909852138547663, 50.314889030321169,
62.443899229961993, 64.851807409862261, 58.376274499138454,
47.330458997163682, 47.900894356054458, 51.25369437632294,
59.235604904143777, 61.070635039058558, 64.906801937622419,
63.320673549928046, 65.343205295182088, 68.005029130114806,
70.78431754824166, 61.190196581873671, 59.331226550164423,
62.198342414125172, 57.572881550264221, 47.339736611786314,
45.952135181740232, 45.607320085790207, 51.577755174521243,
49.83880562787823, 53.482434731949304, 43.264888695398618,
45.30903418461596, 43.93606152736691, 45.483853514142019,
43.425935322167604, 47.459192056993352, 46.051586106621635,
53.976794157654723, 54.645265825608782, 45.742583508406462,
57.471387533431276, 58.277325091890276, 47.293233939087905,
42.967147461659778, 39.601486397034741, 42.540543643550443,
46.215041647309448, 37.428884268109378, 36.193678284783815,
47.391170320790039, 39.50500571954332, 51.865056655425036,
55.488977253711553, 55.51065657697405, 46.215140099782296,
42.349549102920783, 41.262890464938629, 43.417280351603608,
47.452054449078041, 45.071367075450659, 43.289342419501764,
46.124569379604779, 49.753119249271819, 50.201807527824464,
53.65083228388503, 57.413613311765189, 60.483698025569829,
53.672576817758376, 59.400953966546119, 60.636758883775606,
61.992968663776942, 62.35636887763178, 60.131501419424886,
55.251850141024732, 58.105911946540154, 41.399935060427609,
44.460837486574761, 46.684294876443616, 48.257083944192289,
34.874539878819078, 19.177931796772313, 17.825060113615422,
16.390929545473469, 16.346953960508898, 19.592095128093803,
17.504375219974605, 22.53298531123572, 20.613919308674014,
20.502900337132814, 13.662549952743703, 13.971111646021569,
12.173903805406653, 17.806248012431396, 15.829536909459037,
12.98760468233867, 10.621412075858444, 13.468704666819292,
12.419802813127859, 21.359400151132036]
def test_relative_strength_index_period_6(self):
    """RSI of the sample closes with period 6 must match the golden values."""
    period = 6
    rsi = relative_strength_index.relative_strength_index(self.data, period)
    # NOTE(review): assert_array_equal compares floats exactly; the golden
    # values were presumably captured on one platform -- assert_allclose
    # would be more robust.  Left as-is to avoid loosening the test.
    np.testing.assert_array_equal(rsi, self.rsi_period_6_expected)
def test_relative_strength_index_period_8(self):
    """RSI with period 8 must match the golden values."""
    period = 8
    rsi = relative_strength_index.relative_strength_index(self.data, period)
    np.testing.assert_array_equal(rsi, self.rsi_period_8_expected)
def test_relative_strength_index_period_10(self):
    """RSI with period 10 must match the golden values."""
    period = 10
    rsi = relative_strength_index.relative_strength_index(self.data, period)
    np.testing.assert_array_equal(rsi, self.rsi_period_10_expected)
def test_relative_strength_index_invalid_period(self):
    """A period longer than the data series must raise with a fixed message."""
    period = 128
    with self.assertRaises(Exception) as cm:
        relative_strength_index.relative_strength_index(self.data, period)
    expected = "Error: data_len < period"
    self.assertEqual(str(cm.exception), expected)
| from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import relative_strength_index
class TestRelativeStrengthIndex(unittest.TestCase):
def setUp(self):
"""Create data to use for testing."""
self.data = SampleData().get_sample_close_data()
self.rsi_period_6_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, 91.128696376509808, 92.285403839188717, 80.894025017461274,
70.592511652751313, 77.951770109884649, 80.239605929360266,
70.277707266920117, 48.985014949691646, 52.57705771185794,
29.56369140946228, 26.022225467384374, 16.324760280103618,
21.03611935582866, 14.450447899471271, 14.399340568284714,
37.838548007732264, 55.142917715980929, 50.59361566108123,
43.932577726393127, 42.491462993376658, 51.948772620890892,
51.609285637335049, 38.336269106131539, 54.530400068283633,
45.780486043398241, 39.802974309817188, 23.233126351199488,
46.011558572428697, 53.622238465968238, 69.105150494992742,
71.943927752634877, 61.401768306657985, 44.82267986085872,
45.738292422095121, 51.282952118549289, 63.529113661498116,
66.172797931315301, 71.576348726382349, 68.569307930673602,
71.64205694415736, 75.538772783985877, 79.336902977248528,
60.902733049554506, 57.563547365591383, 63.029070885613358,
54.405071613499608, 38.057877659724475, 36.069340676148251,
35.551867201277034, 48.630430960266096, 45.463148398801508,
53.123523689152051, 35.576818846625244, 39.779600801796533,
37.488732721794584, 40.930916165630222, 37.139626791998928,
46.260584259310058, 43.348151988222661, 59.382590313669397,
60.591197175338664, 42.3532081852956, 62.815591971052257,
64.047199117793127, 44.526399707555605, 37.867766944276163,
32.926883858681308, 38.578727318762738, 45.537296891112497,
31.423697245000028, 29.642545839858357, 49.557967472745197,
36.771050674724215, 55.783922272827709, 60.850265479188977,
60.881597670697779, 44.866787790759361, 38.850530452564023,
37.156348420849575, 41.261293848032722, 48.819610310127324,
44.263098553227302, 40.881844554211057, 46.731306039053081,
53.854133357002794, 54.728745404768361, 61.325753528491738,
67.792188074305614, 72.537957869691667, 56.664633742766149,
66.56520901272998, 68.536344606136481, 70.721673114559167,
71.324374049055464, 65.674281500057276, 54.197082940262945,
60.461845412582441, 31.25312253767251, 37.503538994748723,
42.046913819284043, 45.307280972084463, 25.306147643535695,
10.512959368525003, 9.5014336746420298, 8.4166453804807304,
8.3820873579604438, 14.111318591298641, 11.685025961279209,
20.65996515862561, 17.694039794502245, 17.521324009591709,
8.8490257823457767, 9.3698586066620919, 7.4665597738277256,
17.148873648403978, 14.055923198144484, 10.156378730298115,
7.4037283218177805, 12.053100590713569, 10.53811332066924,
25.333373455819356]
self.rsi_period_8_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, 83.74284752608537, 75.96946564885495,
80.490137054219588, 81.983526674546667, 74.55846637012155,
57.264837834932379, 59.501394660820722, 38.362259492388972,
34.710619925495479, 23.945731338271358, 27.379877500246252,
20.310320855359052, 20.253855087073887, 37.809698498673697,
51.990703125851546, 48.582765106851404, 43.582541362843514,
42.50916707089258, 49.601916414104501, 49.369883919182122,
39.942200405447721, 52.024905041825861, 45.658135197528608,
41.167452613422348, 27.233397029554069, 45.152340120725469,
51.449547166103386, 65.039593582662647, 67.653150302178176,
59.709613911194225, 46.565572793708569, 47.256610740681815,
51.365100917086565, 60.897490376517084, 63.042323966087459,
67.495584787282723, 65.434078964944788, 67.846384422099959,
70.988436326409783, 74.200314568068876, 61.549503024941693,
59.151584620311134, 62.866539861761467, 56.81042956236201,
44.106947788205488, 42.448288369054524, 42.028967758173984,
50.322846355738633, 48.074072207880782, 53.083971350819432,
40.134731954707448, 42.93589935749435, 41.215459072106583,
43.401066237392961, 40.727556785126616, 46.480811088913121,
44.572874835377384, 55.484893235337545, 56.37126957702457,
44.232367090010179, 59.439912681746065, 60.431643962144456,
46.193449615658309, 40.898881381607907, 36.861327848228875,
40.787483517743432, 45.666472255959192, 34.830534792309251,
33.368585479985455, 47.880807749553838, 38.15728800508743,
53.231675938448284, 57.479746680396758, 57.505443490007785,
45.759020349263054, 41.056963100268781, 39.734701257792807,
42.558210405522132, 47.821966983512596, 44.701022921450722,
42.367504358881497, 46.211656127421485, 51.061411550381976,
51.661135467303595, 56.255989673528305, 61.107043819371263,
64.934729239492214, 55.11391303791622, 62.435681938638261,
63.970519220630983, 65.66221548791998, 66.12066484831152,
62.784212880382157, 55.636595393895853, 59.60733748851144,
37.667545161264741, 41.885033057845654, 44.944174880919768,
47.118093459179086, 30.8301816428866, 15.043559705306762,
13.816478713123985, 12.514236683060346, 12.473802863884089,
16.641092414477271, 14.463409950597409, 20.945479375484567,
18.717292910371967, 18.587980971319979, 11.193518363714105,
11.582457766777452, 9.7653340846728298, 16.90800205206591,
14.594269077494857, 11.424620834757548, 8.945086851889954,
12.473974212697996, 11.281727789650006, 22.415284637880248]
self.rsi_period_10_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, 80.382399161864811,
81.58297360799952, 75.721434246783815, 61.416457310356634,
63.033041416747935, 44.113161156152088, 40.604396885470429,
29.742114212280526, 32.422102432320955, 25.21394366994187,
25.155236117171285, 39.143302960913552, 51.071910062341495,
48.243694050081899, 44.089016517558669, 43.201607970756832,
48.914054247164799, 48.732527235700935, 41.227053752801318,
50.919606399372469, 45.826854315089811, 42.171708191280061,
30.049752911709177, 44.909852138547663, 50.314889030321169,
62.443899229961993, 64.851807409862261, 58.376274499138454,
47.330458997163682, 47.900894356054458, 51.25369437632294,
59.235604904143777, 61.070635039058558, 64.906801937622419,
63.320673549928046, 65.343205295182088, 68.005029130114806,
70.78431754824166, 61.190196581873671, 59.331226550164423,
62.198342414125172, 57.572881550264221, 47.339736611786314,
45.952135181740232, 45.607320085790207, 51.577755174521243,
49.83880562787823, 53.482434731949304, 43.264888695398618,
45.30903418461596, 43.93606152736691, 45.483853514142019,
43.425935322167604, 47.459192056993352, 46.051586106621635,
53.976794157654723, 54.645265825608782, 45.742583508406462,
57.471387533431276, 58.277325091890276, 47.293233939087905,
42.967147461659778, 39.601486397034741, 42.540543643550443,
46.215041647309448, 37.428884268109378, 36.193678284783815,
47.391170320790039, 39.50500571954332, 51.865056655425036,
55.488977253711553, 55.51065657697405, 46.215140099782296,
42.349549102920783, 41.262890464938629, 43.417280351603608,
47.452054449078041, 45.071367075450659, 43.289342419501764,
46.124569379604779, 49.753119249271819, 50.201807527824464,
53.65083228388503, 57.413613311765189, 60.483698025569829,
53.672576817758376, 59.400953966546119, 60.636758883775606,
61.992968663776942, 62.35636887763178, 60.131501419424886,
55.251850141024732, 58.105911946540154, 41.399935060427609,
44.460837486574761, 46.684294876443616, 48.257083944192289,
34.874539878819078, 19.177931796772313, 17.825060113615422,
16.390929545473469, 16.346953960508898, 19.592095128093803,
17.504375219974605, 22.53298531123572, 20.613919308674014,
20.502900337132814, 13.662549952743703, 13.971111646021569,
12.173903805406653, 17.806248012431396, 15.829536909459037,
12.98760468233867, 10.621412075858444, 13.468704666819292,
12.419802813127859, 21.359400151132036]
def test_relative_strength_index_period_6(self):
    """RSI of the sample closes with period 6 must match the golden values."""
    period = 6
    rsi = relative_strength_index.relative_strength_index(self.data, period)
    # NOTE(review): exact float comparison against captured golden values;
    # assert_allclose would tolerate platform rounding differences.
    np.testing.assert_array_equal(rsi, self.rsi_period_6_expected)
def test_relative_strength_index_period_8(self):
    """RSI with period 8 must match the golden values."""
    period = 8
    rsi = relative_strength_index.relative_strength_index(self.data, period)
    np.testing.assert_array_equal(rsi, self.rsi_period_8_expected)
def test_relative_strength_index_period_10(self):
    """RSI with period 10 must match the golden values."""
    period = 10
    rsi = relative_strength_index.relative_strength_index(self.data, period)
    np.testing.assert_array_equal(rsi, self.rsi_period_10_expected)
def test_relative_strength_index_invalid_period(self):
    """A period longer than the data series must raise with a fixed message."""
    period = 128
    with self.assertRaises(Exception) as cm:
        relative_strength_index.relative_strength_index(self.data, period)
    expected = "Error: data_len < period"
    self.assertEqual(str(cm.exception), expected)
| en | 0.893735 | Create data to use for testing. | 2.303478 | 2 |
normal_map.py | thinhnguyenuit/sombra | 10 | 6622753 | import numpy as np
# Local Modules
from constants import MAX_COLOR_VALUE
from texture import ImageTexture, SolidImageTexture
import utils
class NormalMap:
    """Perturbs an object's surface normal using a normal-map texture.

    The texture's RGB channels encode a tangent-space normal: R and G map
    to x and y in [-1, 1], B maps to z in [0, 1].
    """

    def __init__(self, texture, obj):
        # texture: an ImageTexture (sampled via the object's UV mapping)
        # or any other texture sampled directly at the 3D point.
        self.texture = texture
        self.obj = obj

    def get_normal(self, p):
        """Return the perturbed normal at surface point *p*.

        The decoded map normal is compared against the unperturbed
        (0, 0, 1), and the difference is added to the object's physical
        normal at *p*.
        """
        if isinstance(self.texture, ImageTexture):
            u, v = self.obj.uvmap(p)
            color = self.texture.get_color(u, v)
        else:
            color = self.texture.get_color(p)
        r, g, b = color[:3]
        # x and y will be from [-1 to 1] and z from [0 to 1].
        # Builtin float replaces np.float: that alias was just the builtin
        # and was removed in NumPy 1.24, so behavior is identical.
        x = 2 * (r / float(MAX_COLOR_VALUE)) - 1
        y = 2 * (g / float(MAX_COLOR_VALUE)) - 1
        z = (b / float(MAX_COLOR_VALUE))
        normal_vector = np.array([x, y, z])
        normal_vector = utils.normalize(normal_vector)
        local_diff = normal_vector - np.array([0, 0, 1])
        final_normal = self.obj.physical_normal_at(p) + local_diff
        return final_normal
| import numpy as np
# Local Modules
from constants import MAX_COLOR_VALUE
from texture import ImageTexture, SolidImageTexture
import utils
class NormalMap:
    """Perturbs an object's surface normal using a normal-map texture
    (R, G -> x, y in [-1, 1]; B -> z in [0, 1])."""
    def __init__(self, texture, obj):
        # texture: ImageTexture (UV-sampled) or a solid texture sampled
        # directly at the 3D point.
        self.texture = texture
        self.obj = obj
    def get_normal(self, p):
        """Return the perturbed normal at surface point *p*."""
        if isinstance(self.texture, ImageTexture):
            u, v = self.obj.uvmap(p)
            color = self.texture.get_color(u, v)
        else:
            color = self.texture.get_color(p)
        r, g, b = color[:3]
        # x and y will be from [-1 to 1] and z from [0 to 1]
        # NOTE(review): np.float was removed in NumPy 1.24 -- these two
        # calls fail on modern NumPy and should be plain float (as the z
        # line below already uses).
        x = 2 * (r / np.float(MAX_COLOR_VALUE)) - 1
        y = 2 * (g / np.float(MAX_COLOR_VALUE)) - 1
        z = (b / float(MAX_COLOR_VALUE))
        normal_vector = np.array([x, y, z])
        normal_vector = utils.normalize(normal_vector)
        # Difference against the unperturbed map normal (0, 0, 1), added
        # onto the object's geometric normal at p.
        local_diff = normal_vector - np.array([0, 0, 1])
        final_normal = self.obj.physical_normal_at(p) + local_diff
        return final_normal
| en | 0.954803 | # Local Modules # x and y will be from [-1 to 1] and z from [0 to 1] | 2.761683 | 3 |
dict/views.py | uglyboxer/slang | 0 | 6622754 | <reponame>uglyboxer/slang<filename>dict/views.py<gh_stars>0
import requests
import json
import urllib
import logging
import sys
from lxml import html
from django.shortcuts import render
from django.db.models import Q
from dict.models import Entry
from dict.utils import get_translation
def home_page(request):
    """Render the landing page with the five most-queried entries."""
    top_entries = Entry.objects.all().order_by('-queries')[:5]
    context = {'title': 'Slictionary', 'entries': top_entries}
    return render(request, 'home.html', context)
def search(request):
    """Serve a definition for GET['search-term'].

    A cached Entry is used when one exists (its popularity counter is
    bumped); otherwise the term is looked up on Urban Dictionary
    (default) or scraped from asihablamos.com when the ``spanish`` GET
    flag is set, cached, and rendered.
    """
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    logger = logging.getLogger(__name__)
    search_term = request.GET['search-term']
    search_term = search_term.lower()
    try:
        # Cache hit: bump the popularity counter and render directly.
        term_entry = Entry.objects.get(word=search_term)
        term_entry.queries += 1
        term_entry.save()
        return render(request, 'results.html', {'title': 'Slictionary',
                                                'word': term_entry.word,
                                                'definition': term_entry.definition,
                                                'translation': term_entry.translation})
    except Exception:
        # Cache miss (Entry.DoesNotExist or a save/render failure): fall
        # through to a remote lookup.  Narrowed from a bare ``except:``
        # so KeyboardInterrupt/SystemExit are no longer swallowed.
        # logger.exception('Error in db lookup')
        if not request.GET.get('spanish', ''):
            f = {'term': search_term}
            q = urllib.urlencode(f)
            url = 'http://api.urbandictionary.com/v0/define?' + q
            translation = ""
            try:
                r = requests.get(url)
                data = r.json()
                if data['list']:
                    # Cache the first definition for next time.
                    ret_def = data['list'][0]['definition']
                    entry = Entry(word=search_term, definition=ret_def, response1='',
                                  response2='')
                    entry.save()
                else:
                    ret_def = "No matching results found."
            except requests.exceptions.ConnectionError:
                # logger.exception('Error in urbandict call')
                ret_def = "Connection error, please try again later."
            return render(request, 'results.html', {'title': 'Slictionary',
                                                    'word': search_term,
                                                    'definition': ret_def,
                                                    'translation': translation})
        else:
            # Fallback values: the original left ``definition`` and
            # ``translation`` unassigned on ConnectionError, so the render
            # below raised UnboundLocalError.  Mirrors the English branch's
            # connection-error message.
            definition = "Connection error, please try again later."
            translation = ""
            try:
                # Scrape the definition block from asihablamos.com (MX).
                url_search_term = urllib.quote_plus(search_term)
                page = requests.get(
                    'http://www.asihablamos.com/word/palabra/{}.php?pais=MX'\
                    .format(url_search_term))
                tree = html.fromstring(page.content)
                ret_def = tree.xpath('//div[@class="definicion"]/div[2]/text()')
                if ret_def:
                    definition = ret_def[0]
                    translation = get_translation(ret_def)
                    entry = Entry(word=search_term,
                                  definition=definition,
                                  translation=translation,
                                  response1='',
                                  response2='')
                    entry.save()
                else:
                    definition = "No matching results found."
                    translation = ""
            except requests.exceptions.ConnectionError:
                # logger.exception('Error in asihablamos call')
                pass
            return render(request, 'results.html', {'title': 'Slictionary',
                                                    'word': search_term,
                                                    'definition': definition,
                                                    'translation': translation})
| import requests
import json
import urllib
import logging
import sys
from lxml import html
from django.shortcuts import render
from django.db.models import Q
from dict.models import Entry
from dict.utils import get_translation
def home_page(request):
    """Render the landing page listing the five most-queried entries."""
    entries = Entry.objects.all().order_by('-queries')[:5]
    return render(request, 'home.html', {'title': 'Slictionary',
                                         'entries': entries})
def search(request):
    """Serve a definition for GET['search-term'], from the local Entry
    cache when possible, otherwise from Urban Dictionary (default) or
    asihablamos.com (when the ``spanish`` GET flag is set)."""
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    logger = logging.getLogger(__name__)
    search_term = request.GET['search-term']
    search_term = search_term.lower()
    try:
        # Cache hit: bump the popularity counter and render directly.
        term_entry = Entry.objects.get(word=search_term)
        term_entry.queries += 1
        term_entry.save()
        return render(request, 'results.html', {'title': 'Slictionary',
                                                'word': term_entry.word,
                                                'definition': term_entry.definition,
                                                'translation': term_entry.translation})
    except:
        # NOTE(review): bare ``except:`` -- presumably intended for
        # Entry.DoesNotExist, but it also swallows save()/render()
        # failures and KeyboardInterrupt.
        # logger.exception('Error in db lookup')
        if not request.GET.get('spanish', ''):
            f = {'term': search_term}
            q = urllib.urlencode(f)
            url = 'http://api.urbandictionary.com/v0/define?' + q
            translation = ""
            try:
                r = requests.get(url)
                data = r.json()
                if data['list']:
                    # Cache the first definition for next time.
                    ret_def = data['list'][0]['definition']
                    entry = Entry(word=search_term, definition=ret_def, response1='',
                                  response2='')
                    entry.save()
                else:
                    ret_def = "No matching results found."
            except requests.exceptions.ConnectionError:
                # logger.exception('Error in urbandict call')
                ret_def = "Connection error, please try again later."
            return render(request, 'results.html', {'title': 'Slictionary',
                                                    'word': search_term,
                                                    'definition': ret_def,
                                                    'translation': translation})
        else:
            try:
                # Scrape the definition block from asihablamos.com (MX).
                url_search_term = urllib.quote_plus(search_term)
                page = requests.get(
                    'http://www.asihablamos.com/word/palabra/{}.php?pais=MX'\
                    .format(url_search_term))
                tree = html.fromstring(page.content)
                ret_def = tree.xpath('//div[@class="definicion"]/div[2]/text()')
                if ret_def:
                    definition = ret_def[0]
                    translation = get_translation(ret_def)
                    entry = Entry(word=search_term,
                                  definition=definition,
                                  translation=translation,
                                  response1='',
                                  response2='')
                    entry.save()
                else:
                    definition = "No matching results found."
                    translation = ""
            except requests.exceptions.ConnectionError:
                # NOTE(review): on ConnectionError ``definition`` and
                # ``translation`` are never assigned, so the render below
                # raises UnboundLocalError -- fallbacks should be set here.
                # logger.exception('Error in asihablamos call')
                pass
            return render(request, 'results.html', {'title': 'Slictionary',
                                                    'word': search_term,
                                                    'definition': definition,
                                                    'translation': translation})
mocks/openedx/core/lib/api/__init__.py | appsembler/course-cccess-groups | 4 | 6622755 | <reponame>appsembler/course-cccess-groups<filename>mocks/openedx/core/lib/api/__init__.py
"""
Mocks for openedx.core.lib.api.
"""
| """
Mocks for openedx.core.lib.api.
""" | en | 0.57496 | Mocks for openedx.core.lib.api. | 1.046962 | 1 |
server/utils.py | cyy0523xc/openpose-server | 0 | 6622756 | # -*- coding: utf-8 -*-
#
#
# Author: alex
# Created Time: 2019年09月09日 星期一 15时51分40秒
import re
import io
import cv2
import base64
from PIL import Image
import numpy as np
def parse_input_image(image='', image_path='', image_type='jpg'):
    """Decode an input image into an OpenCV BGR array.

    :param image: base64-encoded image data, optionally carrying a
        ``data:image/...;base64,`` prefix (which also reveals the type)
    :param image_path: path of an image file on disk
    :param image_type: input image type, ``jpg`` or ``png``
    :return: decoded image as a BGR numpy array
    """
    if not image and not image_path:
        raise Exception('image参数和image_path参数必须有一个不为空')
    if not image:
        return cv2.imread(image_path)
    # Auto-detect the real type from a data-URI prefix, when present.
    header = re.findall('^data:image/.+;base64,', image)
    if header and 'png' in header[0]:
        image_type = 'png'
    raw = base64.b64decode(re.sub('^data:image/.+;base64,', '', image))
    pil_img = Image.open(io.BytesIO(raw))
    if image_type == 'png':
        # Flatten transparency onto a white background (JPG has no alpha).
        canvas = Image.new("RGB", pil_img.size, (255, 255, 255))
        canvas.paste(pil_img, pil_img)
        pil_img = canvas
    return cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)
def parse_output_image(out_img):
    """Encode an OpenCV BGR image as a base64 JPEG string."""
    rgb = Image.fromarray(cv2.cvtColor(out_img, cv2.COLOR_BGR2RGB))
    buf = io.BytesIO()
    rgb.save(buf, format='JPEG')
    return str(base64.b64encode(buf.getvalue()), encoding='utf8')
| # -*- coding: utf-8 -*-
#
#
# Author: alex
# Created Time: 2019年09月09日 星期一 15时51分40秒
import re
import io
import cv2
import base64
from PIL import Image
import numpy as np
def parse_input_image(image='', image_path='', image_type='jpg'):
    """Decode an input image (base64 string or file on disk) into an
    OpenCV BGR array.

    :param image: image data, base64 encoded (optionally with a
        ``data:image/...;base64,`` prefix)
    :param image_path: path of the image file
    :param image_type: input image type, ``jpg`` or ``png``
    :return: image as a BGR numpy array
    """
    if not image and not image_path:
        raise Exception('image参数和image_path参数必须有一个不为空')
    if image:
        # Auto-detect the real type from a data-URI prefix, if any.
        type_str = re.findall('^data:image/.+;base64,', image)
        if len(type_str) > 0:
            if 'png' in type_str[0]:
                image_type = 'png'
        image = re.sub('^data:image/.+;base64,', '', image)
        image = base64.b64decode(image)
        image = Image.open(io.BytesIO(image))
        if image_type == 'png':  # flatten onto white first (JPG has no alpha)
            bg = Image.new("RGB", image.size, (255, 255, 255))
            bg.paste(image, image)
            image = bg
        return cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
    return cv2.imread(image_path)
def parse_output_image(out_img):
    """Encode an OpenCV BGR image as a base64 JPEG string."""
    out_img = Image.fromarray(cv2.cvtColor(out_img, cv2.COLOR_BGR2RGB))
    output_buffer = io.BytesIO()
    out_img.save(output_buffer, format='JPEG')
    binary_data = output_buffer.getvalue()
    return str(base64.b64encode(binary_data), encoding='utf8')
| zh | 0.807897 | # -*- coding: utf-8 -*- # # # Author: alex # Created Time: 2019年09月09日 星期一 15时51分40秒 人脸检测(输入的是base64编码的图像) :param image 图片对象使用base64编码 :param image_path 图片路径 :param image_type 输入图像类型, 取值jpg或者png :return image # 自动判断类型 # 先转化为jpg cv2转base64字符串 | 3.125835 | 3 |
Problem 27 - Quadratic primes/quadprim.py | kameranis/Project_Euler | 0 | 6622757 | """
Quadratic primes
Finds a*b where n^2 + a*n + b yields the most primes
<NAME> 21.11.2013
"""
def quad(n, a, b):
    """Evaluate the quadratic n^2 + a*n + b at n.

    The original returned n*n + b*n + a -- coefficients swapped relative
    to the header comment.  The (a, b) grid search below is symmetric, so
    the script's printed answer is unchanged by this fix.
    """
    return n * n + a * n + b
array=[0]*100000
array[0]=1
array[1]=1
for i in xrange(100000):
if array[i]:
continue
for x in xrange(i, 100000/i):
array[i*x]=1
maxi = 0
maxproduct = 0
for a in range (-999, 1000):
for b in range(-999, 1000):
n = -1
product = 2
while not array[product]:
n += 1
product = quad(n, a, b)
if n > maxi:
maxi = n
maxproduct = a * b
print maxi, maxproduct
| """
Quadratic primes
Finds a*b where n^2 + a*n + b yields the most primes
<NAME> 21.11.2013
"""
def quad(n, a, b):
    # NOTE(review): despite the header comment ("n^2 + a*n + b") this
    # returns n*n + b*n + a -- a and b are swapped.  The grid search
    # below is symmetric in (a, b), so the final answer is unaffected.
    return n*n + b*n + a
# Composite sieve over [0, 100000): array[i] == 1 marks "not prime".
array=[0]*100000
array[0]=1
array[1]=1
for i in xrange(100000):
    if array[i]:
        continue
    # NOTE(review): xrange(i, 100000/i) excludes the upper bound, so the
    # last multiple i*(100000//i) is never marked (e.g. 99999 stays
    # "prime").
    for x in xrange(i, 100000/i):
        array[i*x]=1
# Search every coefficient pair for the longest run of consecutive
# primes produced by quad(), starting at n = 0.
maxi = 0
maxproduct = 0
for a in range (-999, 1000):
    for b in range(-999, 1000):
        n = -1
        product = 2
        # NOTE(review): a negative product wraps around as a negative
        # list index here, testing an arbitrary high slot of the sieve.
        while not array[product]:
            n += 1
            product = quad(n, a, b)
        if n > maxi:
            maxi = n
            maxproduct = a * b
print maxi, maxproduct
| en | 0.36535 | Quadratic primes Finds a*b where n^2 + a*n + b yields the most primes <NAME> 21.11.2013 | 3.740986 | 4 |
chapter2/intogen-arrays/lib/pubmed.py | chris-zen/phd-thesis | 1 | 6622758 | import urllib
import urllib2
from lxml import etree
class Pubmed():
    """Minimal client for the NCBI Entrez ``efetch`` endpoint that
    fetches PubMed article metadata.  (Python 2 code: urllib2 and
    basestring.)"""
    def __init__(self):
        # efetch endpoint; queried with db=pubmed and retmode=xml.
        self.__url = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi"
    def find(self, pmid):
        """Fetch metadata for one PMID (string) or several (list of
        strings); return a list of dicts with title/journal/volume/
        issue/date/authors/short_authors keys."""
        if isinstance(pmid, basestring):
            pmid = [pmid]
        pmid = ",".join(pmid)
        # POSTed form body: comma-separated id list, XML response.
        data = urllib.urlencode({"db" : "pubmed", "id" : pmid, "retmode" : "xml"})
        req = urllib2.Request(self.__url, data)
        response = urllib2.urlopen(req)
        doc = etree.parse(response, etree.XMLParser(encoding="utf-8"))
        root = doc.getroot()
        articles = []
        for a in root.findall("PubmedArticle"):
            a = a.find("MedlineCitation/Article")
            if a is not None:
                article = {}
                article["title"] = a.findtext("ArticleTitle")
                article["journal"] = a.findtext("Journal/Title")
                article["volume"] = a.findtext("Journal/JournalIssue/Volume")
                article["issue"] = a.findtext("Journal/JournalIssue/Issue")
                # Build "Year-Month-Day", dropping trailing parts that are
                # missing; empty string when there is no year at all.
                pubdate = a.find("Journal/JournalIssue/PubDate")
                if pubdate is not None:
                    year = pubdate.findtext("Year")
                    month = pubdate.findtext("Month")
                    day = pubdate.findtext("Day")
                    if year is not None:
                        if month is not None:
                            if day is not None:
                                if len(day) < 2:
                                    # zero-pad one-digit days
                                    day = ("0" * (2 - len(day))) + day
                                date = "{0}-{1}-{2}".format(year, month, day)
                            else:
                                date = "{0}-{1}".format(year, month)
                        else:
                            date = "{0}".format(year)
                    else:
                        date = ""
                else:
                    date = ""
                article["date"] = date
                authors = []
                # NOTE(review): a.find("AuthorList") can be None for
                # articles without authors, which would make this loop
                # raise TypeError -- confirm against real efetch output.
                for auth in a.find("AuthorList"):
                    last_name = auth.findtext("LastName")
                    initials = auth.findtext("Initials")
                    if last_name:
                        if initials:
                            authors += [last_name + " " + initials]
                        else:
                            authors += [last_name]
                # "First et al" when there are multiple authors.
                if len(authors) > 0:
                    if len(authors) > 1:
                        article["short_authors"] = authors[0] + " et al"
                    else:
                        article["short_authors"] = authors[0]
                else:
                    article["short_authors"] = ""
                article["authors"] = authors
                # Trim whitespace on every string-valued field.
                for k,v in article.items():
                    if v is not None and isinstance(v, basestring):
                        article[k] = v.strip()
                articles += [article]
        return articles
def find(pmid):
    """Module-level convenience wrapper around :meth:`Pubmed.find`."""
    client = Pubmed()
    return client.find(pmid)
import urllib2
from lxml import etree
class Pubmed():
def __init__(self):
self.__url = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi"
def find(self, pmid):
if isinstance(pmid, basestring):
pmid = [pmid]
pmid = ",".join(pmid)
data = urllib.urlencode({"db" : "pubmed", "id" : pmid, "retmode" : "xml"})
req = urllib2.Request(self.__url, data)
response = urllib2.urlopen(req)
doc = etree.parse(response, etree.XMLParser(encoding="utf-8"))
root = doc.getroot()
articles = []
for a in root.findall("PubmedArticle"):
a = a.find("MedlineCitation/Article")
if a is not None:
article = {}
article["title"] = a.findtext("ArticleTitle")
article["journal"] = a.findtext("Journal/Title")
article["volume"] = a.findtext("Journal/JournalIssue/Volume")
article["issue"] = a.findtext("Journal/JournalIssue/Issue")
pubdate = a.find("Journal/JournalIssue/PubDate")
if pubdate is not None:
year = pubdate.findtext("Year")
month = pubdate.findtext("Month")
day = pubdate.findtext("Day")
if year is not None:
if month is not None:
if day is not None:
if len(day) < 2:
day = ("0" * (2 - len(day))) + day
date = "{0}-{1}-{2}".format(year, month, day)
else:
date = "{0}-{1}".format(year, month)
else:
date = "{0}".format(year)
else:
date = ""
else:
date = ""
article["date"] = date
authors = []
for auth in a.find("AuthorList"):
last_name = auth.findtext("LastName")
initials = auth.findtext("Initials")
if last_name:
if initials:
authors += [last_name + " " + initials]
else:
authors += [last_name]
if len(authors) > 0:
if len(authors) > 1:
article["short_authors"] = authors[0] + " et al"
else:
article["short_authors"] = authors[0]
else:
article["short_authors"] = ""
article["authors"] = authors
for k,v in article.items():
if v is not None and isinstance(v, basestring):
article[k] = v.strip()
#else:
# article[k] = ""
articles += [article]
return articles
def find(pmid):
return Pubmed().find(pmid) | en | 0.636004 | #else: # article[k] = "" | 3.311635 | 3 |
migrate_tool/services/url_list.py | zhengsunf/kitmanzheng | 0 | 6622759 | # -*- coding: utf-8 -*-
from migrate_tool import storage_service
from migrate_tool import task
from logging import getLogger
import requests
import urlparse
import hashlib
logger = getLogger(__name__)
class UrlListService(storage_service.StorageService):
    """Read-only storage service backed by a plain-text list of urls.

    Each line of the url list file is either ``<url>`` or
    ``<checksum> <url>``, where the checksum is an md5/sha1 hex digest
    used to validate downloads.
    """

    def __init__(self, *args, **kwargs):
        self._url_list_file = kwargs['url_list_file']
        self._timeout = float(kwargs['timeout'])
        self._chunk_size = 1024
        # Optional checksum algorithm for downloads: 'md5' or 'sha1'.
        self._validator_method = kwargs.get('validator')

    def _new_validator(self):
        """Return a fresh hash object for the configured algorithm, or None."""
        if self._validator_method == "sha1":
            return hashlib.sha1()
        if self._validator_method == "md5":
            return hashlib.md5()
        return None

    def download(self, task, local_path):
        """Download ``task.other`` (a url) to *local_path*, retrying up to 5 times.

        ``task.size`` carries the expected hex digest when validation is
        enabled. Raises IOError when every attempt fails or the checksum
        never matches.
        """
        url_path = task.other
        expected_crc = task.size  # the size field stores the sha1 or md5 of the file
        for _ in range(5):
            validator = self._new_validator()
            try:
                ret = requests.get(url_path, timeout=self._timeout)
                if ret.status_code != 200:
                    raise IOError("NOTICE: download failed")
                with open(local_path, 'wb') as fd:
                    for chunk in ret.iter_content(self._chunk_size):
                        if validator:
                            validator.update(chunk)
                        fd.write(chunk)
                    fd.flush()
                if validator:
                    actual_crc = validator.hexdigest()
                    # Accept both lower- and upper-case digests in the list file.
                    if expected_crc not in (actual_crc, actual_crc.upper()):
                        logger.debug("{}".format(str({'expected_crc:': expected_crc, 'actual_crc:': actual_crc})))
                        raise IOError("NOTICE: downloaded file content not valid")
                break
            except Exception:
                # BUG FIX: was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit; only retry on real errors.
                logger.exception("download failed")
        else:
            raise IOError("NOTICE: download failed with retry 5")

    def upload(self, task, local_path):
        # Uploading is not supported for a url-list source.
        raise NotImplementedError

    def list(self):
        """Yield a ``task.Task`` for every parseable line of the url list file."""
        with open(self._url_list_file, 'r') as f:
            for line in f:
                try:
                    field = line.split()
                    if len(field) < 1:
                        logger.warn("{} is invalid".format(line))
                        continue
                    check_value = None
                    if len(field) == 1:
                        url_path = field[0]
                    else:
                        check_value = field[0].strip()
                        url_path = field[1]
                    ret = urlparse.urlparse(url_path)
                    if ret.path == '':
                        # NOTE(review): this only warns; the line is still
                        # yielded below, matching the original behaviour.
                        logger.warn("{} is invalid, No path".format(line))
                    logger.info("yield new object: {}".format(str({'store_path': ret.path.strip(), 'url_path': url_path.strip()})))
                    yield task.Task(ret.path.strip()[1:], check_value, url_path.strip())
                except Exception:
                    logger.warn("{} is invalid".format(line))

    def exists(self, _path):
        # Existence checks are not supported for a url-list source.
        raise NotImplementedError
| # -*- coding: utf-8 -*-
from migrate_tool import storage_service
from migrate_tool import task
from logging import getLogger
import requests
import urlparse
import hashlib
logger = getLogger(__name__)
class UrlListService(storage_service.StorageService):
def __init__(self, *args, **kwargs):
self._url_list_file = kwargs['url_list_file']
self._timeout = float(kwargs['timeout'])
self._chunk_size = 1024
self._validator_method = None
if 'validator' in kwargs:
self._validator_method = kwargs['validator']
def download(self, task, local_path):
url_path = task.other
expected_crc = task.size # size stores the sha1 or md5 of file
for i in range(5):
validator = None
if self._validator_method:
if self._validator_method == "sha1":
validator = hashlib.sha1()
elif self._validator_method == "md5":
validator = hashlib.md5()
else:
validator = None
try:
ret = requests.get(url_path, timeout=self._timeout)
if ret.status_code == 200:
with open(local_path, 'wb') as fd:
for chunk in ret.iter_content(self._chunk_size):
if validator:
validator.update(chunk)
fd.write(chunk)
fd.flush()
# validate
if validator:
actual_crc = validator.hexdigest()
actual_crc_upper = actual_crc.upper()
if actual_crc != expected_crc and actual_crc_upper != expected_crc:
logger.debug("{}".format(str({'expected_crc:': expected_crc, 'actual_crc:': actual_crc})))
raise IOError("NOTICE: downloaded file content not valid")
break
else:
# print "task: ", task
raise IOError("NOTICE: download failed")
except:
logger.exception("download failed")
else:
raise IOError("NOTICE: download failed with retry 5")
def upload(self, task, local_path):
raise NotImplementedError
def list(self):
with open(self._url_list_file, 'r') as f:
for line in f:
try:
field = line.split()
if len(field) < 1:
logger.warn("{} is invalid".format(line))
continue
check_value = None
url_path = None
if len(field) == 1:
url_path = field[0]
else:
check_value = field[0].strip()
url_path = field[1]
ret = urlparse.urlparse(url_path)
if ret.path == '':
logger.warn("{} is invalid, No path".format(line))
logger.info("yield new object: {}".format(str({'store_path': ret.path.strip(), 'url_path': url_path.strip()})))
yield task.Task(ret.path.strip()[1:], check_value, url_path.strip())
except Exception:
logger.warn("{} is invalid".format(line))
def exists(self, _path):
raise NotImplementedError
| en | 0.699443 | # -*- coding: utf-8 -*- # size stores the sha1 or md5 of file # validate # print "task: ", task | 2.297465 | 2 |
torchlite/data/fetcher.py | EKami/EzeeML | 35 | 6622760 | import urllib.request
import os
from kaggle_data.downloader import KaggleDataDownloader
from tqdm import tqdm
class KaggleDatasetFetcher:
    """
    A tool used to automatically download datasets from Kaggle
    TODO: Use https://github.com/Kaggle/kaggle-api
    """

    @staticmethod
    def download_dataset(competition_name: str, competition_files: list,
                         competition_files_ext: list, output_folder: str):
        """
        Download the competition files if any of them is missing locally.

        Credentials are read from the $KAGGLE_USER and $KAGGLE_PASSWD
        environment variables and the competition rules must already have
        been accepted. Uses https://github.com/EKami/kaggle-data-downloader
        and assumes everything is properly installed.

        Args:
            competition_name (str): The name of the competition
            competition_files (list): Uncompressed file names of the competition
            competition_files_ext (list): Archive extension for each entry of
                competition_files, in the same order. Ex: 'zip', '7z', 'xz'
            output_folder (str): Path to save the downloaded files
        Returns:
            tuple: (file_names, files_path)
        """
        assert len(competition_files) == len(competition_files_ext), \
            "Length of competition_files and competition_files_ext do not match"

        datasets_path = [output_folder + name for name in competition_files]
        already_there = all(os.path.exists(path) for path in datasets_path)

        if already_there:
            print("All datasets are present.")
        else:
            # Put your Kaggle user name and password in $KAGGLE_USER and
            # $KAGGLE_PASSWD env vars respectively.
            downloader = KaggleDataDownloader(os.getenv("KAGGLE_USER"), os.getenv("KAGGLE_PASSWD"), competition_name)
            archives = ["{}.{}".format(name, ext)
                        for name, ext in zip(competition_files, competition_files_ext)]
            for archive in archives:
                downloader.download_dataset(archive, output_folder)

            # Decompress every downloaded archive, then delete the archive.
            for archive_path in [output_folder + archive for archive in archives]:
                downloader.decompress(archive_path, output_folder)
                os.remove(archive_path)

        return competition_files, datasets_path
class TqdmUpTo(tqdm):
    """tqdm subclass whose `update_to` hook fits urlretrieve's reporthook."""

    def update_to(self, b=1, bsize=1, tsize=None):
        """
        b : int, optional
            Number of blocks transferred so far [default: 1].
        bsize : int, optional
            Size of each block (in tqdm units) [default: 1].
        tsize : int, optional
            Total size (in tqdm units). If [default: None] remains unchanged.
        """
        if tsize is not None:
            self.total = tsize
        transferred = b * bsize
        # tqdm.update takes a delta, so subtract what was already counted;
        # this also sets self.n = transferred.
        self.update(transferred - self.n)
class WebFetcher:
    """
    A tool used to automatically download datasets from the web
    """

    @staticmethod
    def download_dataset(url: str, output_folder: str, decompress: bool):
        """
        Downloads the dataset and return the input paths.
        Do not download again if the data is already present.
        Args:
            url (str): Http link to the archive
            output_folder (str): Path to save the downloaded files
            decompress (bool): To uncompress the downloaded archive
        Returns:
            tuple: (file_name, file_path)
        """
        file_name = os.path.split(url)[-1]
        output_file_arch = os.path.join(output_folder, file_name)
        if not os.path.exists(output_file_arch):
            if not os.path.exists(output_folder):
                os.makedirs(output_folder)
            print('Beginning file download...')
            with TqdmUpTo(unit='B', unit_scale=True, miniters=1,
                          desc="Downloading {}".format(file_name)) as t:
                file, _ = urllib.request.urlretrieve(url, output_file_arch, reporthook=t.update_to)
            if decompress:
                # BUG FIX: "Unzipping file..." used to print even when
                # decompress was False; only announce it when we actually do it.
                print("Unzipping file...")
                KaggleDataDownloader.decompress(file, output_folder)
        else:
            print("File already exists.")
        return file_name, output_file_arch
| import urllib.request
import os
from kaggle_data.downloader import KaggleDataDownloader
from tqdm import tqdm
class KaggleDatasetFetcher:
"""
A tool used to automatically download datasets from Kaggle
TODO: Use https://github.com/Kaggle/kaggle-api
"""
@staticmethod
def download_dataset(competition_name: str, competition_files: list,
competition_files_ext: list, output_folder: str):
"""
Downloads the dataset and return the input paths.
Do not download again if the data is already present.
You need to define $KAGGLE_USER and $KAGGLE_PASSWD in your environment
and you must accept the competition rules beforehand.
This downloader uses https://github.com/EKami/kaggle-data-downloader
and assumes everything is properly installed.
Args:
competition_name (str): The name of the competition
competition_files (list): List of files for the competition (in their uncompressed format)
competition_files_ext (list): List of extensions for the competition files in the same order
as competition_files. Ex: 'zip', '7z', 'xz'
output_folder (str): Path to save the downloaded files
Returns:
tuple: (file_names, files_path)
"""
assert len(competition_files) == len(competition_files_ext), \
"Length of competition_files and competition_files_ext do not match"
datasets_path = [output_folder + f for f in competition_files]
is_dataset_present = True
for file in datasets_path:
if not os.path.exists(file):
is_dataset_present = False
if not is_dataset_present:
# Put your Kaggle user name and password in a $KAGGLE_USER and $KAGGLE_PASSWD env vars respectively
downloader = KaggleDataDownloader(os.getenv("KAGGLE_USER"), os.getenv("KAGGLE_PASSWD"), competition_name)
zipfiles = [file + "." + ext for file, ext in zip(competition_files, competition_files_ext)]
for file in zipfiles:
downloader.download_dataset(file, output_folder)
# Unzip the files
zipdatasets_path = [output_folder + f for f in zipfiles]
for path in zipdatasets_path:
downloader.decompress(path, output_folder)
os.remove(path)
else:
print("All datasets are present.")
return competition_files, datasets_path
class TqdmUpTo(tqdm):
"""Provides `update_to(n)` which uses `tqdm.update(delta_n)`."""
def update_to(self, b=1, bsize=1, tsize=None):
"""
b : int, optional
Number of blocks transferred so far [default: 1].
bsize : int, optional
Size of each block (in tqdm units) [default: 1].
tsize : int, optional
Total size (in tqdm units). If [default: None] remains unchanged.
"""
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n) # will also set self.n = b * bsize
class WebFetcher:
"""
A tool used to automatically download datasets from the web
"""
@staticmethod
def download_dataset(url: str, output_folder: str, decompress: bool):
"""
Downloads the dataset and return the input paths.
Do not download again if the data is already present.
Args:
url (str): Http link to the archive
output_folder (str): Path to save the downloaded files
decompress (bool): To uncompress the downloaded archive
Returns:
tuple: (file_name, file_path)
"""
file_name = os.path.split(url)[-1]
output_file_arch = os.path.join(output_folder, file_name)
if not os.path.exists(output_file_arch):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
print('Beginning file download...')
with TqdmUpTo(unit='B', unit_scale=True, miniters=1,
desc="Downloading {}".format(file_name)) as t:
file, _ = urllib.request.urlretrieve(url, output_file_arch, reporthook=t.update_to)
print("Unzipping file...")
if decompress:
KaggleDataDownloader.decompress(file, output_folder)
else:
print("File already exists.")
return file_name, output_file_arch
| en | 0.726472 | A tool used to automatically download datasets from Kaggle TODO: Use https://github.com/Kaggle/kaggle-api Downloads the dataset and return the input paths. Do not download again if the data is already present. You need to define $KAGGLE_USER and $KAGGLE_PASSWD in your environment and you must accept the competition rules beforehand. This downloader uses https://github.com/EKami/kaggle-data-downloader and assumes everything is properly installed. Args: competition_name (str): The name of the competition competition_files (list): List of files for the competition (in their uncompressed format) competition_files_ext (list): List of extensions for the competition files in the same order as competition_files. Ex: 'zip', '7z', 'xz' output_folder (str): Path to save the downloaded files Returns: tuple: (file_names, files_path) # Put your Kaggle user name and password in a $KAGGLE_USER and $KAGGLE_PASSWD env vars respectively # Unzip the files Provides `update_to(n)` which uses `tqdm.update(delta_n)`. b : int, optional Number of blocks transferred so far [default: 1]. bsize : int, optional Size of each block (in tqdm units) [default: 1]. tsize : int, optional Total size (in tqdm units). If [default: None] remains unchanged. # will also set self.n = b * bsize A tool used to automatically download datasets from the web Downloads the dataset and return the input paths. Do not download again if the data is already present. Args: url (str): Http link to the archive output_folder (str): Path to save the downloaded files decompress (bool): To uncompress the downloaded archive Returns: tuple: (file_name, file_path) | 2.914596 | 3 |
src/solutions/common/bizz/forms/statistics.py | goubertbrent/oca-backend | 0 | 6622761 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from datetime import datetime
from google.appengine.ext import ndb
from rogerthat.rpc import users
from rogerthat.service.api.forms import service_api
from rogerthat.to.forms import FormSectionValueTO, TextInputComponentValueTO, MultiSelectComponentValueTO, \
DatetimeComponentValueTO, FileComponentValueTO, LocationComponentValueTO
from solutions.common.bizz.forms.util import remove_sensitive_answers
from solutions.common.models.forms import FormStatisticsShardConfig, FormStatisticsShard
from solutions.common.to.forms import FormStatisticsTO
def get_random_shard_number(form_id):
    """Pick a random shard number for *form_id*, creating and persisting the
    shard config entity on first use."""
    config_key = FormStatisticsShardConfig.create_key(form_id)
    config = config_key.get()
    if not config:
        config = FormStatisticsShardConfig(key=config_key)
        config.put()
    return config.get_random_shard()
@ndb.transactional(xg=True)
def update_form_statistics(service_user, submission, shard_number):
    # type: (users.User, FormSubmission, int) -> None
    """Apply a new submission to its statistics shard and persist both the
    submission (with its shard id) and the shard, within one transaction."""
    with users.set_user(service_user):
        form = service_api.get_form(submission.form_id)
        shard = _update_form_statistics(submission, form, shard_number)
        submission.statistics_shard_id = shard.key.id()
        ndb.put_multi([submission, shard])
@ndb.transactional()
def _update_form_statistics(submission, form, shard_number):
    # type: (FormSubmission, DynamicFormTO, int) -> FormStatisticsShard
    """Merge one submission into its statistics shard and return the shard
    (not yet persisted)."""
    shard_id = FormStatisticsShard.SHARD_KEY_TEMPLATE % (submission.form_id, shard_number)
    shard_key = FormStatisticsShard.create_key(shard_id)
    shard = shard_key.get() or FormStatisticsShard(key=shard_key)
    # Sensitive answers are stripped before they ever reach the statistics.
    section_values = FormSectionValueTO.from_list(submission.sections)
    sections = remove_sensitive_answers(form.sections, section_values)
    shard.data = _get_shard_data(shard.data or {}, sections)
    shard.count += 1
    return shard
def _get_shard_data(summarized_data, sections):
    # type: (dict, list[FormSectionValueTO]) -> dict
    """Merge one submission's section values into the shard data, in place.

    Per component the accumulated payload is either a ``{value: count}``
    dict (text / multi-select / datetime components) or a list of
    statistics dicts (file / location components).
    """
    for section in sections:
        if section.id not in summarized_data:
            summarized_data[section.id] = {}
        section_data = summarized_data[section.id]
        for component in section.components:
            if component.id not in section_data:
                section_data[component.id] = {}
            component_data = section_data[component.id]
            if isinstance(component, TextInputComponentValueTO):
                _increment_value_count(component_data, component.value)
            elif isinstance(component, MultiSelectComponentValueTO):
                # Every selected option is counted separately.
                for value in component.values:
                    _increment_value_count(component_data, value)
            elif isinstance(component, DatetimeComponentValueTO):
                if component.year == 0:
                    # format == 'time'
                    # 09:15 -> 915
                    val = component.hour * 100 + component.minute
                    _increment_value_count(component_data, val)
                else:
                    # without Z at the end because this was selected in the user his timezone
                    naive_iso_str = datetime(year=component.year, month=component.month, day=component.day,
                                             hour=component.hour, minute=component.minute).isoformat()
                    _increment_value_count(component_data, naive_iso_str)
            elif isinstance(component, FileComponentValueTO):
                # File answers accumulate as a list; replace the default {} once.
                if not isinstance(component_data, list):
                    component_data = []
                component_data.append(component.to_statistics())
            elif isinstance(component, LocationComponentValueTO):
                # Location answers accumulate as a list; replace the default {} once.
                if not isinstance(component_data, list):
                    component_data = []
                component_data.append(component.to_statistics())
            section_data[component.id] = component_data
        summarized_data[section.id] = section_data
    return summarized_data
def _increment_value_count(dictionary, value):
# avoid 'null' string as value
if value is None:
value = ''
if value not in dictionary:
dictionary[value] = 1
else:
dictionary[value] += 1
return dictionary
def _decrement_value_count(dictionary, value):
# avoid 'null' string as value
if value is None:
value = ''
if value in dictionary:
if dictionary[value] == 1:
del dictionary[value]
else:
dictionary[value] -= 1
return dictionary
def get_all_statistic_keys(form_id):
    """Return the shard-config key followed by every shard key of a form."""
    return [FormStatisticsShardConfig.create_key(form_id)] + \
        list(FormStatisticsShardConfig.get_all_keys(form_id))
@ndb.transactional()
def increase_shards(form_id, shard_count):
    """Increase the number of shards for a given sharded counter. """
    key = FormStatisticsShardConfig.create_key(form_id)
    config = key.get() or FormStatisticsShardConfig(key=key)
    if config.num_shards < shard_count:
        # BUG FIX: the original assigned to ``config.shard_count`` while the
        # guard reads ``config.num_shards``, so the property being compared
        # was never raised and repeated calls kept "increasing" it.
        # NOTE(review): assumes the model property is ``num_shards`` --
        # confirm against the FormStatisticsShardConfig model definition.
        config.num_shards = shard_count
        config.put()
def get_form_statistics(form):
    # type: (Form) -> FormStatisticsTO
    """Aggregate every statistics shard of *form* into one statistics object."""
    shards = ndb.get_multi(FormStatisticsShardConfig.get_all_keys(form.id))  # type: list[FormStatisticsShard]
    # Summarize statistics from all shards into one object
    total_count = 0
    summarized = {}
    for shard in shards:
        if not shard:
            # get_multi returns None for shard keys that were never written.
            continue
        total_count += shard.count
        for section_id in shard.data:
            section_data = shard.data[section_id]
            if section_id not in summarized:
                summarized[section_id] = {}
            for component_id in section_data:
                component_data = section_data[component_id]
                if component_id not in summarized[section_id]:
                    summarized[section_id][component_id] = None
                for val in component_data:
                    if isinstance(val, list):
                        # List payload (file/location components): concatenate.
                        if not summarized[section_id][component_id]:
                            summarized[section_id][component_id] = [val]
                        else:
                            summarized[section_id][component_id].append(val)
                    elif isinstance(component_data[val], (int, long)):
                        # Counter payload ({value: count}): sum counts per value.
                        if not summarized[section_id][component_id]:
                            summarized[section_id][component_id] = {
                                val: component_data[val]
                            }
                        else:
                            if val not in summarized[section_id][component_id]:
                                summarized[section_id][component_id][val] = component_data[val]
                            else:
                                summarized[section_id][component_id][val] += component_data[val]
    return FormStatisticsTO(submissions=total_count, statistics=summarized)
def remove_submission_from_shard(shard, submission):
    # type: (FormStatisticsShard, FormSubmission) -> FormStatisticsShard
    """Subtract a submission's answers from its statistics shard, in place
    (the inverse of _update_form_statistics), and return the mutated shard."""
    sections = FormSectionValueTO.from_list(submission.sections)
    shard.count -= 1
    for section in sections:
        section_data = shard.data.get(section.id)
        if not section_data:
            continue
        for component in section.components:
            component_data = section_data.get(component.id)
            if not component_data:
                continue
            if isinstance(component, TextInputComponentValueTO):
                _decrement_value_count(component_data, component.value)
            elif isinstance(component, MultiSelectComponentValueTO):
                for value in component.values:
                    _decrement_value_count(component_data, value)
            elif isinstance(component, DatetimeComponentValueTO):
                if component.year == 0:
                    # format == 'time'
                    # 09:15 -> 915
                    val = component.hour * 100 + component.minute
                    _decrement_value_count(component_data, val)
                else:
                    # Same naive (user-timezone) iso string as used when counting.
                    naive_iso_str = datetime(year=component.year, month=component.month, day=component.day,
                                             hour=component.hour, minute=component.minute).isoformat()
                    _decrement_value_count(component_data, naive_iso_str)
            elif isinstance(component, FileComponentValueTO):
                comp_stats = component.to_statistics()
                if not isinstance(component_data, list):
                    component_data = []
                # Drop every stored entry equal to this submission's stats.
                component_data = [c for c in component_data if c != comp_stats]
            elif isinstance(component, LocationComponentValueTO):
                comp_stats = component.to_statistics()
                if not isinstance(component_data, list):
                    component_data = []
                component_data = [c for c in component_data if c != comp_stats]
            section_data[component.id] = component_data
        shard.data[section.id] = section_data
    return shard
| # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from datetime import datetime
from google.appengine.ext import ndb
from rogerthat.rpc import users
from rogerthat.service.api.forms import service_api
from rogerthat.to.forms import FormSectionValueTO, TextInputComponentValueTO, MultiSelectComponentValueTO, \
DatetimeComponentValueTO, FileComponentValueTO, LocationComponentValueTO
from solutions.common.bizz.forms.util import remove_sensitive_answers
from solutions.common.models.forms import FormStatisticsShardConfig, FormStatisticsShard
from solutions.common.to.forms import FormStatisticsTO
def get_random_shard_number(form_id):
shard_config_key = FormStatisticsShardConfig.create_key(form_id)
shard_config = shard_config_key.get()
if not shard_config:
shard_config = FormStatisticsShardConfig(key=shard_config_key)
shard_config.put()
return shard_config.get_random_shard()
@ndb.transactional(xg=True)
def update_form_statistics(service_user, submission, shard_number):
# type: (users.User, FormSubmission, int) -> None
with users.set_user(service_user):
form = service_api.get_form(submission.form_id)
to_put = []
shard = _update_form_statistics(submission, form, shard_number)
submission.statistics_shard_id = shard.key.id()
to_put.append(submission)
to_put.append(shard)
ndb.put_multi(to_put)
@ndb.transactional()
def _update_form_statistics(submission, form, shard_number):
# type: (FormSubmission, DynamicFormTO, int) -> FormStatisticsShard
shard_key = FormStatisticsShard.create_key(
FormStatisticsShard.SHARD_KEY_TEMPLATE % (submission.form_id, shard_number))
shard = shard_key.get()
if not shard:
shard = FormStatisticsShard(key=shard_key)
sections = remove_sensitive_answers(form.sections, FormSectionValueTO.from_list(submission.sections))
shard.data = _get_shard_data(shard.data or {}, sections)
shard.count += 1
return shard
def _get_shard_data(summarized_data, sections):
# type: (dict, list[FormSectionValueTO]) -> dict
for section in sections:
if section.id not in summarized_data:
summarized_data[section.id] = {}
section_data = summarized_data[section.id]
for component in section.components:
if component.id not in section_data:
section_data[component.id] = {}
component_data = section_data[component.id]
if isinstance(component, TextInputComponentValueTO):
_increment_value_count(component_data, component.value)
elif isinstance(component, MultiSelectComponentValueTO):
for value in component.values:
_increment_value_count(component_data, value)
elif isinstance(component, DatetimeComponentValueTO):
if component.year == 0:
# format == 'time'
# 09:15 -> 915
val = component.hour * 100 + component.minute
_increment_value_count(component_data, val)
else:
# without Z at the end because this was selected in the user his timezone
naive_iso_str = datetime(year=component.year, month=component.month, day=component.day,
hour=component.hour, minute=component.minute).isoformat()
_increment_value_count(component_data, naive_iso_str)
elif isinstance(component, FileComponentValueTO):
if not isinstance(component_data, list):
component_data = []
component_data.append(component.to_statistics())
elif isinstance(component, LocationComponentValueTO):
if not isinstance(component_data, list):
component_data = []
component_data.append(component.to_statistics())
section_data[component.id] = component_data
summarized_data[section.id] = section_data
return summarized_data
def _increment_value_count(dictionary, value):
# avoid 'null' string as value
if value is None:
value = ''
if value not in dictionary:
dictionary[value] = 1
else:
dictionary[value] += 1
return dictionary
def _decrement_value_count(dictionary, value):
# avoid 'null' string as value
if value is None:
value = ''
if value in dictionary:
if dictionary[value] == 1:
del dictionary[value]
else:
dictionary[value] -= 1
return dictionary
def get_all_statistic_keys(form_id):
keys = [FormStatisticsShardConfig.create_key(form_id)]
keys.extend(FormStatisticsShardConfig.get_all_keys(form_id))
return keys
@ndb.transactional()
def increase_shards(form_id, shard_count):
"""Increase the number of shards for a given sharded counter. """
key = FormStatisticsShardConfig.create_key(form_id)
config = key.get() or FormStatisticsShardConfig(key=key)
if config.num_shards < shard_count:
config.shard_count = shard_count
config.put()
def get_form_statistics(form):
# type: (Form) -> FormStatisticsTO
shards = ndb.get_multi(FormStatisticsShardConfig.get_all_keys(form.id)) # type: list[FormStatisticsShard]
# Summarize statistics from all shards into one object
total_count = 0
summarized = {}
for shard in shards:
if not shard:
continue
total_count += shard.count
for section_id in shard.data:
section_data = shard.data[section_id]
if section_id not in summarized:
summarized[section_id] = {}
for component_id in section_data:
component_data = section_data[component_id]
if component_id not in summarized[section_id]:
summarized[section_id][component_id] = None
for val in component_data:
if isinstance(val, list):
if not summarized[section_id][component_id]:
summarized[section_id][component_id] = [val]
else:
summarized[section_id][component_id].append(val)
elif isinstance(component_data[val], (int, long)):
if not summarized[section_id][component_id]:
summarized[section_id][component_id] = {
val: component_data[val]
}
else:
if val not in summarized[section_id][component_id]:
summarized[section_id][component_id][val] = component_data[val]
else:
summarized[section_id][component_id][val] += component_data[val]
return FormStatisticsTO(submissions=total_count, statistics=summarized)
def remove_submission_from_shard(shard, submission):
# type: (FormStatisticsShard, FormSubmission) -> FormStatisticsShard
sections = FormSectionValueTO.from_list(submission.sections)
shard.count -= 1
for section in sections:
section_data = shard.data.get(section.id)
if not section_data:
continue
for component in section.components:
component_data = section_data.get(component.id)
if not component_data:
continue
if isinstance(component, TextInputComponentValueTO):
_decrement_value_count(component_data, component.value)
elif isinstance(component, MultiSelectComponentValueTO):
for value in component.values:
_decrement_value_count(component_data, value)
elif isinstance(component, DatetimeComponentValueTO):
if component.year == 0:
# format == 'time'
# 09:15 -> 915
val = component.hour * 100 + component.minute
_decrement_value_count(component_data, val)
else:
naive_iso_str = datetime(year=component.year, month=component.month, day=component.day,
hour=component.hour, minute=component.minute).isoformat()
_decrement_value_count(component_data, naive_iso_str)
elif isinstance(component, FileComponentValueTO):
comp_stats = component.to_statistics()
if not isinstance(component_data, list):
component_data = []
component_data = [c for c in component_data if c != comp_stats]
elif isinstance(component, LocationComponentValueTO):
comp_stats = component.to_statistics()
if not isinstance(component_data, list):
component_data = []
component_data = [c for c in component_data if c != comp_stats]
section_data[component.id] = component_data
shard.data[section.id] = section_data
return shard
| en | 0.803268 | # -*- coding: utf-8 -*- # Copyright 2020 Green Valley Belgium NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @@license_version:1.7@@ # type: (users.User, FormSubmission, int) -> None # type: (FormSubmission, DynamicFormTO, int) -> FormStatisticsShard # type: (dict, list[FormSectionValueTO]) -> dict # format == 'time' # 09:15 -> 915 # without Z at the end because this was selected in the user his timezone # avoid 'null' string as value # avoid 'null' string as value Increase the number of shards for a given sharded counter. # type: (Form) -> FormStatisticsTO # type: list[FormStatisticsShard] # Summarize statistics from all shards into one object # type: (FormStatisticsShard, FormSubmission) -> FormStatisticsShard # format == 'time' # 09:15 -> 915 | 1.784735 | 2 |
load_data.py | wheemyungshin/StockPrediction_CapstoneDA2021-1 | 1 | 6622762 | <gh_stars>1-10
# Experiment configuration constants, consumed by load_data and code later
# in this module (not fully visible here).
max_test_size = 11  # upper bound on the test window size -- presumably trading days; TODO confirm
simulation_size = 4  # number of simulation repetitions -- TODO confirm usage
sample_step = 1  # stride used when sampling the series -- TODO confirm usage
split_iter = 37  # number of train/test split iterations -- TODO confirm usage
split_stride = 1  # stride between consecutive splits -- TODO confirm usage
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
import sys
import warnings
import torch
import torch.nn as nn
import torch.nn.modules.conv as conv
if not sys.warnoptions:
warnings.simplefilter('ignore')
import FinanceDataReader as fdr
# sample execution (requires torchvision)0
from PIL import Image
from torchvision import transforms
def load_data():
start_date = '2016-01-02'
end_date = '2020-12-31'
max_train_size = 1200
df_kospi = fdr.StockListing('KOSPI')
df_kospi.head()
df2 = fdr.DataReader('KS11', start_date, end_date)
print(df2)
df_ratio2 = df2.iloc[:, 0:1].astype('float32').fillna(0)
df_log2 = pd.DataFrame(df_ratio2)
df_dict = {
0 : fdr.DataReader('IXIC', start_date, end_date),#나스닥
1 : fdr.DataReader('KQ11', start_date, end_date),#코스닥
2 : fdr.DataReader('USD/KRW', start_date, end_date),#달러/원
3 : fdr.DataReader('KS50', start_date, end_date),#코스피50
4 : fdr.DataReader('KS100', start_date, end_date),#코스피100
5 : fdr.DataReader('KS200', start_date, end_date),#코스피200
6 : fdr.DataReader('NG', start_date, end_date),#천연가스 선물
7 : fdr.DataReader('ZG', start_date, end_date),#금 선물
8 : fdr.DataReader('VCB', start_date, end_date),#베트남무역은행
#9 : fdr.DataReader('KR1YT=RR', start_date, end_date),#한국채권1년수익률
9 : fdr.DataReader('US1MT=X', start_date, end_date),#미국채권1개월수익률
}
'''
df_dict = {
0 : fdr.DataReader('IXIC', start_date, end_date),#나스닥
1 : fdr.DataReader('USD/EUR', start_date, end_date),#달러/유로 1 : fdr.DataReader('KQ11', start_date, end_date),#코스닥
2 : fdr.DataReader('USD/KRW', start_date, end_date),#달러/원
3 : fdr.DataReader('KS50', start_date, end_date),#코스피50
4 : fdr.DataReader('KS100', start_date, end_date),#코스피100
5 : fdr.DataReader('KS200', start_date, end_date),#코스피200
6 : fdr.DataReader('TSE', start_date, end_date),#도쿄 증권거래소 6 : fdr.DataReader('NG', start_date, end_date),#천연가스 선물
7 : fdr.DataReader('ZG', start_date, end_date),#금 선물
8 : fdr.DataReader('VCB', start_date, end_date),#베트남무역은행
9 : fdr.DataReader('KR1YT=RR', start_date, end_date),#한국국채1년수익률
10 : fdr.DataReader('US1MT=X', start_date, end_date),#미국국채1개월수익률
11 : fdr.DataReader('KR10YT=RR', start_date, end_date),#한국국채10년수익률
12 : fdr.DataReader('US10YT=X', start_date, end_date),#미국국채10개월수익률
}
'''
for i in range(len(df_dict)):
extra_df = df_dict[i]
df_ratio_extra = extra_df.iloc[:, 0:1].astype('float32').fillna(0) #((extra_df.iloc[:, 0:1].astype('float32') - extra_df.iloc[:, 0:1].shift().astype('float32')) / extra_df.iloc[:, 0:1].shift().astype('float32')).fillna(0)
df_log_extra = pd.DataFrame(df_ratio_extra)
df_log2 = pd.concat([df_log2, df_log_extra],axis=1)
df_trains = np.array([])
df_tests = np.array([])
df_vals = np.array([])
df_val_targets = np.array([])
df_ratios = np.array([])
df_volumes = np.array([])
scaler = MinMaxScaler()
stock_names = []
stock_dates = []
print(np.flip(df_kospi.to_numpy(), axis=0).shape)
read_lines = np.flip(df_kospi.to_numpy(), axis=0)[:100]
read_lines = np.append(read_lines, np.flip(df_kospi.to_numpy(), axis=0)[3200:3300], axis=0)
read_lines = np.append(read_lines, np.flip(df_kospi.to_numpy(), axis=0)[2200:2700], axis=0)
read_lines = np.append(read_lines, np.flip(df_kospi.to_numpy(), axis=0)[3400:3550], axis=0)
read_lines = np.append(read_lines, np.flip(df_kospi.to_numpy(), axis=0)[1600:1900], axis=0)
read_lines = np.append(read_lines, np.flip(df_kospi.to_numpy(), axis=0)[5012:5025], axis=0)
read_lines = np.append(read_lines, np.flip(df_kospi.to_numpy(), axis=0)[2900:3180], axis=0)
min_train_size = 560
for line in np.flip(read_lines, axis=0):
try:
df = fdr.DataReader(line[0], start_date, end_date)
num_vals = df.iloc[:, 0:1].astype(bool).sum(axis=0)
if len(num_vals) == 1:
num_vals = int(num_vals)
else:
num_vals = 0
#print(num_vals)
#print(max_train_size + max_test_size)
if num_vals > max_train_size + max_test_size:
df_ratio = df.iloc[:, 3].astype('float32')
df_log1 = pd.DataFrame(df_ratio)
df_ratios = np.append(df_ratios, df_ratio.to_numpy())
for j in range(0,split_iter):
split_point_start = j * max_test_size
split_point_end = (split_iter - j + 1) * max_test_size
df_train1 = df_log1.iloc[-max_train_size+split_point_start:-split_point_end]
df_test1 = df_log1.iloc[-split_point_end:-split_point_end+max_test_size]
df_train2 = df_log2.iloc[:]#df_log2.iloc[-max_test_size-max_train_size+split_point_start:-split_point_end]
df_test2 = df_log2.iloc[:]#df_log2.iloc[-split_point_end:-split_point_end+max_test_size]
df_train =pd.concat([df_train1, df_train2],axis=1).dropna(axis=0)[-min_train_size:]
df_test = pd.concat([df_test1, df_test2],axis=1).dropna(axis=0)
if df_test.shape[0] == 0:
print("NAN detected!:", line[2])
continue
for date in df_train.index:
if date not in stock_dates:
stock_dates.append(date)
#print(df_train)
indexes = list(df_train.index)#[::sample_step]
#df_train = df_train.rolling(sample_step).mean()[::sample_step][1:]
df_train_ = np.array([])
previous_train = np.zeros(df_train.shape[1])
for num, i in enumerate(df_train.to_numpy()):#[::sample_step]):
if num == 0:
df_train_ = np.expand_dims(previous_train, axis=0)
else:
if (previous_train == 0).any():
print(previous_train)
new_item = (i - previous_train) / previous_train
df_train_ = np.append(df_train_, np.expand_dims(new_item, axis=0), axis=0)
previous_train = i
df_train = df_train_
df_test = df_test.to_numpy()
df_test = np.array([(df_test[-1] - df_test[0])/ df_test[0] >= 0.02, (df_test[-1] - df_test[0])/ df_test[0] < -0.02])
df_test = np.append(df_test, np.expand_dims(np.logical_not(df_test[0]) * np.logical_not(df_test[1]), axis=0), axis=0)
# if min_train_size > df_train.shape[0]:
# min_train_size = df_train.shape[0]
# else:
# df_train = df_train[-min_train_size:]
test_size = df_test.shape[0]
df_train_np = np.expand_dims(df_train, axis=0)
df_test_np = np.expand_dims(df_test, axis=0)
#print("df_train_np: ",df_train_np)
if df_train_np[np.isnan(df_train_np)].size > 0:
print("NAN detected!!:", line[2])
continue
if j >= (split_iter-2):# * split_stride:
if df_vals.size == 0:
df_vals = df_train_np
df_val_targets = df_test_np
else:
df_vals = np.append(df_vals, df_train_np, axis=0)
df_val_targets = np.append(df_val_targets, df_test_np, axis=0)
else:
if df_trains.size == 0:
df_trains = df_train_np
df_tests = df_test_np
else:
df_trains = np.append(df_trains, df_train_np, axis=0)
df_tests = np.append(df_tests, df_test_np, axis=0)
if j == split_iter - 1:
print("Added: ", line[2])
stock_names.append(line[2])
except ValueError as e:
print(e)
df_trains = np.transpose(df_trains, (0,2,1))
df_tests = np.transpose(df_tests, (0,2,1))
df_vals = np.transpose(df_vals, (0,2,1))
df_val_targets = np.transpose(df_val_targets, (0,2,1))
print(df_trains.shape, df_tests.shape)
print(df_vals.shape, df_val_targets.shape)
return df_trains, df_tests, df_vals, df_val_targets
| max_test_size = 11
simulation_size = 4
sample_step = 1
split_iter = 37
split_stride = 1
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
import sys
import warnings
import torch
import torch.nn as nn
import torch.nn.modules.conv as conv
if not sys.warnoptions:
warnings.simplefilter('ignore')
import FinanceDataReader as fdr
# sample execution (requires torchvision)0
from PIL import Image
from torchvision import transforms
def load_data():
start_date = '2016-01-02'
end_date = '2020-12-31'
max_train_size = 1200
df_kospi = fdr.StockListing('KOSPI')
df_kospi.head()
df2 = fdr.DataReader('KS11', start_date, end_date)
print(df2)
df_ratio2 = df2.iloc[:, 0:1].astype('float32').fillna(0)
df_log2 = pd.DataFrame(df_ratio2)
df_dict = {
0 : fdr.DataReader('IXIC', start_date, end_date),#나스닥
1 : fdr.DataReader('KQ11', start_date, end_date),#코스닥
2 : fdr.DataReader('USD/KRW', start_date, end_date),#달러/원
3 : fdr.DataReader('KS50', start_date, end_date),#코스피50
4 : fdr.DataReader('KS100', start_date, end_date),#코스피100
5 : fdr.DataReader('KS200', start_date, end_date),#코스피200
6 : fdr.DataReader('NG', start_date, end_date),#천연가스 선물
7 : fdr.DataReader('ZG', start_date, end_date),#금 선물
8 : fdr.DataReader('VCB', start_date, end_date),#베트남무역은행
#9 : fdr.DataReader('KR1YT=RR', start_date, end_date),#한국채권1년수익률
9 : fdr.DataReader('US1MT=X', start_date, end_date),#미국채권1개월수익률
}
'''
df_dict = {
0 : fdr.DataReader('IXIC', start_date, end_date),#나스닥
1 : fdr.DataReader('USD/EUR', start_date, end_date),#달러/유로 1 : fdr.DataReader('KQ11', start_date, end_date),#코스닥
2 : fdr.DataReader('USD/KRW', start_date, end_date),#달러/원
3 : fdr.DataReader('KS50', start_date, end_date),#코스피50
4 : fdr.DataReader('KS100', start_date, end_date),#코스피100
5 : fdr.DataReader('KS200', start_date, end_date),#코스피200
6 : fdr.DataReader('TSE', start_date, end_date),#도쿄 증권거래소 6 : fdr.DataReader('NG', start_date, end_date),#천연가스 선물
7 : fdr.DataReader('ZG', start_date, end_date),#금 선물
8 : fdr.DataReader('VCB', start_date, end_date),#베트남무역은행
9 : fdr.DataReader('KR1YT=RR', start_date, end_date),#한국국채1년수익률
10 : fdr.DataReader('US1MT=X', start_date, end_date),#미국국채1개월수익률
11 : fdr.DataReader('KR10YT=RR', start_date, end_date),#한국국채10년수익률
12 : fdr.DataReader('US10YT=X', start_date, end_date),#미국국채10개월수익률
}
'''
for i in range(len(df_dict)):
extra_df = df_dict[i]
df_ratio_extra = extra_df.iloc[:, 0:1].astype('float32').fillna(0) #((extra_df.iloc[:, 0:1].astype('float32') - extra_df.iloc[:, 0:1].shift().astype('float32')) / extra_df.iloc[:, 0:1].shift().astype('float32')).fillna(0)
df_log_extra = pd.DataFrame(df_ratio_extra)
df_log2 = pd.concat([df_log2, df_log_extra],axis=1)
df_trains = np.array([])
df_tests = np.array([])
df_vals = np.array([])
df_val_targets = np.array([])
df_ratios = np.array([])
df_volumes = np.array([])
scaler = MinMaxScaler()
stock_names = []
stock_dates = []
print(np.flip(df_kospi.to_numpy(), axis=0).shape)
read_lines = np.flip(df_kospi.to_numpy(), axis=0)[:100]
read_lines = np.append(read_lines, np.flip(df_kospi.to_numpy(), axis=0)[3200:3300], axis=0)
read_lines = np.append(read_lines, np.flip(df_kospi.to_numpy(), axis=0)[2200:2700], axis=0)
read_lines = np.append(read_lines, np.flip(df_kospi.to_numpy(), axis=0)[3400:3550], axis=0)
read_lines = np.append(read_lines, np.flip(df_kospi.to_numpy(), axis=0)[1600:1900], axis=0)
read_lines = np.append(read_lines, np.flip(df_kospi.to_numpy(), axis=0)[5012:5025], axis=0)
read_lines = np.append(read_lines, np.flip(df_kospi.to_numpy(), axis=0)[2900:3180], axis=0)
min_train_size = 560
for line in np.flip(read_lines, axis=0):
try:
df = fdr.DataReader(line[0], start_date, end_date)
num_vals = df.iloc[:, 0:1].astype(bool).sum(axis=0)
if len(num_vals) == 1:
num_vals = int(num_vals)
else:
num_vals = 0
#print(num_vals)
#print(max_train_size + max_test_size)
if num_vals > max_train_size + max_test_size:
df_ratio = df.iloc[:, 3].astype('float32')
df_log1 = pd.DataFrame(df_ratio)
df_ratios = np.append(df_ratios, df_ratio.to_numpy())
for j in range(0,split_iter):
split_point_start = j * max_test_size
split_point_end = (split_iter - j + 1) * max_test_size
df_train1 = df_log1.iloc[-max_train_size+split_point_start:-split_point_end]
df_test1 = df_log1.iloc[-split_point_end:-split_point_end+max_test_size]
df_train2 = df_log2.iloc[:]#df_log2.iloc[-max_test_size-max_train_size+split_point_start:-split_point_end]
df_test2 = df_log2.iloc[:]#df_log2.iloc[-split_point_end:-split_point_end+max_test_size]
df_train =pd.concat([df_train1, df_train2],axis=1).dropna(axis=0)[-min_train_size:]
df_test = pd.concat([df_test1, df_test2],axis=1).dropna(axis=0)
if df_test.shape[0] == 0:
print("NAN detected!:", line[2])
continue
for date in df_train.index:
if date not in stock_dates:
stock_dates.append(date)
#print(df_train)
indexes = list(df_train.index)#[::sample_step]
#df_train = df_train.rolling(sample_step).mean()[::sample_step][1:]
df_train_ = np.array([])
previous_train = np.zeros(df_train.shape[1])
for num, i in enumerate(df_train.to_numpy()):#[::sample_step]):
if num == 0:
df_train_ = np.expand_dims(previous_train, axis=0)
else:
if (previous_train == 0).any():
print(previous_train)
new_item = (i - previous_train) / previous_train
df_train_ = np.append(df_train_, np.expand_dims(new_item, axis=0), axis=0)
previous_train = i
df_train = df_train_
df_test = df_test.to_numpy()
df_test = np.array([(df_test[-1] - df_test[0])/ df_test[0] >= 0.02, (df_test[-1] - df_test[0])/ df_test[0] < -0.02])
df_test = np.append(df_test, np.expand_dims(np.logical_not(df_test[0]) * np.logical_not(df_test[1]), axis=0), axis=0)
# if min_train_size > df_train.shape[0]:
# min_train_size = df_train.shape[0]
# else:
# df_train = df_train[-min_train_size:]
test_size = df_test.shape[0]
df_train_np = np.expand_dims(df_train, axis=0)
df_test_np = np.expand_dims(df_test, axis=0)
#print("df_train_np: ",df_train_np)
if df_train_np[np.isnan(df_train_np)].size > 0:
print("NAN detected!!:", line[2])
continue
if j >= (split_iter-2):# * split_stride:
if df_vals.size == 0:
df_vals = df_train_np
df_val_targets = df_test_np
else:
df_vals = np.append(df_vals, df_train_np, axis=0)
df_val_targets = np.append(df_val_targets, df_test_np, axis=0)
else:
if df_trains.size == 0:
df_trains = df_train_np
df_tests = df_test_np
else:
df_trains = np.append(df_trains, df_train_np, axis=0)
df_tests = np.append(df_tests, df_test_np, axis=0)
if j == split_iter - 1:
print("Added: ", line[2])
stock_names.append(line[2])
except ValueError as e:
print(e)
df_trains = np.transpose(df_trains, (0,2,1))
df_tests = np.transpose(df_tests, (0,2,1))
df_vals = np.transpose(df_vals, (0,2,1))
df_val_targets = np.transpose(df_val_targets, (0,2,1))
print(df_trains.shape, df_tests.shape)
print(df_vals.shape, df_val_targets.shape)
return df_trains, df_tests, df_vals, df_val_targets | en | 0.308986 | # sample execution (requires torchvision)0 #나스닥 #코스닥 #달러/원 #코스피50 #코스피100 #코스피200 #천연가스 선물 #금 선물 #베트남무역은행 #9 : fdr.DataReader('KR1YT=RR', start_date, end_date),#한국채권1년수익률 #미국채권1개월수익률 df_dict = { 0 : fdr.DataReader('IXIC', start_date, end_date),#나스닥 1 : fdr.DataReader('USD/EUR', start_date, end_date),#달러/유로 1 : fdr.DataReader('KQ11', start_date, end_date),#코스닥 2 : fdr.DataReader('USD/KRW', start_date, end_date),#달러/원 3 : fdr.DataReader('KS50', start_date, end_date),#코스피50 4 : fdr.DataReader('KS100', start_date, end_date),#코스피100 5 : fdr.DataReader('KS200', start_date, end_date),#코스피200 6 : fdr.DataReader('TSE', start_date, end_date),#도쿄 증권거래소 6 : fdr.DataReader('NG', start_date, end_date),#천연가스 선물 7 : fdr.DataReader('ZG', start_date, end_date),#금 선물 8 : fdr.DataReader('VCB', start_date, end_date),#베트남무역은행 9 : fdr.DataReader('KR1YT=RR', start_date, end_date),#한국국채1년수익률 10 : fdr.DataReader('US1MT=X', start_date, end_date),#미국국채1개월수익률 11 : fdr.DataReader('KR10YT=RR', start_date, end_date),#한국국채10년수익률 12 : fdr.DataReader('US10YT=X', start_date, end_date),#미국국채10개월수익률 } #((extra_df.iloc[:, 0:1].astype('float32') - extra_df.iloc[:, 0:1].shift().astype('float32')) / extra_df.iloc[:, 0:1].shift().astype('float32')).fillna(0) #print(num_vals) #print(max_train_size + max_test_size) #df_log2.iloc[-max_test_size-max_train_size+split_point_start:-split_point_end] #df_log2.iloc[-split_point_end:-split_point_end+max_test_size] #print(df_train) #[::sample_step] #df_train = df_train.rolling(sample_step).mean()[::sample_step][1:] #[::sample_step]): # if min_train_size > df_train.shape[0]: # min_train_size = df_train.shape[0] # else: # df_train = df_train[-min_train_size:] #print("df_train_np: ",df_train_np) # * split_stride: | 2.455578 | 2 |
test/test_seq_tools.py | wckdouglas/tgirt_seq_tools | 7 | 6622763 | <gh_stars>1-10
#!/usr/bin/env python
import filecmp
import logging
import os
from collections import defaultdict
from sequencing_tools.fastq_tools import readfq
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(os.path.basename(__file__))
PROG_PREFIX = ""
if os.environ["_"].endswith("poetry"):
PROG_PREFIX = "poetry run "
logger.info("Using poetry")
else:
logger.info("Using python")
test_data_path = os.path.dirname(os.path.realpath(__file__)) + "/data"
def run_system(cmd):
logger.info("Running %s" % cmd)
os.system(cmd)
def test_bam():
in_bam = test_data_path + "/test.bam"
out_bed = test_data_path + "/out.bed"
command = (
"{PREFIX} seqtools bam2bed -i {in_bam} --primary "
"| sort -k1,1 -k2,2n -k3,3n "
"| {PREFIX} seqtools dedup -i - "
"> {out_bed}".format(in_bam=in_bam, out_bed=out_bed, PREFIX=PROG_PREFIX)
)
run_system(command)
assert filecmp.cmp(out_bed, test_data_path + "/test.bed")
os.remove(out_bed)
def test_multi():
in_bam = test_data_path + "/multi.bam"
out_bam = test_data_path + "/multi.out"
command = "{PREFIX} seqtools filterMulti -i {in_bam} -o - | samtools view > {out_bam}".format(
in_bam=in_bam, out_bam=out_bam, PREFIX=PROG_PREFIX
)
run_system(command)
assert filecmp.cmp(out_bam, test_data_path + "/multi.result")
os.remove(out_bam)
def same_fq(fq1, fq2):
id_dict1 = defaultdict(set)
for seqid, seq, qual in readfq(fq1):
id_dict1[seqid].add(seq + qual)
id_dict2 = defaultdict(set)
for seqid, seq, qual in readfq(fq2):
id_dict2[seqid].add(seq + qual)
return id_dict1 == id_dict2
def test_correct():
in_bam = test_data_path + "/tag.bam"
out_fq = test_data_path + "/tag.fq"
command = "{PREFIX} seqtools demux -i {in_bam} -o {out_fq} -c -t RX".format(
in_bam=in_bam, out_fq=out_fq, PREFIX=PROG_PREFIX
)
run_system(command)
assert same_fq(out_fq, test_data_path + "/corrected.conserve.fq")
command = "{PREFIX} seqtools demux -i {in_bam} -o {out_fq} -t RX".format(
in_bam=in_bam, out_fq=out_fq, PREFIX=PROG_PREFIX
)
run_system(command)
assert same_fq(out_fq, test_data_path + "/corrected.qual.fq")
os.remove(out_fq)
def test_filter():
in_bam = test_data_path + "/tag.bam"
out_bam = test_data_path + "/filtered.out"
command = "{PREFIX} seqtools filterSoftClip --pe -s 0 -i {in_bam} -o - | samtools view > {out_bam}".format(
in_bam=in_bam, out_bam=out_bam, PREFIX=PROG_PREFIX
)
run_system(command)
assert filecmp.cmp(out_bam, test_data_path + "/clipped.result")
os.remove(out_bam)
def test_stranded_base_count():
golden_file = test_data_path + "/pileup.txt"
command = (
"{PREFIX} seqtools pileup -i {path}/MT_TF.bam "
"-f {path}/MT_TF.fa -c 0 --min_coverage 0 -q 0 "
"> {path}/test_pileup.txt".format(path=test_data_path, PREFIX=PROG_PREFIX)
)
run_system(command)
assert filecmp.cmp(golden_file, test_data_path + "/test_pileup.txt")
| #!/usr/bin/env python
import filecmp
import logging
import os
from collections import defaultdict
from sequencing_tools.fastq_tools import readfq
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(os.path.basename(__file__))
PROG_PREFIX = ""
if os.environ["_"].endswith("poetry"):
PROG_PREFIX = "poetry run "
logger.info("Using poetry")
else:
logger.info("Using python")
test_data_path = os.path.dirname(os.path.realpath(__file__)) + "/data"
def run_system(cmd):
logger.info("Running %s" % cmd)
os.system(cmd)
def test_bam():
in_bam = test_data_path + "/test.bam"
out_bed = test_data_path + "/out.bed"
command = (
"{PREFIX} seqtools bam2bed -i {in_bam} --primary "
"| sort -k1,1 -k2,2n -k3,3n "
"| {PREFIX} seqtools dedup -i - "
"> {out_bed}".format(in_bam=in_bam, out_bed=out_bed, PREFIX=PROG_PREFIX)
)
run_system(command)
assert filecmp.cmp(out_bed, test_data_path + "/test.bed")
os.remove(out_bed)
def test_multi():
in_bam = test_data_path + "/multi.bam"
out_bam = test_data_path + "/multi.out"
command = "{PREFIX} seqtools filterMulti -i {in_bam} -o - | samtools view > {out_bam}".format(
in_bam=in_bam, out_bam=out_bam, PREFIX=PROG_PREFIX
)
run_system(command)
assert filecmp.cmp(out_bam, test_data_path + "/multi.result")
os.remove(out_bam)
def same_fq(fq1, fq2):
id_dict1 = defaultdict(set)
for seqid, seq, qual in readfq(fq1):
id_dict1[seqid].add(seq + qual)
id_dict2 = defaultdict(set)
for seqid, seq, qual in readfq(fq2):
id_dict2[seqid].add(seq + qual)
return id_dict1 == id_dict2
def test_correct():
in_bam = test_data_path + "/tag.bam"
out_fq = test_data_path + "/tag.fq"
command = "{PREFIX} seqtools demux -i {in_bam} -o {out_fq} -c -t RX".format(
in_bam=in_bam, out_fq=out_fq, PREFIX=PROG_PREFIX
)
run_system(command)
assert same_fq(out_fq, test_data_path + "/corrected.conserve.fq")
command = "{PREFIX} seqtools demux -i {in_bam} -o {out_fq} -t RX".format(
in_bam=in_bam, out_fq=out_fq, PREFIX=PROG_PREFIX
)
run_system(command)
assert same_fq(out_fq, test_data_path + "/corrected.qual.fq")
os.remove(out_fq)
def test_filter():
in_bam = test_data_path + "/tag.bam"
out_bam = test_data_path + "/filtered.out"
command = "{PREFIX} seqtools filterSoftClip --pe -s 0 -i {in_bam} -o - | samtools view > {out_bam}".format(
in_bam=in_bam, out_bam=out_bam, PREFIX=PROG_PREFIX
)
run_system(command)
assert filecmp.cmp(out_bam, test_data_path + "/clipped.result")
os.remove(out_bam)
def test_stranded_base_count():
golden_file = test_data_path + "/pileup.txt"
command = (
"{PREFIX} seqtools pileup -i {path}/MT_TF.bam "
"-f {path}/MT_TF.fa -c 0 --min_coverage 0 -q 0 "
"> {path}/test_pileup.txt".format(path=test_data_path, PREFIX=PROG_PREFIX)
)
run_system(command)
assert filecmp.cmp(golden_file, test_data_path + "/test_pileup.txt") | ru | 0.26433 | #!/usr/bin/env python | 2.267389 | 2 |
project/migrations/0005_summary_datecompleted.py | mahdiieh/kholasesaz | 0 | 6622764 | <filename>project/migrations/0005_summary_datecompleted.py
# Generated by Django 2.2.20 on 2022-02-05 15:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project', '0004_remove_summary_created'),
]
operations = [
migrations.AddField(
model_name='summary',
name='datecompleted',
field=models.DateTimeField(blank=True, null=True),
),
]
| <filename>project/migrations/0005_summary_datecompleted.py
# Generated by Django 2.2.20 on 2022-02-05 15:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project', '0004_remove_summary_created'),
]
operations = [
migrations.AddField(
model_name='summary',
name='datecompleted',
field=models.DateTimeField(blank=True, null=True),
),
]
| en | 0.793193 | # Generated by Django 2.2.20 on 2022-02-05 15:57 | 1.318902 | 1 |
ObasiEmmanuel/Phase 1/Python Basic 1/Day 4 task/Question five.py | CodedLadiesInnovateTech/-python-challenge-solutions | 6 | 6622765 | def checker():
number=int(input("Enter number "))
a=[1,5,8,3]
if number in a:
return True
else:
return False
print(checker())
| def checker():
number=int(input("Enter number "))
a=[1,5,8,3]
if number in a:
return True
else:
return False
print(checker())
| none | 1 | 3.726161 | 4 | |
typetest/analyse/mistyped_words_pie_chart.py | MasterMedo/typetest | 15 | 6622766 | <gh_stars>10-100
import pandas as pd
import matplotlib.pyplot as plt
from typetest.utils import (
validate_input_file_path,
damerau_levenshtein_distance,
)
@validate_input_file_path
def plot(input_file, filter_func=lambda c: True):
"""Plots a pie chart representing the shares of numbers of mistakes in
mistyped words.
"""
def distance(row):
return damerau_levenshtein_distance(row.word, row["mistype"])
def wrong_word_typed(row):
return row["distance"] >= max(len(row["word"]), len(row["mistype"]))
def word_skip(row):
return row["distance"] > 2 and row["word"].startswith(row["mistype"])
data_frame = pd.read_csv(
input_file, header=None, names=["word", "mistype", "timestamp"]
)
data_frame["distance"] = data_frame.apply(distance, axis=1)
data_frame["flag"] = data_frame.apply(
lambda row: not word_skip(row) and not wrong_word_typed(row), axis=1
)
data_frame = data_frame[data_frame["flag"]]
mistakes = data_frame["distance"].value_counts()
mistakes = list(zip(mistakes.index.to_list(), mistakes.to_list()))
fig, ax = plt.subplots()
labels, sizes = zip(*sorted(mistakes))
explode = [0] + [0.2] * (len(mistakes) - 1)
ax.pie(sizes, labels=labels, autopct="%1.1f%%", explode=explode)
# ax = sns.histplot(mistakes, stat="probability")
ax.set_title("number of mistakes made when typing a word")
plt.show()
| import pandas as pd
import matplotlib.pyplot as plt
from typetest.utils import (
validate_input_file_path,
damerau_levenshtein_distance,
)
@validate_input_file_path
def plot(input_file, filter_func=lambda c: True):
"""Plots a pie chart representing the shares of numbers of mistakes in
mistyped words.
"""
def distance(row):
return damerau_levenshtein_distance(row.word, row["mistype"])
def wrong_word_typed(row):
return row["distance"] >= max(len(row["word"]), len(row["mistype"]))
def word_skip(row):
return row["distance"] > 2 and row["word"].startswith(row["mistype"])
data_frame = pd.read_csv(
input_file, header=None, names=["word", "mistype", "timestamp"]
)
data_frame["distance"] = data_frame.apply(distance, axis=1)
data_frame["flag"] = data_frame.apply(
lambda row: not word_skip(row) and not wrong_word_typed(row), axis=1
)
data_frame = data_frame[data_frame["flag"]]
mistakes = data_frame["distance"].value_counts()
mistakes = list(zip(mistakes.index.to_list(), mistakes.to_list()))
fig, ax = plt.subplots()
labels, sizes = zip(*sorted(mistakes))
explode = [0] + [0.2] * (len(mistakes) - 1)
ax.pie(sizes, labels=labels, autopct="%1.1f%%", explode=explode)
# ax = sns.histplot(mistakes, stat="probability")
ax.set_title("number of mistakes made when typing a word")
plt.show() | en | 0.813089 | Plots a pie chart representing the shares of numbers of mistakes in mistyped words. # ax = sns.histplot(mistakes, stat="probability") | 3.038887 | 3 |
update_rosters2.py | amoliski/swgoh_vis | 0 | 6622767 | import swgoh
swgoh.update_rosters(True, part=2) | import swgoh
swgoh.update_rosters(True, part=2) | none | 1 | 1.086443 | 1 | |
Models/get_model.py | LinusWu/TENET-Training | 8 | 6622768 | <gh_stars>1-10
import sys
sys.path.append('./Models/')
from resnext import ResneXt as Resnext29
from resnet import ResNet,BasicBlock,Bottleneck
from torchvision_models import load_pretrained, pretrained_settings
import torch
import torch.nn.functional as F
import numpy as np
def Resnext29_init(config):
if config.dataset.lower() == 'cifar10':
model = Resnext29(num_classes=10)
if config.dataset.lower() == 'cifar100':
model = Resnext29(num_classes=100)
if config.dataset.lower() == 'tiny':
model = Resnext29(num_classes=200)
if len(config.resume_model_path) > 0:
from collections import OrderedDict
new_state_dict = OrderedDict()
pretrained_dict = torch.load(config.resume_model_path, map_location=lambda storage, loc: storage)
for k, v in pretrained_dict.items():
if k.startswith('module'):
name = k[7:]
else:
name = k
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
return model
def Resnet50_init(config):
if config.dataset == 'ImageNet':
model = ResNet(Bottleneck, [3, 4, 6, 3],modified=False)
if config.pretrain:
settings = pretrained_settings['resnet50']['imagenet']
resnet = load_pretrained(model, 1000, settings)
return resnet
else:
return model
if config.dataset == 'CUB':
resnet = ResNet(Bottleneck, [3, 4, 6, 3],modified=True,modified_num_classes=200)
if config.pretrain:
if len(config.resume_model_path) > 0:
#resnet = torch.nn.DataParallel(model)
#resnet.cuda()
#resnet.load_state_dict(torch.load(config.resume_model_path))
from collections import OrderedDict
new_state_dict = OrderedDict()
pretrained_dict = torch.load(config.resume_model_path, map_location=lambda storage, loc: storage)
for k, v in pretrained_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
resnet.load_state_dict(new_state_dict)
else:
settings = pretrained_settings['resnet50']['imagenet']
resnet = load_pretrained(resnet, 1000, settings)
return resnet
else:
return resnet
def get_model_loss(config):
model = None
if config.model.lower() == 'resnext29':
model = Resnext29_init(config)
if config.model.lower() == 'resnet50':
model = Resnet50_init(config)
loss_fun = get_lossfunction(config)
return model, loss_fun
def get_lossfunction(config):
loss_fun = None
if config.loss.lower() == 'crossentropy':
loss_fun = F.cross_entropy
return loss_fun
def get_opti_scheduler(config, model,train_loader=None):
optimizer = None
lr_scheduler = None
if config.classifier_optimizer == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=config.classifier_learning_rate)
if config.classifier_optimizer == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), lr=config.classifier_learning_rate, weight_decay=5e-4, momentum=0.9, nesterov=True)
if 'cifar10' in config.dataset.lower():
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=lambda step: get_lr( # pylint: disable=g-long-lambda
step,
config.epoch * len(train_loader),
1, # lr_lambda computes multiplicative factor
1e-6 / config.classifier_learning_rate))
# lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=config.epoch , eta_min=0)
if 'cub' in config.dataset.lower():
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(config.lr_decay_step), gamma = config.lr_decay_gamma)
return optimizer, lr_scheduler
def get_lr(step, total_steps, lr_max, lr_min):
"""Compute learning rate according to cosine annealing schedule."""
return lr_min + (lr_max - lr_min) * 0.5 * (1 + np.cos(step / total_steps * np.pi))
| import sys
sys.path.append('./Models/')
from resnext import ResneXt as Resnext29
from resnet import ResNet,BasicBlock,Bottleneck
from torchvision_models import load_pretrained, pretrained_settings
import torch
import torch.nn.functional as F
import numpy as np
def Resnext29_init(config):
if config.dataset.lower() == 'cifar10':
model = Resnext29(num_classes=10)
if config.dataset.lower() == 'cifar100':
model = Resnext29(num_classes=100)
if config.dataset.lower() == 'tiny':
model = Resnext29(num_classes=200)
if len(config.resume_model_path) > 0:
from collections import OrderedDict
new_state_dict = OrderedDict()
pretrained_dict = torch.load(config.resume_model_path, map_location=lambda storage, loc: storage)
for k, v in pretrained_dict.items():
if k.startswith('module'):
name = k[7:]
else:
name = k
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
return model
def Resnet50_init(config):
if config.dataset == 'ImageNet':
model = ResNet(Bottleneck, [3, 4, 6, 3],modified=False)
if config.pretrain:
settings = pretrained_settings['resnet50']['imagenet']
resnet = load_pretrained(model, 1000, settings)
return resnet
else:
return model
if config.dataset == 'CUB':
resnet = ResNet(Bottleneck, [3, 4, 6, 3],modified=True,modified_num_classes=200)
if config.pretrain:
if len(config.resume_model_path) > 0:
#resnet = torch.nn.DataParallel(model)
#resnet.cuda()
#resnet.load_state_dict(torch.load(config.resume_model_path))
from collections import OrderedDict
new_state_dict = OrderedDict()
pretrained_dict = torch.load(config.resume_model_path, map_location=lambda storage, loc: storage)
for k, v in pretrained_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
resnet.load_state_dict(new_state_dict)
else:
settings = pretrained_settings['resnet50']['imagenet']
resnet = load_pretrained(resnet, 1000, settings)
return resnet
else:
return resnet
def get_model_loss(config):
model = None
if config.model.lower() == 'resnext29':
model = Resnext29_init(config)
if config.model.lower() == 'resnet50':
model = Resnet50_init(config)
loss_fun = get_lossfunction(config)
return model, loss_fun
def get_lossfunction(config):
loss_fun = None
if config.loss.lower() == 'crossentropy':
loss_fun = F.cross_entropy
return loss_fun
def get_opti_scheduler(config, model,train_loader=None):
optimizer = None
lr_scheduler = None
if config.classifier_optimizer == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=config.classifier_learning_rate)
if config.classifier_optimizer == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), lr=config.classifier_learning_rate, weight_decay=5e-4, momentum=0.9, nesterov=True)
if 'cifar10' in config.dataset.lower():
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=lambda step: get_lr( # pylint: disable=g-long-lambda
step,
config.epoch * len(train_loader),
1, # lr_lambda computes multiplicative factor
1e-6 / config.classifier_learning_rate))
# lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=config.epoch , eta_min=0)
if 'cub' in config.dataset.lower():
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(config.lr_decay_step), gamma = config.lr_decay_gamma)
return optimizer, lr_scheduler
def get_lr(step, total_steps, lr_max, lr_min):
    """Cosine-annealed learning rate at *step* of *total_steps*.

    Decays smoothly from ``lr_max`` at step 0 to ``lr_min`` at the final
    step following half a cosine period.
    """
    cosine = np.cos(step / total_steps * np.pi)
    return lr_min + (lr_max - lr_min) * 0.5 * (1 + cosine)
CODE/Scripts/schools/greatschoolsratings.py | jdills26/cse6242-project-code | 0 | 6622769 | import requests
import pandas as pd
from io import StringIO, BytesIO
from lxml import etree as et
API_KEY = '<GREATSCHOOLS.ORG API KEY GOES HERE>'
def generate_file(name, response):
d = {}
df = pd.DataFrame()
tree = et.fromstring(response.content)
for child in tree:
for children in child:
d[str(children.tag)] = str(children.text)
df = df.append(d, ignore_index=True)
df.to_csv(name + '.csv', sep=',')
if __name__ == "__main__":
elem_url = 'http://api.greatschools.org/schools/DC/Washington/public/elementary-schools?limit=-1&key={}'.format(API_KEY)
middle_url = 'http://api.greatschools.org/schools/DC/Washington/public/middle-schools?limit=-1&key={}'.format(API_KEY)
high_url = 'http://api.greatschools.org/schools/DC/Washington/public/high-schools?limit=-1&key={}'.format(API_KEY)
elem_schools = requests.get(elem_url)
middle_schools = requests.get(middle_url)
high_schools = requests.get(high_url)
generate_file('elementary', elem_schools)
generate_file('middle', middle_schools)
generate_file('high', high_schools)
| import requests
import pandas as pd
from io import StringIO, BytesIO
from lxml import etree as et
API_KEY = '<GREATSCHOOLS.ORG API KEY GOES HERE>'
def generate_file(name, response):
d = {}
df = pd.DataFrame()
tree = et.fromstring(response.content)
for child in tree:
for children in child:
d[str(children.tag)] = str(children.text)
df = df.append(d, ignore_index=True)
df.to_csv(name + '.csv', sep=',')
if __name__ == "__main__":
elem_url = 'http://api.greatschools.org/schools/DC/Washington/public/elementary-schools?limit=-1&key={}'.format(API_KEY)
middle_url = 'http://api.greatschools.org/schools/DC/Washington/public/middle-schools?limit=-1&key={}'.format(API_KEY)
high_url = 'http://api.greatschools.org/schools/DC/Washington/public/high-schools?limit=-1&key={}'.format(API_KEY)
elem_schools = requests.get(elem_url)
middle_schools = requests.get(middle_url)
high_schools = requests.get(high_url)
generate_file('elementary', elem_schools)
generate_file('middle', middle_schools)
generate_file('high', high_schools)
| none | 1 | 3.084067 | 3 | |
llorma_p/configs.py | JoonyoungYi/LLORMA-tensorflow | 9 | 6622770 | GPU_MEMORY_FRAC = 0.95
N_SHOT = 0
N_ANCHOR = 50
PRE_RANK = 5
PRE_LEARNING_RATE = 1e-4
PRE_LAMBDA = 10
LOCAL_RANK = 20
LOCAL_LEARNING_RATE = 1e-2
LOCAL_LAMBDA = 1e-3
BATCH_SIZE = 1000
USE_CACHE = True
| GPU_MEMORY_FRAC = 0.95
N_SHOT = 0
N_ANCHOR = 50
PRE_RANK = 5
PRE_LEARNING_RATE = 1e-4
PRE_LAMBDA = 10
LOCAL_RANK = 20
LOCAL_LEARNING_RATE = 1e-2
LOCAL_LAMBDA = 1e-3
BATCH_SIZE = 1000
USE_CACHE = True
| none | 1 | 1.060787 | 1 | |
slide/snippet/map_composition.py | TomohikoK/PyCat | 0 | 6622771 | <reponame>TomohikoK/PyCat<gh_stars>0
def double(arg: int) -> int:
return arg * 2
x: List[str] = ['a', 'bb']
x1 = list(map(double @ length_of_str, x))
x2 = list(map(double, list(map(length_of_str, x))))
assert x1 == x2 # どちらの値も[2, 4]
| def double(arg: int) -> int:
return arg * 2
x: List[str] = ['a', 'bb']
x1 = list(map(double @ length_of_str, x))
x2 = list(map(double, list(map(length_of_str, x))))
assert x1 == x2 # どちらの値も[2, 4] | ja | 0.407428 | # どちらの値も[2, 4] | 3.571981 | 4 |
ifttt/views.py | wikimedia/ifttt | 33 | 6622772 | # -*- coding: utf-8 -*-
"""
Wikipedia channel for IFTTT
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Copyright 2015 <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import operator
import urllib2
import feedparser
import flask
import flask.views
import werkzeug.contrib.cache
from .utils import url_to_uuid5, utc_to_epoch, utc_to_iso8601
__all__ = ('FeaturedFeedTriggerView',)
feed_cache = werkzeug.contrib.cache.SimpleCache()
class FeaturedFeedTriggerView(flask.views.MethodView):
"""Generic view for IFTT Triggers based on FeaturedFeeds."""
URL_FORMAT = 'http://{0.wiki}/w/api.php?action=featuredfeed&feed={0.feed}'
def get_feed(self):
"""Fetch and parse the feature feed for this class."""
url = self.URL_FORMAT.format(self)
feed = feed_cache.get(url)
if not feed:
feed = feedparser.parse(urllib2.urlopen(url))
feed_cache.set(url, feed, timeout=5 * 60)
return feed
def parse_entry(self, entry):
"""Parse a single feed entry into an IFTTT trigger item."""
id = url_to_uuid5(entry.id)
created_at = utc_to_iso8601(entry.published_parsed)
ts = utc_to_epoch(entry.published_parsed)
return {'created_at': created_at, 'meta': {'id': id, 'timestamp': ts}}
def get_items(self):
"""Get the set of items for this trigger."""
feed = self.get_feed()
feed.entries.sort(key=operator.attrgetter('published_parsed'),
reverse=True)
return map(self.parse_entry, feed.entries)
def post(self):
"""Handle POST requests."""
params = flask.request.get_json(force=True, silent=True) or {}
limit = params.get('limit', 50)
items = self.get_items()
items = items[:limit]
return flask.jsonify(data=items)
| # -*- coding: utf-8 -*-
"""
Wikipedia channel for IFTTT
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Copyright 2015 <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import operator
import urllib2
import feedparser
import flask
import flask.views
import werkzeug.contrib.cache
from .utils import url_to_uuid5, utc_to_epoch, utc_to_iso8601
__all__ = ('FeaturedFeedTriggerView',)
feed_cache = werkzeug.contrib.cache.SimpleCache()
class FeaturedFeedTriggerView(flask.views.MethodView):
"""Generic view for IFTT Triggers based on FeaturedFeeds."""
URL_FORMAT = 'http://{0.wiki}/w/api.php?action=featuredfeed&feed={0.feed}'
def get_feed(self):
"""Fetch and parse the feature feed for this class."""
url = self.URL_FORMAT.format(self)
feed = feed_cache.get(url)
if not feed:
feed = feedparser.parse(urllib2.urlopen(url))
feed_cache.set(url, feed, timeout=5 * 60)
return feed
def parse_entry(self, entry):
"""Parse a single feed entry into an IFTTT trigger item."""
id = url_to_uuid5(entry.id)
created_at = utc_to_iso8601(entry.published_parsed)
ts = utc_to_epoch(entry.published_parsed)
return {'created_at': created_at, 'meta': {'id': id, 'timestamp': ts}}
def get_items(self):
"""Get the set of items for this trigger."""
feed = self.get_feed()
feed.entries.sort(key=operator.attrgetter('published_parsed'),
reverse=True)
return map(self.parse_entry, feed.entries)
def post(self):
"""Handle POST requests."""
params = flask.request.get_json(force=True, silent=True) or {}
limit = params.get('limit', 50)
items = self.get_items()
items = items[:limit]
return flask.jsonify(data=items)
| en | 0.832032 | # -*- coding: utf-8 -*- Wikipedia channel for IFTTT ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Copyright 2015 <NAME> <<EMAIL>> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Generic view for IFTT Triggers based on FeaturedFeeds. Fetch and parse the feature feed for this class. Parse a single feed entry into an IFTTT trigger item. Get the set of items for this trigger. Handle POST requests. | 2.406612 | 2 |
weather_api/weather_app/api_config/bar_chart.py | brian-duffy/yoyo-test | 0 | 6622773 | <reponame>brian-duffy/yoyo-test<filename>weather_api/weather_app/api_config/bar_chart.py
# -*- coding: utf-8 -*-
from graphos.sources.simple import SimpleDataSource
from graphos.renderers.gchart import LineChart, BarChart
def get_chart(data=None, chart='barchart'):
"""
Function to return a chart object for rendering in a view
:param data:
:return:
"""
all_temps = [x['main']['temp'] for i, x in(enumerate(data['list']))]
all_dates = [x['dt_txt'][0:10] for i, x in(enumerate(data['list']))]
_chart = [['Day', 'Temperature°C']]
_chart.extend([all_dates[i], all_temps[i]] for i, x in enumerate(all_dates))
# DataSource object
data_source = SimpleDataSource(data=_chart)
# Chart object
if chart == 'barchart':
chart = BarChart(data_source)
else:
chart = LineChart(data_source)
return chart
| # -*- coding: utf-8 -*-
from graphos.sources.simple import SimpleDataSource
from graphos.renderers.gchart import LineChart, BarChart
def get_chart(data=None, chart='barchart'):
"""
Function to return a chart object for rendering in a view
:param data:
:return:
"""
all_temps = [x['main']['temp'] for i, x in(enumerate(data['list']))]
all_dates = [x['dt_txt'][0:10] for i, x in(enumerate(data['list']))]
_chart = [['Day', 'Temperature°C']]
_chart.extend([all_dates[i], all_temps[i]] for i, x in enumerate(all_dates))
# DataSource object
data_source = SimpleDataSource(data=_chart)
# Chart object
if chart == 'barchart':
chart = BarChart(data_source)
else:
chart = LineChart(data_source)
return chart | en | 0.62018 | # -*- coding: utf-8 -*- Function to return a chart object for rendering in a view :param data: :return: # DataSource object # Chart object | 3.188679 | 3 |
richkit/test/retrieve/test_whois.py | kidmose/richkit | 12 | 6622774 | import unittest
from datetime import datetime
from richkit.retrieve import whois
class WhoisTestCase(unittest.TestCase):
# .dk domains give unknownTld exception !
def test_get_whois_info(self):
# last updated field skipped since it could be None
d = "www.google.com"
w = whois.get_whois_info(d)
self.assertTrue(len(w['registrar']) > 0)
# .com uses "thin" WHOIS, so we get expiry from both registry
# and registrar;
self.assertTrue(len(w['expiration_date']) == 2)
self.assertIsInstance(w['expiration_date'][0], datetime)
self.assertIsInstance(w['expiration_date'][1], datetime)
d = "www.cloudflare.com"
w = whois.get_whois_info(d)
self.assertTrue('registrar' in w)
self.assertTrue(len(w['registrar']) > 0)
self.assertTrue('expiration_date' in w)
# .com uses "thin" WHOIS, so we get expiry from both registry
# and registrar, but they are equal here, so only one is returned;
self.assertIsInstance(w['expiration_date'], datetime)
if __name__ == '__main__':
unittest.main()
| import unittest
from datetime import datetime
from richkit.retrieve import whois
class WhoisTestCase(unittest.TestCase):
# .dk domains give unknownTld exception !
def test_get_whois_info(self):
# last updated field skipped since it could be None
d = "www.google.com"
w = whois.get_whois_info(d)
self.assertTrue(len(w['registrar']) > 0)
# .com uses "thin" WHOIS, so we get expiry from both registry
# and registrar;
self.assertTrue(len(w['expiration_date']) == 2)
self.assertIsInstance(w['expiration_date'][0], datetime)
self.assertIsInstance(w['expiration_date'][1], datetime)
d = "www.cloudflare.com"
w = whois.get_whois_info(d)
self.assertTrue('registrar' in w)
self.assertTrue(len(w['registrar']) > 0)
self.assertTrue('expiration_date' in w)
# .com uses "thin" WHOIS, so we get expiry from both registry
# and registrar, but they are equal here, so only one is returned;
self.assertIsInstance(w['expiration_date'], datetime)
if __name__ == '__main__':
unittest.main()
| en | 0.87655 | # .dk domains give unknownTld exception ! # last updated field skipped since it could be None # .com uses "thin" WHOIS, so we get expiry from both registry # and registrar; # .com uses "thin" WHOIS, so we get expiry from both registry # and registrar, but they are equal here, so only one is returned; | 2.718499 | 3 |
src/upy_platform.py | abraha2d/pb4-firmware | 0 | 6622775 | <gh_stars>0
from os import uname
# noinspection PyUnresolvedReferences
from esp32 import NVS
# noinspection PyUnresolvedReferences
from machine import I2C, Pin, PWM, Signal, TouchPad
# noinspection PyUnresolvedReferences
from network import AP_IF, STA_IF, WLAN
from uasyncio import sleep_ms
class FakeSignal:
pass
version = 4 if "PottyBox 4.0" in uname().machine else 2
boot = Signal(0, Pin.IN, invert=True)
exhaust = Signal(13 if version == 4 else 14, Pin.OUT, value=0)
flush_1 = Signal(25 if version == 4 else 12, Pin.OUT, value=0)
flush_2 = Signal(26 if version == 4 else 13, Pin.OUT, value=0)
i2c = I2C(0, sda=Pin(21), scl=Pin(22))
nvs = NVS("pb4")
s1_led = Signal(2, Pin.OUT, value=0) if version == 4 else FakeSignal()
s1_int = Signal(4, Pin.IN, pull=Pin.PULL_UP) if version == 4 else FakeSignal()
s1_en = Signal(5, Pin.OUT, value=1, invert=True) if version == 4 else FakeSignal()
s2_led = Signal(18, Pin.OUT, value=0) if version == 4 else FakeSignal()
s2_int = Signal(19, Pin.IN, pull=Pin.PULL_UP) if version == 4 else FakeSignal()
s2_en = Signal(23, Pin.OUT, value=1, invert=True) if version == 4 else FakeSignal()
touch_1_pin = Pin(32)
touch_2_pin = Pin(33)
touch_1 = TouchPad(touch_1_pin)
touch_2 = TouchPad(touch_2_pin)
wlan_ap = WLAN(AP_IF)
wlan_sta = WLAN(STA_IF)
if version == 4:
class StatusLED:
BLACK = [0, 0, 0]
BLUE = [0, 0, 1]
CYAN = [0, 1, 1]
GREEN = [0, 1, 0]
MAGENTA = [1, 0, 1]
RED = [1, 0, 0]
WHITE = [1, 1, 1]
YELLOW = [1, 0.5, 0]
APP_BOOTING = MAGENTA, False
APP_SHUTDOWN = YELLOW, False
APP_UPGRADING = YELLOW, True
APP_RESETTING = MAGENTA, True
APP_IDLE = GREEN, False
APP_RUNNING = GREEN, True
APP_ERROR = RED, False
NETWORK_SCANNING = BLUE, True
NETWORK_CONNECTED = BLUE, False
NETWORK_HOTSPOT = WHITE, False
def __init__(self):
self.r = PWM(Pin(27), freq=40000, duty=1023)
self.g = PWM(Pin(14), freq=40000, duty=1023)
self.b = PWM(Pin(12), freq=40000, duty=1023)
self.network_state = None
self.app_state = None
def write(self, color):
r, g, b = color
self.r.duty(int((1 - r) * 1023))
self.g.duty(int((1 - g) * 1023))
self.b.duty(int((1 - b) * 1023))
async def show(self, color, blink=False):
if blink:
for _ in range(10):
self.write(color)
await sleep_ms(100)
self.write(self.BLACK)
await sleep_ms(100)
else:
self.write(color)
await sleep_ms(2000)
async def run(self):
while True:
if self.network_state is not None:
await self.show(*self.network_state)
if self.app_state is not None:
await self.show(*self.app_state)
if self.network_state is None and self.app_state is None:
await self.show(self.BLACK)
else:
assert version == 2
class StatusLED:
BLACK = 0
APP_BOOTING = 1, False
APP_SHUTDOWN = 1, False
APP_UPGRADING = 1, True
APP_RESETTING = 1, True
APP_IDLE = 1, False
APP_RUNNING = 1, True
APP_ERROR = 1, False
NETWORK_SCANNING = 1, True
NETWORK_CONNECTED = 1, False
NETWORK_HOTSPOT = 1, False
def __init__(self):
self.sl = PWM(Pin(5), freq=40000, duty=1023)
self.network_state = None
self.app_state = None
def write(self, duty):
self.sl.duty(int(duty * 1023))
async def show(self, color, blink=False):
if blink:
for _ in range(10):
self.write(color)
await sleep_ms(100)
self.write(self.BLACK)
await sleep_ms(100)
else:
self.write(color)
await sleep_ms(2000)
async def run(self):
while True:
if self.network_state is not None:
await self.show(*self.network_state)
if self.app_state is not None:
await self.show(*self.app_state)
if self.network_state is None and self.app_state is None:
await self.show(self.BLACK)
status = StatusLED()
| from os import uname
# noinspection PyUnresolvedReferences
from esp32 import NVS
# noinspection PyUnresolvedReferences
from machine import I2C, Pin, PWM, Signal, TouchPad
# noinspection PyUnresolvedReferences
from network import AP_IF, STA_IF, WLAN
from uasyncio import sleep_ms
class FakeSignal:
pass
version = 4 if "PottyBox 4.0" in uname().machine else 2
boot = Signal(0, Pin.IN, invert=True)
exhaust = Signal(13 if version == 4 else 14, Pin.OUT, value=0)
flush_1 = Signal(25 if version == 4 else 12, Pin.OUT, value=0)
flush_2 = Signal(26 if version == 4 else 13, Pin.OUT, value=0)
i2c = I2C(0, sda=Pin(21), scl=Pin(22))
nvs = NVS("pb4")
s1_led = Signal(2, Pin.OUT, value=0) if version == 4 else FakeSignal()
s1_int = Signal(4, Pin.IN, pull=Pin.PULL_UP) if version == 4 else FakeSignal()
s1_en = Signal(5, Pin.OUT, value=1, invert=True) if version == 4 else FakeSignal()
s2_led = Signal(18, Pin.OUT, value=0) if version == 4 else FakeSignal()
s2_int = Signal(19, Pin.IN, pull=Pin.PULL_UP) if version == 4 else FakeSignal()
s2_en = Signal(23, Pin.OUT, value=1, invert=True) if version == 4 else FakeSignal()
touch_1_pin = Pin(32)
touch_2_pin = Pin(33)
touch_1 = TouchPad(touch_1_pin)
touch_2 = TouchPad(touch_2_pin)
wlan_ap = WLAN(AP_IF)
wlan_sta = WLAN(STA_IF)
if version == 4:
class StatusLED:
BLACK = [0, 0, 0]
BLUE = [0, 0, 1]
CYAN = [0, 1, 1]
GREEN = [0, 1, 0]
MAGENTA = [1, 0, 1]
RED = [1, 0, 0]
WHITE = [1, 1, 1]
YELLOW = [1, 0.5, 0]
APP_BOOTING = MAGENTA, False
APP_SHUTDOWN = YELLOW, False
APP_UPGRADING = YELLOW, True
APP_RESETTING = MAGENTA, True
APP_IDLE = GREEN, False
APP_RUNNING = GREEN, True
APP_ERROR = RED, False
NETWORK_SCANNING = BLUE, True
NETWORK_CONNECTED = BLUE, False
NETWORK_HOTSPOT = WHITE, False
def __init__(self):
self.r = PWM(Pin(27), freq=40000, duty=1023)
self.g = PWM(Pin(14), freq=40000, duty=1023)
self.b = PWM(Pin(12), freq=40000, duty=1023)
self.network_state = None
self.app_state = None
def write(self, color):
r, g, b = color
self.r.duty(int((1 - r) * 1023))
self.g.duty(int((1 - g) * 1023))
self.b.duty(int((1 - b) * 1023))
async def show(self, color, blink=False):
if blink:
for _ in range(10):
self.write(color)
await sleep_ms(100)
self.write(self.BLACK)
await sleep_ms(100)
else:
self.write(color)
await sleep_ms(2000)
async def run(self):
while True:
if self.network_state is not None:
await self.show(*self.network_state)
if self.app_state is not None:
await self.show(*self.app_state)
if self.network_state is None and self.app_state is None:
await self.show(self.BLACK)
else:
assert version == 2
class StatusLED:
BLACK = 0
APP_BOOTING = 1, False
APP_SHUTDOWN = 1, False
APP_UPGRADING = 1, True
APP_RESETTING = 1, True
APP_IDLE = 1, False
APP_RUNNING = 1, True
APP_ERROR = 1, False
NETWORK_SCANNING = 1, True
NETWORK_CONNECTED = 1, False
NETWORK_HOTSPOT = 1, False
def __init__(self):
self.sl = PWM(Pin(5), freq=40000, duty=1023)
self.network_state = None
self.app_state = None
def write(self, duty):
self.sl.duty(int(duty * 1023))
async def show(self, color, blink=False):
if blink:
for _ in range(10):
self.write(color)
await sleep_ms(100)
self.write(self.BLACK)
await sleep_ms(100)
else:
self.write(color)
await sleep_ms(2000)
async def run(self):
while True:
if self.network_state is not None:
await self.show(*self.network_state)
if self.app_state is not None:
await self.show(*self.app_state)
if self.network_state is None and self.app_state is None:
await self.show(self.BLACK)
status = StatusLED() | en | 0.462762 | # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences | 2.108531 | 2 |
tests/rules/test_category_coverage.py | gitter-badger/arche | 0 | 6622776 | from arche.rules.category_coverage import get_coverage_per_category
from arche.rules.result import Level
from conftest import create_result
import pandas as pd
import pytest
@pytest.mark.parametrize(
"data, tags, expected_messages",
[
(
{"sex": ["male", "female", "male"], "country": ["uk", "uk", "uk"]},
{"category": ["sex", "country"]},
{
Level.INFO: [
(
"2 categories in 'sex'",
None,
None,
pd.Series({"male": 2, "female": 1}, name="sex"),
),
(
"1 categories in 'country'",
None,
None,
pd.Series({"uk": 3}, name="country"),
),
]
},
)
],
)
def test_get_coverage_per_category(data, tags, expected_messages):
assert get_coverage_per_category(pd.DataFrame(data), tags) == create_result(
"Coverage For Scraped Categories", expected_messages
)
| from arche.rules.category_coverage import get_coverage_per_category
from arche.rules.result import Level
from conftest import create_result
import pandas as pd
import pytest
@pytest.mark.parametrize(
"data, tags, expected_messages",
[
(
{"sex": ["male", "female", "male"], "country": ["uk", "uk", "uk"]},
{"category": ["sex", "country"]},
{
Level.INFO: [
(
"2 categories in 'sex'",
None,
None,
pd.Series({"male": 2, "female": 1}, name="sex"),
),
(
"1 categories in 'country'",
None,
None,
pd.Series({"uk": 3}, name="country"),
),
]
},
)
],
)
def test_get_coverage_per_category(data, tags, expected_messages):
assert get_coverage_per_category(pd.DataFrame(data), tags) == create_result(
"Coverage For Scraped Categories", expected_messages
)
| none | 1 | 2.706221 | 3 | |
primary/amber/log.py | KarlTDebiec/MDclt | 0 | 6622777 | # -*- coding: utf-8 -*-
# MDclt.primary.amber.log.py
#
# Copyright (C) 2012-2015 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Classes for transfer of AMBER simulation logs to h5
"""
################################### MODULES ####################################
from __future__ import division, print_function
import os, sys
import numpy as np
from MDclt import Block, Block_Acceptor, primary
################################## FUNCTIONS ###################################
def add_parser(tool_subparsers, **kwargs):
"""
Adds subparser for this analysis to a nascent argument parser
**Arguments:**
:*tool_subparsers*: Argparse subparsers object to add subparser
:*args*: Passed to tool_subparsers.add_parser(...)
:*\*\*kwargs*: Passed to tool_subparsers.add_parser(...)
.. todo:
- Implement nested subparser (should be 'amber log', not just 'log')
"""
from MDclt import overridable_defaults
subparser = primary.add_parser(tool_subparsers,
name = "log",
help = "Load AMBER logs")
arg_groups = {ag.title:ag for ag in subparser._action_groups}
arg_groups["input"].add_argument(
"-frames_per_file",
type = int,
required = False,
help = "Number of frames in each file; used to check if new data " +
"is present")
arg_groups["input"].add_argument(
"-start_time",
type = float,
required = False,
help = "Time of first frame (ns) (optional)")
arg_groups["output"].add_argument(
"-output",
type = str,
required = True,
nargs = "+",
action = overridable_defaults(nargs = 2, defaults = {1: "/log"}),
help = "H5 file and optionally address in which to output data " +
"(default address: /log)")
subparser.set_defaults(analysis = command_line)
def command_line(n_cores = 1, **kwargs):
"""
Provides command line functionality for this analysis
**Arguments:**
:*n_cores*: Number of cores to use
.. todo:
- Figure out syntax to get this into MDclt.primary
"""
from multiprocessing import Pool
from MDclt import pool_director
block_generator = AmberLog_Block_Generator(**kwargs)
block_acceptor = Block_Acceptor(outputs = block_generator.outputs,
**kwargs)
if n_cores == 1: # Serial
for block in block_generator:
block()
block_acceptor.send(block)
else: # Parallel (processes)
pool = Pool(n_cores)
for block in pool.imap_unordered(pool_director, block_generator):
pass
block_acceptor.send(block)
pool.close()
pool.join()
block_acceptor.close()
################################### CLASSES ####################################
class AmberLog_Block_Generator(primary.Primary_Block_Generator):
"""
Generator class that prepares blocks of analysis
"""
fields = [("TIME(PS)", "time", "ns"),
("Etot", "total energy", "kcal mol-1"),
("EPtot", "potential energy", "kcal mol-1"),
("EKtot", "kinetic energy", "kcal mol-1"),
("BOND", "bond energy", "kcal mol-1"),
("ANGLE", "angle energy", "kcal mol-1"),
("DIHED", "dihedral energy", "kcal mol-1"),
("EELEC", "coulomb energy", "kcal mol-1"),
("1-4 EEL", "coulomb 1-4 energy", "kcal mol-1"),
("VDWAALS", "van der Waals energy", "kcal mol-1"),
("1-4 NB", "van der Waals 1-4 energy", "kcal mol-1"),
("EHBOND", "hydrogen bond energy", "kcal mol-1"),
("RESTRAINT", "position restraint energy", "kcal mol-1"),
("EKCMT", "center of mass motion kinetic energy",
"kcal mol-1"),
("VIRIAL", "virial energy", "kcal mol-1"),
("EPOLZ", "polarization energy", "kcal mol-1"),
("TEMP(K)", "temperature", "K"),
("PRESS", "pressure", "bar"),
("VOLUME", "volume", "A3"),
("Density", "density", "g/cm3"),
("Dipole convergence: rms",
"dipole convergence rms", None),
("iters", "dipole convergence iterations",
None)]
def __init__(self, infiles, output, frames_per_file = None, **kwargs):
"""
Initializes generator
**Arguments:**
:*output*: List including path to h5 file and
address within h5 file
:*infiles*: List of infiles
:*frames_per_file*: Number of frames in each infile
.. todo:
- Intelligently break lists of infiles into blocks larger
than 1
"""
# Input
self.infiles = infiles
self.frames_per_file = frames_per_file
self.infiles_per_block = 1
# Output
self.outputs = [(output[0], os.path.normpath(output[1]))]
# Adjust start time, if applicable
self.get_time_offset(**kwargs)
# Determine dtype of input data
self.get_dataset_format(**kwargs)
super(AmberLog_Block_Generator, self).__init__(**kwargs)
# Disregard last infile, if applicable
self.cut_incomplete_infiles(**kwargs)
# Output
self.outputs = [(output[0], os.path.normpath(output[1]),
(self.final_slice.stop - self.final_slice.start,))]
def next(self):
"""
Prepares and returns next Block of analysis
"""
if len(self.infiles) == 0:
raise StopIteration()
else:
block_infiles = self.infiles[:self.infiles_per_block]
block_slice = slice(self.start_index,
self.start_index + len(block_infiles) * self.frames_per_file, 1)
self.infiles = self.infiles[self.infiles_per_block:]
self.start_index += len(block_infiles) * self.frames_per_file
return AmberLog_Block(infiles = block_infiles,
raw_keys = self.raw_keys,
new_keys = self.new_keys,
output = self.outputs[0],
slc = block_slice,
time_offset = self.time_offset,
dtype = self.dtype)
def get_time_offset(self, start_time = None, **kwargs):
"""
Calculates time offset based on desired and actual time of first frame
**Arguments:**
:*start_time*: Desired time of first frame (ns); typically 0.001
"""
from subprocess import Popen, PIPE
if start_time is None:
self.time_offset = 0
else:
with open(os.devnull, "w") as fnull:
command = "cat {0} | ".format(self.infiles[0]) + \
"grep -m 1 'TIME(PS)' | " + \
"awk '{{print $6}}'"
process = Popen(command,
stdout = PIPE,
stderr = fnull,
shell = True)
result = process.stdout.read()
self.time_offset = float(result) / -1000 + start_time
def get_dataset_format(self, **kwargs):
"""
Determines format of dataset
"""
from h5py import File as h5
out_path, out_address = self.outputs[0]
with h5(out_path) as out_h5:
if out_address in out_h5:
# If dataset already exists, extract current dtype
self.dtype = out_h5[out_address].dtype
self.new_keys = list(self.dtype.names)
self.raw_keys = []
for key in self.new_keys:
self.raw_keys += [r for r, n, _ in self.fields if n == key]
self.attrs = dict(out_h5[out_address].attrs)
else:
# Otherwise, determine fields present in infile
raw_keys = []
breaking = False
with open(self.infiles[0], "r") as infile:
raw_text = [line.strip() for line in infile.readlines()]
for i in xrange(len(raw_text)):
if breaking: break
if raw_text[i].startswith("NSTEP"):
while True:
if raw_text[i].startswith("----------"):
breaking = True
break
for j, field in enumerate(
raw_text[i].split("=")[:-1]):
if j == 0:
raw_keys += [field.strip()]
else:
raw_keys += [" ".join(field.split()[1:])]
i += 1
# Determine appropriate dtype of new data
self.raw_keys = ["TIME(PS)"]
self.new_keys = ["time"]
self.dtype = [("time", "f4")]
self.attrs = {"time units": "ns"}
for raw_key, new_key, units in self.fields[1:]:
if raw_key in raw_keys:
self.raw_keys += [raw_key]
self.new_keys += [new_key]
self.dtype += [(new_key, "f4")]
if units is not None:
self.attrs[new_key + " units"] = units
def cut_incomplete_infiles(self, **kwargs):
"""
Checks if log of last infile is incomplete; if so removes from
list of infiles
"""
from subprocess import Popen, PIPE
if len(self.infiles) == 0:
return
with open(os.devnull, "w") as fnull:
command = "tail -n 1 {0}".format(self.infiles[-1])
process = Popen(command,
stdout = PIPE,
stderr = fnull,
shell = True)
result = process.stdout.read()
if not (result.startswith("| Total wall time:") # pmemd.cuda
or result.startswith("| Master Total wall time:")): # pmemd
self.infiles.pop(-1)
self.final_slice = slice(self.final_slice.start,
self.final_slice.stop - self.frames_per_file, 1)
class AmberLog_Block(Block):
"""
Independent block of analysis
"""
def __init__(self, infiles, raw_keys, new_keys, output, dtype, slc,
time_offset = 0, attrs = {}, **kwargs):
"""
Initializes block of analysis
**Arguments:**
:*infiles*: List of infiles
:*raw_keys*: Original names of fields in Amber mdout
:*new_keys*: Desired names of fields in nascent dataset
:*output*: Path to h5 file and address within h5 file
:*dtype*: Data type of nascent dataset
:*slc*: Slice within dataset at which this block
will be stored
:*time_offset*: Offset by which to adjust simulation time
:*attrs*: Attributes to add to dataset
"""
super(AmberLog_Block, self).__init__(**kwargs)
self.infiles = infiles
self.raw_keys = raw_keys
self.new_keys = new_keys
self.time_offset = time_offset
self.output = output
self.datasets = {self.output: dict(slc = slc, attrs = attrs,
data = np.empty(slc.stop - slc.start, dtype))}
def __call__(self, **kwargs):
"""
Runs this block of analysis
"""
# Load raw data from each infile
print(self.infiles)
raw_data = {raw_key: [] for raw_key in self.raw_keys}
for infile in self.infiles:
with open(infile, "r") as infile:
raw_text = [line.strip() for line in infile.readlines()]
i = 0
while i < len(raw_text):
if raw_text[i].startswith("A V E R A G E S"): break
if raw_text[i].startswith("NSTEP"):
while True:
if raw_text[i].startswith("----------"): break
line = raw_text[i].split("=")
for j, field in enumerate(line[:-1]):
if j == 0:
raw_key = field.strip()
else:
raw_key = " ".join(field.split()[1:])
value = line[j+1].split()[0]
if raw_key in self.raw_keys:
raw_data[raw_key] += [value]
i += 1
i += 1
# Copy from raw_data to new_data
self.datasets[self.output]["data"]["time"] = (np.array(
raw_data["TIME(PS)"], np.float) / 1000) + self.time_offset
for raw_key, new_key in zip(self.raw_keys[1:], self.new_keys[1:]):
try:
self.datasets[self.output]["data"][new_key] = np.array(
raw_data[raw_key])
except:
print(raw_data[raw_key])
print(raw_key)
raise
| # -*- coding: utf-8 -*-
# MDclt.primary.amber.log.py
#
# Copyright (C) 2012-2015 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Classes for transfer of AMBER simulation logs to h5
"""
################################### MODULES ####################################
from __future__ import division, print_function
import os, sys
import numpy as np
from MDclt import Block, Block_Acceptor, primary
################################## FUNCTIONS ###################################
def add_parser(tool_subparsers, **kwargs):
    """
    Adds subparser for this analysis to a nascent argument parser

    **Arguments:**
        :*tool_subparsers*: Argparse subparsers object to add subparser
        :*kwargs*:          Ignored

    .. todo:
        - Implement nested subparser (should be 'amber log', not just 'log')
    """
    from MDclt import overridable_defaults

    subparser = primary.add_parser(
        tool_subparsers, name = "log", help = "Load AMBER logs")
    arg_groups = {group.title: group for group in subparser._action_groups}

    arg_groups["input"].add_argument(
        "-frames_per_file", type = int, required = False,
        help = "Number of frames in each file; used to check if new data " +
               "is present")
    arg_groups["input"].add_argument(
        "-start_time", type = float, required = False,
        help = "Time of first frame (ns) (optional)")
    arg_groups["output"].add_argument(
        "-output", type = str, required = True, nargs = "+",
        action = overridable_defaults(nargs = 2, defaults = {1: "/log"}),
        help = "H5 file and optionally address in which to output data " +
               "(default address: /log)")
    subparser.set_defaults(analysis = command_line)
def command_line(n_cores = 1, **kwargs):
    """
    Provides command line functionality for this analysis

    Prepares blocks of analysis and runs them either serially or in a
    process pool, sending each completed block to a Block_Acceptor for
    storage in the output h5 file.

    **Arguments:**
        :*n_cores*: Number of cores to use
        :*kwargs*:  Passed to AmberLog_Block_Generator and Block_Acceptor

    .. todo:
        - Figure out syntax to get this into MDclt.primary
    """
    from multiprocessing import Pool
    from MDclt import pool_director

    block_generator = AmberLog_Block_Generator(**kwargs)
    block_acceptor = Block_Acceptor(outputs = block_generator.outputs,
                                    **kwargs)
    if n_cores == 1:                # Serial
        for block in block_generator:
            block()
            block_acceptor.send(block)
    else:                           # Parallel (processes)
        pool = Pool(n_cores)
        # pool_director executes each block in a worker process; completed
        # blocks are stored here in the parent as they arrive
        for block in pool.imap_unordered(pool_director, block_generator):
            block_acceptor.send(block)      # (removed dead 'pass' statement)
        pool.close()
        pool.join()
    block_acceptor.close()
################################### CLASSES ####################################
class AmberLog_Block_Generator(primary.Primary_Block_Generator):
    """
    Generator class that prepares blocks of analysis

    Yields one AmberLog_Block per infile. Iteration state (start_index,
    final_slice) is expected to be initialized by the
    Primary_Block_Generator base class -- confirm when modifying.
    """

    # (raw mdout field name, dataset field name, units); a units value of
    # None means no units attribute is recorded for that field
    fields = [("TIME(PS)", "time", "ns"),
              ("Etot", "total energy", "kcal mol-1"),
              ("EPtot", "potential energy", "kcal mol-1"),
              ("EKtot", "kinetic energy", "kcal mol-1"),
              ("BOND", "bond energy", "kcal mol-1"),
              ("ANGLE", "angle energy", "kcal mol-1"),
              ("DIHED", "dihedral energy", "kcal mol-1"),
              ("EELEC", "coulomb energy", "kcal mol-1"),
              ("1-4 EEL", "coulomb 1-4 energy", "kcal mol-1"),
              ("VDWAALS", "van der Waals energy", "kcal mol-1"),
              ("1-4 NB", "van der Waals 1-4 energy", "kcal mol-1"),
              ("EHBOND", "hydrogen bond energy", "kcal mol-1"),
              ("RESTRAINT", "position restraint energy", "kcal mol-1"),
              ("EKCMT", "center of mass motion kinetic energy",
               "kcal mol-1"),
              ("VIRIAL", "virial energy", "kcal mol-1"),
              ("EPOLZ", "polarization energy", "kcal mol-1"),
              ("TEMP(K)", "temperature", "K"),
              ("PRESS", "pressure", "bar"),
              ("VOLUME", "volume", "A3"),
              ("Density", "density", "g/cm3"),
              ("Dipole convergence: rms",
               "dipole convergence rms", None),
              ("iters", "dipole convergence iterations",
               None)]

    def __init__(self, infiles, output, frames_per_file = None, **kwargs):
        """
        Initializes generator

        **Arguments:**
            :*output*:          List including path to h5 file and
                                address within h5 file
            :*infiles*:         List of infiles
            :*frames_per_file*: Number of frames in each infile

        .. todo:
            - Intelligently break lists of infiles into blocks larger
              than 1
        """
        # Input
        self.infiles = infiles
        self.frames_per_file = frames_per_file
        self.infiles_per_block = 1
        # Output
        self.outputs = [(output[0], os.path.normpath(output[1]))]
        # Adjust start time, if applicable
        self.get_time_offset(**kwargs)
        # Determine dtype of input data
        self.get_dataset_format(**kwargs)
        super(AmberLog_Block_Generator, self).__init__(**kwargs)
        # Disregard last infile, if applicable
        self.cut_incomplete_infiles(**kwargs)
        # Output, now including the final dataset shape
        self.outputs = [(output[0], os.path.normpath(output[1]),
                         (self.final_slice.stop - self.final_slice.start,))]

    def next(self):
        """
        Prepares and returns next Block of analysis
        """
        if len(self.infiles) == 0:
            raise StopIteration()
        else:
            block_infiles = self.infiles[:self.infiles_per_block]
            block_slice = slice(self.start_index,
                                self.start_index +
                                len(block_infiles) * self.frames_per_file, 1)
            self.infiles = self.infiles[self.infiles_per_block:]
            self.start_index += len(block_infiles) * self.frames_per_file
            return AmberLog_Block(infiles = block_infiles,
                                  raw_keys = self.raw_keys,
                                  new_keys = self.new_keys,
                                  output = self.outputs[0],
                                  slc = block_slice,
                                  time_offset = self.time_offset,
                                  dtype = self.dtype)

    # Python 3 iterator protocol alias; 'next' above keeps the Python 2 name
    __next__ = next

    def get_time_offset(self, start_time = None, **kwargs):
        """
        Calculates time offset based on desired and actual time of first
        frame

        **Arguments:**
            :*start_time*: Desired time of first frame (ns); typically 0.001
        """
        from subprocess import Popen, PIPE
        if start_time is None:
            self.time_offset = 0
        else:
            with open(os.devnull, "w") as fnull:
                # Extract the first TIME(PS) value from the first infile;
                # only the first string is run through format(), so the
                # doubled braces reach awk literally (nested braces are
                # legal awk) -- confirm before simplifying
                command = "cat {0} | ".format(self.infiles[0]) + \
                          "grep -m 1 'TIME(PS)' | " + \
                          "awk '{{print $6}}'"
                process = Popen(command,
                                stdout = PIPE,
                                stderr = fnull,
                                shell = True)
                # decode(): Popen pipes yield bytes under Python 3
                result = process.stdout.read().decode()
                self.time_offset = float(result) / -1000 + start_time

    def get_dataset_format(self, **kwargs):
        """
        Determines format of dataset
        """
        from h5py import File as h5
        out_path, out_address = self.outputs[0]
        with h5(out_path) as out_h5:
            if out_address in out_h5:
                # If dataset already exists, extract current dtype
                self.dtype = out_h5[out_address].dtype
                self.new_keys = list(self.dtype.names)
                self.raw_keys = []
                for key in self.new_keys:
                    self.raw_keys += [r for r, n, _ in self.fields if n == key]
                self.attrs = dict(out_h5[out_address].attrs)
            else:
                # Otherwise, determine fields present in infile
                raw_keys = []
                breaking = False
                with open(self.infiles[0], "r") as infile:
                    raw_text = [line.strip() for line in infile.readlines()]
                # range instead of xrange: works under both Python 2 and 3
                for i in range(len(raw_text)):
                    if breaking: break
                    if raw_text[i].startswith("NSTEP"):
                        while True:
                            if raw_text[i].startswith("----------"):
                                breaking = True
                                break
                            for j, field in enumerate(
                                    raw_text[i].split("=")[:-1]):
                                if j == 0:
                                    raw_keys += [field.strip()]
                                else:
                                    raw_keys += [" ".join(field.split()[1:])]
                            i += 1
                # Determine appropriate dtype of new data; time is always
                # first, remaining fields follow the order of self.fields
                self.raw_keys = ["TIME(PS)"]
                self.new_keys = ["time"]
                self.dtype = [("time", "f4")]
                self.attrs = {"time units": "ns"}
                for raw_key, new_key, units in self.fields[1:]:
                    if raw_key in raw_keys:
                        self.raw_keys += [raw_key]
                        self.new_keys += [new_key]
                        self.dtype += [(new_key, "f4")]
                        if units is not None:
                            self.attrs[new_key + " units"] = units

    def cut_incomplete_infiles(self, **kwargs):
        """
        Checks if log of last infile is incomplete; if so removes from
        list of infiles
        """
        from subprocess import Popen, PIPE
        if len(self.infiles) == 0:
            return
        with open(os.devnull, "w") as fnull:
            command = "tail -n 1 {0}".format(self.infiles[-1])
            process = Popen(command,
                            stdout = PIPE,
                            stderr = fnull,
                            shell = True)
            # decode(): Popen pipes yield bytes under Python 3, and the
            # startswith() checks below compare against str
            result = process.stdout.read().decode()
        if not (result.startswith("| Total wall time:")             # pmemd.cuda
                or result.startswith("| Master Total wall time:")): # pmemd
            self.infiles.pop(-1)
            self.final_slice = slice(self.final_slice.start,
                                     self.final_slice.stop -
                                     self.frames_per_file, 1)
class AmberLog_Block(Block):
    """
    Independent block of analysis
    """
    def __init__(self, infiles, raw_keys, new_keys, output, dtype, slc,
                 time_offset = 0, attrs = None, **kwargs):
        """
        Initializes block of analysis

        **Arguments:**
            :*infiles*:     List of infiles
            :*raw_keys*:    Original names of fields in Amber mdout
            :*new_keys*:    Desired names of fields in nascent dataset
            :*output*:      Path to h5 file and address within h5 file
            :*dtype*:       Data type of nascent dataset
            :*slc*:         Slice within dataset at which this block
                            will be stored
            :*time_offset*: Offset by which to adjust simulation time
            :*attrs*:       Attributes to add to dataset
        """
        super(AmberLog_Block, self).__init__(**kwargs)
        # A mutable default ('attrs = {}') is shared across calls; use a
        # None sentinel instead
        if attrs is None:
            attrs = {}
        self.infiles = infiles
        self.raw_keys = raw_keys
        self.new_keys = new_keys
        self.time_offset = time_offset
        self.output = output
        self.datasets = {self.output: dict(slc = slc, attrs = attrs,
                         data = np.empty(slc.stop - slc.start, dtype))}

    def __call__(self, **kwargs):
        """
        Runs this block of analysis

        Parses each Amber mdout infile, collecting the value of every
        selected field from each 'NSTEP' record, then stores the results
        in this block's nascent dataset.
        """
        # Load raw data from each infile
        print(self.infiles)  # NOTE(review): debug output; consider removing
        raw_data = {raw_key: [] for raw_key in self.raw_keys}
        for infile in self.infiles:
            # File handle renamed; it previously shadowed the loop variable
            with open(infile, "r") as mdout:
                raw_text = [line.strip() for line in mdout.readlines()]
            i = 0
            while i < len(raw_text):
                # Records after 'A V E R A G E S' summarize prior frames
                if raw_text[i].startswith("A V E R A G E S"): break
                if raw_text[i].startswith("NSTEP"):
                    # Bounded so a truncated file without a closing
                    # '----------' line cannot raise IndexError
                    while i < len(raw_text):
                        if raw_text[i].startswith("----------"): break
                        line = raw_text[i].split("=")
                        for j, field in enumerate(line[:-1]):
                            if j == 0:
                                raw_key = field.strip()
                            else:
                                raw_key = " ".join(field.split()[1:])
                            value = line[j+1].split()[0]
                            if raw_key in self.raw_keys:
                                raw_data[raw_key].append(value)
                        i += 1
                i += 1
        # Copy from raw_data to new_data; time converted from ps to ns.
        # np.float was removed in NumPy >= 1.24; builtin float is equivalent.
        self.datasets[self.output]["data"]["time"] = (np.array(
            raw_data["TIME(PS)"], float) / 1000) + self.time_offset
        for raw_key, new_key in zip(self.raw_keys[1:], self.new_keys[1:]):
            try:
                self.datasets[self.output]["data"][new_key] = np.array(
                    raw_data[raw_key])
            except Exception:
                # Report the offending field before re-raising (was a bare
                # 'except:')
                print(raw_data[raw_key])
                print(raw_key)
                raise
| en | 0.568547 | # -*- coding: utf-8 -*- # MDclt.primary.amber.log.py # # Copyright (C) 2012-2015 <NAME> # All rights reserved. # # This software may be modified and distributed under the terms of the # BSD license. See the LICENSE file for details. Classes for transfer of AMBER simulation logs to h5 ################################### MODULES #################################### ################################## FUNCTIONS ################################### Adds subparser for this analysis to a nascent argument parser **Arguments:** :*tool_subparsers*: Argparse subparsers object to add subparser :*args*: Passed to tool_subparsers.add_parser(...) :*\*\*kwargs*: Passed to tool_subparsers.add_parser(...) .. todo: - Implement nested subparser (should be 'amber log', not just 'log') Provides command line functionality for this analysis **Arguments:** :*n_cores*: Number of cores to use .. todo: - Figure out syntax to get this into MDclt.primary # Serial # Parallel (processes) ################################### CLASSES #################################### Generator class that prepares blocks of analysis Initializes generator **Arguments:** :*output*: List including path to h5 file and address within h5 file :*infiles*: List of infiles :*frames_per_file*: Number of frames in each infile .. 
todo: - Intelligently break lists of infiles into blocks larger than 1 # Input # Output # Adjust start time, if applicable # Determine dtype of input data # Disregard last infile, if applicable # Output Prepares and returns next Block of analysis Calculates time offset based on desired and actual time of first frame **Arguments:** :*start_time*: Desired time of first frame (ns); typically 0.001 Determines format of dataset # If dataset already exists, extract current dtype # Otherwise, determine fields present in infile # Determine appropriate dtype of new data Checks if log of last infile is incomplete; if so removes from list of infiles # pmemd.cuda # pmemd Independent block of analysis Initializes block of analysis **Arguments:** :*infiles*: List of infiles :*raw_keys*: Original names of fields in Amber mdout :*new_keys*: Desired names of fields in nascent dataset :*output*: Path to h5 file and address within h5 file :*dtype*: Data type of nascent dataset :*slc*: Slice within dataset at which this block will be stored :*time_offset*: Offset by which to adjust simulation time :*attrs*: Attributes to add to dataset Runs this block of analysis # Load raw data from each infile # Copy from raw_data to new_data | 2.219608 | 2 |
FutureScope/stylize_image.py | andrewstito/Image-Style-Transfer | 0 | 6622778 | <reponame>andrewstito/Image-Style-Transfer
#imports
import os
import numpy as np
from os.path import exists
from sys import stdout
import utils
from argparse import ArgumentParser
import tensorflow as tf
import transform
NETWORK_PATH='networks'
def build_parser():
    """Build the command-line parser for the stylization script."""
    parser = ArgumentParser()
    # (flag, options) pairs: --content and --output-path are mandatory,
    # --network-path falls back to the NETWORK_PATH module default
    specs = [
        ('--content',
         dict(type=str, dest='content', help='content image path',
              metavar='CONTENT', required=True)),
        ('--network-path',
         dict(type=str, dest='network_path',
              help='path to network (default %(default)s)',
              metavar='NETWORK_PATH', default=NETWORK_PATH)),
        ('--output-path',
         dict(type=str, dest='output_path', help='path for output',
              metavar='OUTPUT_PATH', required=True)),
    ]
    for flag, options in specs:
        parser.add_argument(flag, **options)
    return parser
# content and trained network path check
def check_opts(opts):
    """
    Validate that the content image and the trained-network path exist.

    opts: parsed argparse namespace with 'content' and 'network_path'.
    Raises FileNotFoundError when either path is missing. (Previously
    used 'assert', which is silently stripped when Python runs with -O.)
    """
    if not exists(opts.content):
        raise FileNotFoundError("content not found: %s" % opts.content)
    if not exists(opts.network_path):
        raise FileNotFoundError("network not found: %s" % opts.network_path)
#main
def main():
    """Parse arguments, prepare the content image, stylize it, and save."""
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)
    network = options.network_path
    if not os.path.isdir(network):
        parser.error("Network %s does not exist." % network)
    # The model requires both image dimensions to be divisible by 4, so
    # crop each down to the nearest multiple of 4 before inference.
    content_image = utils.load_image(options.content)
    height = content_image.shape[0] - content_image.shape[0] % 4
    width = content_image.shape[1] - content_image.shape[1] % 4
    cropped = content_image[:height, :width, :]
    # Add a leading batch dimension of 1 for the network input
    batched = cropped.reshape((1,) + cropped.shape)
    prediction = ffwd(batched, network)
    utils.save_image(prediction, options.output_path)
def ffwd(content, network_path):
    """
    Run one feed-forward pass of the style-transfer network on *content*.

    content:      batched image array; the caller (main) reshapes to
                  (1, H, W, C) -- confirm if called from elsewhere
    network_path: directory containing a TensorFlow checkpoint
    Returns the stylized image (first element of the output batch).
    Raises Exception when no checkpoint is found under network_path.
    """
    with tf.Session() as sess:
        img_placeholder = tf.placeholder(tf.float32, shape=content.shape,
                                         name='img_placeholder')
        # Build the transform network graph over the placeholder
        network = transform.net(img_placeholder)
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(network_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            raise Exception("No checkpoint found...")
        prediction = sess.run(network, feed_dict={img_placeholder:content})
        return prediction[0]
if __name__ == '__main__':
main()
| #imports
import os
import numpy as np
from os.path import exists
from sys import stdout
import utils
from argparse import ArgumentParser
import tensorflow as tf
import transform
NETWORK_PATH='networks'
def build_parser():
    """Build the CLI parser: --content and --output-path are required,
    --network-path defaults to the NETWORK_PATH module constant."""
    parser = ArgumentParser()
    parser.add_argument('--content', type=str,
                        dest='content', help='content image path',
                        metavar='CONTENT', required=True)
    parser.add_argument('--network-path', type=str,
                        dest='network_path',
                        help='path to network (default %(default)s)',
                        metavar='NETWORK_PATH', default=NETWORK_PATH)
    parser.add_argument('--output-path', type=str,
                        dest='output_path',
                        help='path for output',
                        metavar='OUTPUT_PATH', required=True)
    return parser
# content and trained network path check
def check_opts(opts):
    """Abort (AssertionError) unless the content image and network exist."""
    assert exists(opts.content), "content not found!"
    assert exists(opts.network_path), "network not found!"
#main
def main():
    """Parse args, crop the content image to multiples of 4, stylize, save."""
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)
    network = options.network_path
    if not os.path.isdir(network):
        parser.error("Network %s does not exist." % network)
    # Checking if image size is div by 4. This is a pre-condition of the model
    content_image = utils.load_image(options.content)
    reshaped_content_height = (content_image.shape[0] - content_image.shape[0] % 4)
    reshaped_content_width = (content_image.shape[1] - content_image.shape[1] % 4)
    reshaped_content_image = content_image[:reshaped_content_height, :reshaped_content_width, :]
    # Add a leading batch dimension of 1 for the network input
    reshaped_content_image = np.ndarray.reshape(reshaped_content_image, (1,) + reshaped_content_image.shape)
    prediction = ffwd(reshaped_content_image, network)
    utils.save_image(prediction, options.output_path)
def ffwd(content, network_path):
    """Restore the checkpointed transform network and run one forward pass.
    Raises Exception when no checkpoint is found under network_path."""
    with tf.Session() as sess:
        img_placeholder = tf.placeholder(tf.float32, shape=content.shape,
                                         name='img_placeholder')
        network = transform.net(img_placeholder)
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(network_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            raise Exception("No checkpoint found...")
        prediction = sess.run(network, feed_dict={img_placeholder:content})
        return prediction[0]
if __name__ == '__main__':
main() | en | 0.86752 | #imports # content and trained network path check #main # Checking if image size is div by 4. This is a pre-consdition in the model implemented | 2.680131 | 3 |
mentors/mentors/models.py | mattfreire/mentors | 1 | 6622779 | import uuid
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models import Sum
User = get_user_model()
class Mentor(models.Model):
    """Profile attached one-to-one to a User who offers mentoring."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Whether the mentor is currently available for sessions
    is_active = models.BooleanField(default=False)
    rate = models.IntegerField(default=1000) # cents
    title = models.CharField(max_length=50)
    bio = models.TextField()
    profile_picture = models.ImageField(blank=True, null=True)
    # Approval flag -- presumably set by staff; confirm workflow
    approved = models.BooleanField(default=False)
    def __str__(self):
        # NOTE(review): assumes the custom User model defines 'name';
        # MentorSession.__str__ below uses 'username' -- inconsistent
        return self.user.name
class MentorSession(models.Model):
    """A mentoring session between a mentor and a client user."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    mentor = models.ForeignKey(Mentor, related_name="client_sessions", on_delete=models.CASCADE)
    client = models.ForeignKey(User, related_name="mentor_sessions", on_delete=models.CASCADE)
    start_time = models.DateTimeField(auto_now_add=True)
    end_time = models.DateTimeField(blank=True, null=True)
    session_length = models.IntegerField(blank=True, null=True) # seconds
    completed = models.BooleanField(default=False)
    paid = models.BooleanField(default=False)
    def __str__(self):
        return self.mentor.user.username
    @classmethod
    def calculate_session_length(cls, id):
        """Sum the lengths (seconds) of all events of the session with the
        given primary key.

        NOTE(review): 'id' shadows the builtin; SUM over no events yields
        None rather than 0.
        """
        mentor_session = MentorSession.objects.get(id=id)
        event_sum = mentor_session.events\
            .all()\
            .aggregate(session_length_sum=Sum("session_length"))
        return event_sum["session_length_sum"]
class MentorSessionEvent(models.Model):
    """One timed segment (start/stop interval) within a MentorSession."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    mentor_session = models.ForeignKey(MentorSession, related_name="events", on_delete=models.CASCADE)
    start_time = models.DateTimeField(auto_now_add=True)
    end_time = models.DateTimeField(blank=True, null=True)
    session_length = models.IntegerField(blank=True, null=True) # seconds
    def __str__(self):
        return str(self.id)
class Review(models.Model):
    """A client's one-off rating and review of a completed session."""
    session = models.OneToOneField(MentorSession, on_delete=models.CASCADE)
    description = models.TextField()
    rating = models.IntegerField(default=5)
    timestamp = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return str(self.session)
| import uuid
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models import Sum
User = get_user_model()
class Mentor(models.Model):
    """Profile attached one-to-one to a User who offers mentoring."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Whether the mentor is currently available for sessions
    is_active = models.BooleanField(default=False)
    rate = models.IntegerField(default=1000) # cents
    title = models.CharField(max_length=50)
    bio = models.TextField()
    profile_picture = models.ImageField(blank=True, null=True)
    # Approval flag -- presumably set by staff; confirm workflow
    approved = models.BooleanField(default=False)
    def __str__(self):
        # NOTE(review): assumes the custom User model defines 'name';
        # MentorSession.__str__ below uses 'username' -- inconsistent
        return self.user.name
class MentorSession(models.Model):
    """A mentoring session between a mentor and a client user."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    mentor = models.ForeignKey(Mentor, related_name="client_sessions", on_delete=models.CASCADE)
    client = models.ForeignKey(User, related_name="mentor_sessions", on_delete=models.CASCADE)
    start_time = models.DateTimeField(auto_now_add=True)
    end_time = models.DateTimeField(blank=True, null=True)
    session_length = models.IntegerField(blank=True, null=True) # seconds
    completed = models.BooleanField(default=False)
    paid = models.BooleanField(default=False)
    def __str__(self):
        return self.mentor.user.username
    @classmethod
    def calculate_session_length(cls, id):
        """Sum the lengths (seconds) of all events of the session with the
        given primary key.

        NOTE(review): 'id' shadows the builtin; SUM over no events yields
        None rather than 0.
        """
        mentor_session = MentorSession.objects.get(id=id)
        event_sum = mentor_session.events\
            .all()\
            .aggregate(session_length_sum=Sum("session_length"))
        return event_sum["session_length_sum"]
class MentorSessionEvent(models.Model):
    """One timed segment (start/stop interval) within a MentorSession."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    mentor_session = models.ForeignKey(MentorSession, related_name="events", on_delete=models.CASCADE)
    start_time = models.DateTimeField(auto_now_add=True)
    end_time = models.DateTimeField(blank=True, null=True)
    session_length = models.IntegerField(blank=True, null=True) # seconds
    def __str__(self):
        return str(self.id)
class Review(models.Model):
    """A client's one-off rating and review of a completed session."""
    session = models.OneToOneField(MentorSession, on_delete=models.CASCADE)
    description = models.TextField()
    rating = models.IntegerField(default=5)
    timestamp = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return str(self.session)
| en | 0.653055 | # cents # seconds # seconds | 2.228348 | 2 |
main.py | junaidrahim/kiitwifi-speedtest | 3 | 6622780 | <reponame>junaidrahim/kiitwifi-speedtest
#!/usr/bin/python3
# this is the main script to run the speedtest every 5 minutes
# (each cycle is one speedtest run plus a 300 s sleep, so the true period
#  is 5 minutes plus the test's own duration)
import subprocess
from datetime import datetime
import time

LOG_PATH = "/home/junaid/code/other/kiitspeedtest/data.txt"

while True:
    timestamp = datetime.now()
    result = subprocess.run(['speedtest-cli'],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # 'with' closes (and flushes) the log each cycle; the original opened
    # a new file handle every iteration and never closed any of them
    with open(LOG_PATH, "a") as f:
        f.write("{}\n\n".format(timestamp))
        f.write(result.stdout.decode("utf-8"))
        f.write("\n---------------------------------------------------------------------\n\n")
    time.sleep(300)
time.sleep(300) | #!/usr/bin/python3
# this is the main script to run the speedtest every 5 minutes
import subprocess
from datetime import datetime
import time
while True:
    timestamp = datetime.now()
    # NOTE(review): file handle is never closed; one handle leaks per cycle
    f = open("/home/junaid/code/other/kiitspeedtest/data.txt", "a")
    result = subprocess.run(['speedtest-cli'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Log a timestamped block followed by the speedtest-cli output
    f.write("{}\n\n".format(timestamp))
    f.write((result.stdout).decode("utf-8"))
    f.write("\n---------------------------------------------------------------------\n\n")
time.sleep(300) | en | 0.70281 | #!/usr/bin/python3 # this is the main script to run the speedtest every 5 minutes | 2.689626 | 3 |
common_configs/apps/imagekit.py | nigma/django-common-configs | 5 | 6622781 | #-*- coding: utf-8 -*-
"""
Settings for django-imagekit_ - automated image processing for Django
.. _django-imagekit: https://github.com/matthewwithanm/django-imagekit
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from configurations import values
from ..utils import merge_items
class Imagekit(object):
    """
    Settings mixin wiring django-imagekit into a django-configurations
    based settings class.

    NOTE(review): relies on cooperative multiple inheritance -- another
    class in the MRO must provide INSTALLED_APPS for the super() call in
    the property below to resolve.
    """
    #: Use optimistic strategy
    #:
    #: http://django-imagekit.rtfd.org/latest/configuration.html#django.conf.settings.IMAGEKIT_DEFAULT_CACHEFILE_STRATEGY
    IMAGEKIT_DEFAULT_CACHEFILE_STRATEGY = values.Value("imagekit.cachefiles.strategies.Optimistic", environ_prefix=None)
    #: Define naming strategy
    #:
    #: http://django-imagekit.rtfd.org/latest/configuration.html#django.conf.settings.IMAGEKIT_SPEC_CACHEFILE_NAMER
    IMAGEKIT_SPEC_CACHEFILE_NAMER = values.Value("imagekit.cachefiles.namers.source_name_dot_hash", environ_prefix=None)
    @property
    def INSTALLED_APPS(self):
        """
        Appends :mod:`imagekit` to list of ``INSTALLED_APPS``.
        """
        # merge_items is a project helper (..utils); presumably merges
        # while avoiding duplicates -- confirm before relying on ordering
        return merge_items(super(Imagekit, self).INSTALLED_APPS, [
            "imagekit",
        ])
| #-*- coding: utf-8 -*-
"""
Settings for django-imagekit_ - automated image processing for Django
.. _django-imagekit: https://github.com/matthewwithanm/django-imagekit
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from configurations import values
from ..utils import merge_items
class Imagekit(object):
    """
    Settings mixin wiring django-imagekit into a django-configurations
    based settings class.

    NOTE(review): relies on cooperative multiple inheritance -- another
    class in the MRO must provide INSTALLED_APPS for the super() call in
    the property below to resolve.
    """
    #: Use optimistic strategy
    #:
    #: http://django-imagekit.rtfd.org/latest/configuration.html#django.conf.settings.IMAGEKIT_DEFAULT_CACHEFILE_STRATEGY
    IMAGEKIT_DEFAULT_CACHEFILE_STRATEGY = values.Value("imagekit.cachefiles.strategies.Optimistic", environ_prefix=None)
    #: Define naming strategy
    #:
    #: http://django-imagekit.rtfd.org/latest/configuration.html#django.conf.settings.IMAGEKIT_SPEC_CACHEFILE_NAMER
    IMAGEKIT_SPEC_CACHEFILE_NAMER = values.Value("imagekit.cachefiles.namers.source_name_dot_hash", environ_prefix=None)
    @property
    def INSTALLED_APPS(self):
        """
        Appends :mod:`imagekit` to list of ``INSTALLED_APPS``.
        """
        # merge_items is a project helper (..utils); presumably merges
        # while avoiding duplicates -- confirm before relying on ordering
        return merge_items(super(Imagekit, self).INSTALLED_APPS, [
            "imagekit",
        ])
| en | 0.439663 | #-*- coding: utf-8 -*- Settings for django-imagekit_ - automated image processing for Django .. _django-imagekit: https://github.com/matthewwithanm/django-imagekit #: Use optimistic strategy #: #: http://django-imagekit.rtfd.org/latest/configuration.html#django.conf.settings.IMAGEKIT_DEFAULT_CACHEFILE_STRATEGY #: Define naming strategy #: #: http://django-imagekit.rtfd.org/latest/configuration.html#django.conf.settings.IMAGEKIT_SPEC_CACHEFILE_NAMER Appends :mod:`imagekit` to list of ``INSTALLED_APPS``. | 1.773731 | 2 |
tests/test_03_id_token.py | IdentityPython/oicsrv | 6 | 6622782 | <reponame>IdentityPython/oicsrv
import json
import os
from cryptojwt.jws import jws
from cryptojwt.jwt import JWT
from cryptojwt.key_jar import KeyJar
from oidcmsg.oidc import AuthorizationRequest
from oidcmsg.oidc import RegistrationResponse
from oidcmsg.time_util import time_sans_frac
import pytest
from oidcendpoint.authn_event import create_authn_event
from oidcendpoint.client_authn import verify_client
from oidcendpoint.endpoint_context import EndpointContext
from oidcendpoint.id_token import IDToken
from oidcendpoint.id_token import get_sign_and_encrypt_algorithms
from oidcendpoint.oidc import userinfo
from oidcendpoint.oidc.authorization import Authorization
from oidcendpoint.oidc.token import Token
from oidcendpoint.user_authn.authn_context import INTERNETPROTOCOLPASSWORD
from oidcendpoint.user_info import UserInfo
KEYDEFS = [
{"type": "RSA", "key": "", "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]},
]
BASEDIR = os.path.abspath(os.path.dirname(__file__))
def full_path(local_file):
    """Return *local_file* resolved relative to this test module's directory."""
    return os.path.join(BASEDIR, local_file)
USERS = json.loads(open(full_path("users.json")).read())
USERINFO = UserInfo(USERS)
AREQ = AuthorizationRequest(
response_type="code",
client_id="client_1",
redirect_uri="http://example.com/authz",
scope=["openid"],
state="state000",
nonce="nonce",
)
AREQS = AuthorizationRequest(
response_type="code",
client_id="client_1",
redirect_uri="http://example.com/authz",
scope=["openid", "address", "email"],
state="state000",
nonce="nonce",
)
AREQRC = AuthorizationRequest(
response_type="code",
client_id="client_1",
redirect_uri="http://example.com/authz",
scope=["openid", "address", "email"],
state="state000",
nonce="nonce",
claims={"id_token": {"nickname": None}}
)
conf = {
"issuer": "https://example.com/",
"password": "<PASSWORD>",
"token_expires_in": 600,
"grant_expires_in": 300,
"refresh_token_expires_in": 86400,
"verify_ssl": False,
"keys": {"key_defs": KEYDEFS, "uri_path": "static/jwks.json"},
"jwks_uri": "https://example.com/jwks.json",
"endpoint": {
"authorization_endpoint": {
"path": "{}/authorization",
"class": Authorization,
"kwargs": {},
},
"token_endpoint": {"path": "{}/token", "class": Token, "kwargs": {}},
"userinfo_endpoint": {
"path": "{}/userinfo",
"class": userinfo.UserInfo,
"kwargs": {"db_file": "users.json"},
},
},
"authentication": {
"anon": {
"acr": INTERNETPROTOCOLPASSWORD,
"class": "oidcendpoint.user_authn.user.NoAuthn",
"kwargs": {"user": "diana"},
}
},
"userinfo": {"class": "oidcendpoint.user_info.UserInfo", "kwargs": {"db": USERS}, },
"client_authn": verify_client,
"template_dir": "template",
"id_token": {"class": IDToken, "kwargs": {"foo": "bar"}},
}
USER_ID = "diana"
class TestEndpoint(object):
    @pytest.fixture(autouse=True)
    def create_idtoken(self):
        """
        Autouse fixture: builds a fresh EndpointContext from the module-level
        ``conf``, registers ``client_1`` (secret, redirect URIs, auth method,
        response types) plus its symmetric key, and caches the session
        manager and test user id on the instance for the test methods.
        """
        self.endpoint_context = EndpointContext(conf)
        self.endpoint_context.cdb["client_1"] = {
            "client_secret": "hemligtochintekort",
            "redirect_uris": [("https://example.com/cb", None)],
            "client_salt": "salted",
            "token_endpoint_auth_method": "client_secret_post",
            "response_types": ["code", "token", "code id_token", "id_token"],
        }
        # Register the client secret as a symmetric key usable for both
        # signing and encryption
        self.endpoint_context.keyjar.add_symmetric(
            "client_1", "hemligtochintekort", ["sig", "enc"]
        )
        self.session_manager = self.endpoint_context.session_manager
        self.user_id = USER_ID
    def _create_session(self, auth_req, sub_type="public", sector_identifier=''):
        """
        Create a session for *auth_req* and return its session id.

        When *sector_identifier* is given it is copied into the request as
        ``sector_identifier_uri`` (used for pairwise subject calculation).
        """
        if sector_identifier:
            authz_req = auth_req.copy()
            authz_req["sector_identifier_uri"] = sector_identifier
        else:
            authz_req = auth_req
        client_id = authz_req['client_id']
        ae = create_authn_event(self.user_id)
        return self.session_manager.create_session(ae, authz_req, self.user_id,
                                                   client_id=client_id,
                                                   sub_type=sub_type)
    def _mint_code(self, grant, session_id):
        """Mint an authorization code (5 minute lifetime) for *grant*."""
        # Constructing an authorization code is now done
        return grant.mint_token(
            session_id=session_id,
            endpoint_context=self.endpoint_context,
            token_type="authorization_code",
            token_handler=self.session_manager.token_handler["code"],
            expires_at=time_sans_frac() + 300  # 5 minutes from now
        )
    def _mint_access_token(self, grant, session_id, token_ref):
        """Mint an access token (15 minute lifetime) based on *token_ref*."""
        return grant.mint_token(
            session_id=session_id,
            endpoint_context=self.endpoint_context,
            token_type="access_token",
            token_handler=self.session_manager.token_handler["access_token"],
            expires_at=time_sans_frac() + 900,  # 15 minutes from now
            based_on=token_ref  # Means the token (tok) was used to mint this token
        )
    def test_id_token_payload_0(self):
        """Minimal payload contains only sub, nonce and auth_time."""
        session_id = self._create_session(AREQ)
        payload = self.endpoint_context.idtoken.payload(session_id)
        assert set(payload.keys()) == {"sub", "nonce", "auth_time"}
    def test_id_token_payload_with_code(self):
        """Supplying code= adds a c_hash claim."""
        session_id = self._create_session(AREQ)
        grant = self.session_manager[session_id]
        code = self._mint_code(grant, session_id)
        payload = self.endpoint_context.idtoken.payload(
            session_id, AREQ["client_id"], code=code.value
        )
        assert set(payload.keys()) == {"nonce", "c_hash", "sub", "auth_time"}
    def test_id_token_payload_with_access_token(self):
        """Supplying access_token= adds an at_hash claim."""
        session_id = self._create_session(AREQ)
        grant = self.session_manager[session_id]
        code = self._mint_code(grant, session_id)
        access_token = self._mint_access_token(grant, session_id, code)
        payload = self.endpoint_context.idtoken.payload(
            session_id, AREQ["client_id"], access_token=access_token.value
        )
        assert set(payload.keys()) == {"nonce", "at_hash", "sub", "auth_time"}
    def test_id_token_payload_with_code_and_access_token(self):
        """Supplying both code= and access_token= adds both hash claims."""
        session_id = self._create_session(AREQ)
        grant = self.session_manager[session_id]
        code = self._mint_code(grant, session_id)
        access_token = self._mint_access_token(grant, session_id, code)
        payload = self.endpoint_context.idtoken.payload(
            session_id, AREQ["client_id"], access_token=access_token.value, code=code.value
        )
        assert set(payload.keys()) == {"nonce", "c_hash", "at_hash", "sub", "auth_time"}
    def test_id_token_payload_with_userinfo(self):
        """Claims granted on the session are copied into the payload."""
        session_id = self._create_session(AREQ)
        grant = self.session_manager[session_id]
        grant.claims = {"id_token": {"given_name": None}}
        payload = self.endpoint_context.idtoken.payload(session_id=session_id)
        assert set(payload.keys()) == {"nonce", "given_name", "sub", "auth_time"}
    def test_id_token_payload_many_0(self):
        """Granted claims, c_hash and at_hash can all be combined."""
        session_id = self._create_session(AREQ)
        grant = self.session_manager[session_id]
        grant.claims = {"id_token": {"given_name": None}}
        code = self._mint_code(grant, session_id)
        access_token = self._mint_access_token(grant, session_id, code)
        payload = self.endpoint_context.idtoken.payload(
            session_id, AREQ["client_id"],
            access_token=access_token.value,
            code=code.value
        )
        assert set(payload.keys()) == {"nonce", "c_hash", "at_hash", "sub", "auth_time",
                                       "given_name"}
def test_sign_encrypt_id_token(self):
session_id = self._create_session(AREQ)
_token = self.endpoint_context.idtoken.sign_encrypt(session_id, AREQ['client_id'],
sign=True)
assert _token
_jws = jws.factory(_token)
assert _jws.jwt.headers["alg"] == "RS256"
client_keyjar = KeyJar()
_jwks = self.endpoint_context.keyjar.export_jwks()
client_keyjar.import_jwks(_jwks, self.endpoint_context.issuer)
_jwt = JWT(key_jar=client_keyjar, iss="client_1")
res = _jwt.unpack(_token)
assert isinstance(res, dict)
assert res["aud"] == ["client_1"]
def test_get_sign_algorithm(self):
client_info = self.endpoint_context.cdb[AREQ['client_id']]
algs = get_sign_and_encrypt_algorithms(
self.endpoint_context, client_info, "id_token", sign=True
)
# default signing alg
assert algs == {"sign": True, "encrypt": False, "sign_alg": "RS256"}
def test_no_default_encrypt_algorithms(self):
    """Encryption falls back to RSA1_5 / A128CBC-HS256 when the client
    registered no algorithm preferences."""
    cinfo = RegistrationResponse()
    ec = EndpointContext(conf)
    result = get_sign_and_encrypt_algorithms(
        ec, cinfo, "id_token", sign=True, encrypt=True
    )
    assert result["sign_alg"] == "RS256"
    assert result["enc_alg"] == "RSA1_5"
    assert result["enc_enc"] == "A128CBC-HS256"
def test_get_sign_algorithm_2(self):
    """The client's registered id_token_signed_response_alg is honoured."""
    cinfo = RegistrationResponse(id_token_signed_response_alg="RS512")
    result = get_sign_and_encrypt_algorithms(
        EndpointContext(conf), cinfo, "id_token", sign=True
    )
    assert result == {"sign": True, "encrypt": False, "sign_alg": "RS512"}
def test_get_sign_algorithm_3(self):
    """A server-side jwx_def signing default applies when the client has
    no registered preference."""
    ec = EndpointContext(conf)
    ec.jwx_def["signing_alg"] = {"id_token": "RS384"}
    result = get_sign_and_encrypt_algorithms(
        ec, RegistrationResponse(), "id_token", sign=True
    )
    assert result == {"sign": True, "encrypt": False, "sign_alg": "RS384"}
def test_get_sign_algorithm_4(self):
    """A client-registered alg overrides the server-side jwx_def default."""
    ec = EndpointContext(conf)
    ec.jwx_def["signing_alg"] = {"id_token": "RS384"}
    cinfo = RegistrationResponse(id_token_signed_response_alg="RS512")
    result = get_sign_and_encrypt_algorithms(ec, cinfo, "id_token", sign=True)
    assert result == {"sign": True, "encrypt": False, "sign_alg": "RS512"}
def test_available_claims(self):
    """A claim the userinfo backend can supply ends up in the ID token."""
    sid = self._create_session(AREQ)
    self.session_manager[sid].claims = {
        "id_token": {"nickname": {"essential": True}}
    }
    id_token = self.endpoint_context.idtoken.make(session_id=sid)
    assert id_token
    receiver_keyjar = KeyJar()
    receiver_keyjar.import_jwks(
        self.endpoint_context.keyjar.export_jwks(), self.endpoint_context.issuer
    )
    claims = JWT(key_jar=receiver_keyjar, iss="client_1").unpack(id_token)
    assert "nickname" in claims
def test_no_available_claims(self):
    """A requested claim unknown to the userinfo backend is silently
    left out of the ID token.

    Fix: removed the unused local ``req`` that the original assigned
    but never read.
    """
    session_id = self._create_session(AREQ)
    grant = self.session_manager[session_id]
    grant.claims = {"id_token": {"foobar": None}}
    _token = self.endpoint_context.idtoken.make(session_id=session_id)
    assert _token
    # Verify on the "client side": import the OP's public keys and unpack.
    client_keyjar = KeyJar()
    _jwks = self.endpoint_context.keyjar.export_jwks()
    client_keyjar.import_jwks(_jwks, self.endpoint_context.issuer)
    _jwt = JWT(key_jar=client_keyjar, iss="client_1")
    res = _jwt.unpack(_token)
    assert "foobar" not in res
def test_client_claims(self):
    """ID-token claims configured per client are included when
    enable_claims_per_client is on."""
    ec = self.endpoint_context
    sid = self._create_session(AREQ)
    ec.idtoken.kwargs["enable_claims_per_client"] = True
    ec.cdb["client_1"]["id_token_claims"] = {"address": None}
    self.session_manager[sid].claims = {
        'id_token': ec.claims_interface.get_claims(
            session_id=sid, scopes=AREQ["scope"], usage="id_token")
    }
    id_token = ec.idtoken.make(session_id=sid)
    assert id_token
    receiver_keyjar = KeyJar()
    receiver_keyjar.import_jwks(ec.keyjar.export_jwks(), ec.issuer)
    claims = JWT(key_jar=receiver_keyjar, iss="client_1").unpack(id_token)
    assert "address" in claims
    assert "nickname" not in claims
def test_client_claims_with_default(self):
    """Without per-client claims or scope-driven claims enabled, no
    userinfo claims leak into the ID token."""
    ec = self.endpoint_context
    sid = self._create_session(AREQ)
    self.session_manager[sid].claims = {
        "id_token": ec.claims_interface.get_claims(
            session_id=sid, scopes=AREQ["scope"], usage="id_token")
    }
    id_token = ec.idtoken.make(session_id=sid)
    assert id_token
    receiver_keyjar = KeyJar()
    receiver_keyjar.import_jwks(ec.keyjar.export_jwks(), ec.issuer)
    claims = JWT(key_jar=receiver_keyjar, iss="client_1").unpack(id_token)
    # No user info claims should be there
    assert "address" not in claims
    assert "nickname" not in claims
def test_client_claims_scopes(self):
    """add_claims_by_scope maps the address/email scopes to claims."""
    ec = self.endpoint_context
    sid = self._create_session(AREQS)
    ec.idtoken.kwargs["add_claims_by_scope"] = True
    self.session_manager[sid].claims = {
        "id_token": ec.claims_interface.get_claims(
            session_id=sid, scopes=AREQS["scope"], usage="id_token")
    }
    id_token = ec.idtoken.make(session_id=sid)
    assert id_token
    receiver_keyjar = KeyJar()
    receiver_keyjar.import_jwks(ec.keyjar.export_jwks(), ec.issuer)
    claims = JWT(key_jar=receiver_keyjar, iss="client_1").unpack(id_token)
    assert "address" in claims
    assert "email" in claims
    assert "nickname" not in claims
def test_client_claims_scopes_and_request_claims_no_match(self):
    """Scope-derived claims and claims-parameter claims are combined."""
    ec = self.endpoint_context
    sid = self._create_session(AREQRC)
    ec.idtoken.kwargs["add_claims_by_scope"] = True
    self.session_manager[sid].claims = {
        "id_token": ec.claims_interface.get_claims(
            session_id=sid, scopes=AREQRC["scope"], usage="id_token")
    }
    id_token = ec.idtoken.make(session_id=sid)
    assert id_token
    receiver_keyjar = KeyJar()
    receiver_keyjar.import_jwks(ec.keyjar.export_jwks(), ec.issuer)
    claims = JWT(key_jar=receiver_keyjar, iss="client_1").unpack(id_token)
    # User information, from scopes -> claims
    assert "address" in claims
    assert "email" in claims
    # User info, requested by the claims parameter
    assert "nickname" in claims
def test_client_claims_scopes_and_request_claims_one_match(self):
    """A claims-parameter value restriction that does not match drops the
    claim, while scope-derived claims are kept."""
    request = AREQS.copy()
    request["claims"] = {"id_token": {"email": {"value": "<EMAIL>"}}}
    ec = self.endpoint_context
    sid = self._create_session(request)
    ec.idtoken.kwargs["add_claims_by_scope"] = True
    self.session_manager[sid].claims = {
        "id_token": ec.claims_interface.get_claims(
            session_id=sid, scopes=request["scope"], usage="id_token")
    }
    id_token = ec.idtoken.make(session_id=sid)
    assert id_token
    receiver_keyjar = KeyJar()
    receiver_keyjar.import_jwks(ec.keyjar.export_jwks(), ec.issuer)
    claims = JWT(key_jar=receiver_keyjar, iss="client_1").unpack(id_token)
    # Email value restriction didn't match
    assert "email" not in claims
    # Scope -> claims
    assert "address" in claims
| import json
import os
from cryptojwt.jws import jws
from cryptojwt.jwt import JWT
from cryptojwt.key_jar import KeyJar
from oidcmsg.oidc import AuthorizationRequest
from oidcmsg.oidc import RegistrationResponse
from oidcmsg.time_util import time_sans_frac
import pytest
from oidcendpoint.authn_event import create_authn_event
from oidcendpoint.client_authn import verify_client
from oidcendpoint.endpoint_context import EndpointContext
from oidcendpoint.id_token import IDToken
from oidcendpoint.id_token import get_sign_and_encrypt_algorithms
from oidcendpoint.oidc import userinfo
from oidcendpoint.oidc.authorization import Authorization
from oidcendpoint.oidc.token import Token
from oidcendpoint.user_authn.authn_context import INTERNETPROTOCOLPASSWORD
from oidcendpoint.user_info import UserInfo
KEYDEFS = [
{"type": "RSA", "key": "", "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]},
]
# Directory containing this module; fixture files are resolved against it.
BASEDIR = os.path.abspath(os.path.dirname(__file__))


def full_path(local_file):
    """Return the absolute path of *local_file* relative to this module."""
    resolved = os.path.join(BASEDIR, local_file)
    return resolved
USERS = json.loads(open(full_path("users.json")).read())
USERINFO = UserInfo(USERS)
AREQ = AuthorizationRequest(
response_type="code",
client_id="client_1",
redirect_uri="http://example.com/authz",
scope=["openid"],
state="state000",
nonce="nonce",
)
AREQS = AuthorizationRequest(
response_type="code",
client_id="client_1",
redirect_uri="http://example.com/authz",
scope=["openid", "address", "email"],
state="state000",
nonce="nonce",
)
AREQRC = AuthorizationRequest(
response_type="code",
client_id="client_1",
redirect_uri="http://example.com/authz",
scope=["openid", "address", "email"],
state="state000",
nonce="nonce",
claims={"id_token": {"nickname": None}}
)
conf = {
"issuer": "https://example.com/",
"password": "<PASSWORD>",
"token_expires_in": 600,
"grant_expires_in": 300,
"refresh_token_expires_in": 86400,
"verify_ssl": False,
"keys": {"key_defs": KEYDEFS, "uri_path": "static/jwks.json"},
"jwks_uri": "https://example.com/jwks.json",
"endpoint": {
"authorization_endpoint": {
"path": "{}/authorization",
"class": Authorization,
"kwargs": {},
},
"token_endpoint": {"path": "{}/token", "class": Token, "kwargs": {}},
"userinfo_endpoint": {
"path": "{}/userinfo",
"class": userinfo.UserInfo,
"kwargs": {"db_file": "users.json"},
},
},
"authentication": {
"anon": {
"acr": INTERNETPROTOCOLPASSWORD,
"class": "oidcendpoint.user_authn.user.NoAuthn",
"kwargs": {"user": "diana"},
}
},
"userinfo": {"class": "oidcendpoint.user_info.UserInfo", "kwargs": {"db": USERS}, },
"client_authn": verify_client,
"template_dir": "template",
"id_token": {"class": IDToken, "kwargs": {"foo": "bar"}},
}
USER_ID = "diana"
class TestEndpoint(object):
@pytest.fixture(autouse=True)
def create_idtoken(self):
self.endpoint_context = EndpointContext(conf)
self.endpoint_context.cdb["client_1"] = {
"client_secret": "hemligtochintekort",
"redirect_uris": [("https://example.com/cb", None)],
"client_salt": "salted",
"token_endpoint_auth_method": "client_secret_post",
"response_types": ["code", "token", "code id_token", "id_token"],
}
self.endpoint_context.keyjar.add_symmetric(
"client_1", "hemligtochintekort", ["sig", "enc"]
)
self.session_manager = self.endpoint_context.session_manager
self.user_id = USER_ID
def _create_session(self, auth_req, sub_type="public", sector_identifier=''):
if sector_identifier:
authz_req = auth_req.copy()
authz_req["sector_identifier_uri"] = sector_identifier
else:
authz_req = auth_req
client_id = authz_req['client_id']
ae = create_authn_event(self.user_id)
return self.session_manager.create_session(ae, authz_req, self.user_id,
client_id=client_id,
sub_type=sub_type)
def _mint_code(self, grant, session_id):
# Constructing an authorization code is now done
return grant.mint_token(
session_id=session_id,
endpoint_context=self.endpoint_context,
token_type="authorization_code",
token_handler=self.session_manager.token_handler["code"],
expires_at=time_sans_frac() + 300 # 5 minutes from now
)
def _mint_access_token(self, grant, session_id, token_ref):
return grant.mint_token(
session_id=session_id,
endpoint_context=self.endpoint_context,
token_type="access_token",
token_handler=self.session_manager.token_handler["access_token"],
expires_at=time_sans_frac() + 900, # 15 minutes from now
based_on=token_ref # Means the token (tok) was used to mint this token
)
def test_id_token_payload_0(self):
session_id = self._create_session(AREQ)
payload = self.endpoint_context.idtoken.payload(session_id)
assert set(payload.keys()) == {"sub", "nonce", "auth_time"}
def test_id_token_payload_with_code(self):
session_id = self._create_session(AREQ)
grant = self.session_manager[session_id]
code = self._mint_code(grant, session_id)
payload = self.endpoint_context.idtoken.payload(
session_id, AREQ["client_id"], code=code.value
)
assert set(payload.keys()) == {"nonce", "c_hash", "sub", "auth_time"}
def test_id_token_payload_with_access_token(self):
session_id = self._create_session(AREQ)
grant = self.session_manager[session_id]
code = self._mint_code(grant, session_id)
access_token = self._mint_access_token(grant, session_id, code)
payload = self.endpoint_context.idtoken.payload(
session_id, AREQ["client_id"], access_token=access_token.value
)
assert set(payload.keys()) == {"nonce", "at_hash", "sub", "auth_time"}
def test_id_token_payload_with_code_and_access_token(self):
session_id = self._create_session(AREQ)
grant = self.session_manager[session_id]
code = self._mint_code(grant, session_id)
access_token = self._mint_access_token(grant, session_id, code)
payload = self.endpoint_context.idtoken.payload(
session_id, AREQ["client_id"], access_token=access_token.value, code=code.value
)
assert set(payload.keys()) == {"nonce", "c_hash", "at_hash", "sub", "auth_time"}
def test_id_token_payload_with_userinfo(self):
session_id = self._create_session(AREQ)
grant = self.session_manager[session_id]
grant.claims = {"id_token": {"given_name": None}}
payload = self.endpoint_context.idtoken.payload(session_id=session_id)
assert set(payload.keys()) == {"nonce", "given_name", "sub", "auth_time"}
def test_id_token_payload_many_0(self):
session_id = self._create_session(AREQ)
grant = self.session_manager[session_id]
grant.claims = {"id_token": {"given_name": None}}
code = self._mint_code(grant, session_id)
access_token = self._mint_access_token(grant, session_id, code)
payload = self.endpoint_context.idtoken.payload(
session_id, AREQ["client_id"],
access_token=access_token.value,
code=code.value
)
assert set(payload.keys()) == {"nonce", "c_hash", "at_hash", "sub", "auth_time",
"given_name"}
def test_sign_encrypt_id_token(self):
session_id = self._create_session(AREQ)
_token = self.endpoint_context.idtoken.sign_encrypt(session_id, AREQ['client_id'],
sign=True)
assert _token
_jws = jws.factory(_token)
assert _jws.jwt.headers["alg"] == "RS256"
client_keyjar = KeyJar()
_jwks = self.endpoint_context.keyjar.export_jwks()
client_keyjar.import_jwks(_jwks, self.endpoint_context.issuer)
_jwt = JWT(key_jar=client_keyjar, iss="client_1")
res = _jwt.unpack(_token)
assert isinstance(res, dict)
assert res["aud"] == ["client_1"]
def test_get_sign_algorithm(self):
client_info = self.endpoint_context.cdb[AREQ['client_id']]
algs = get_sign_and_encrypt_algorithms(
self.endpoint_context, client_info, "id_token", sign=True
)
# default signing alg
assert algs == {"sign": True, "encrypt": False, "sign_alg": "RS256"}
def test_no_default_encrypt_algorithms(self):
client_info = RegistrationResponse()
endpoint_context = EndpointContext(conf)
args = get_sign_and_encrypt_algorithms(
endpoint_context, client_info, "id_token", sign=True, encrypt=True
)
assert args["sign_alg"] == "RS256"
assert args["enc_enc"] == "A128CBC-HS256"
assert args["enc_alg"] == "RSA1_5"
def test_get_sign_algorithm_2(self):
client_info = RegistrationResponse(id_token_signed_response_alg="RS512")
endpoint_context = EndpointContext(conf)
algs = get_sign_and_encrypt_algorithms(
endpoint_context, client_info, "id_token", sign=True
)
# default signing alg
assert algs == {"sign": True, "encrypt": False, "sign_alg": "RS512"}
def test_get_sign_algorithm_3(self):
client_info = RegistrationResponse()
endpoint_context = EndpointContext(conf)
endpoint_context.jwx_def["signing_alg"] = {"id_token": "RS384"}
algs = get_sign_and_encrypt_algorithms(
endpoint_context, client_info, "id_token", sign=True
)
# default signing alg
assert algs == {"sign": True, "encrypt": False, "sign_alg": "RS384"}
def test_get_sign_algorithm_4(self):
client_info = RegistrationResponse(id_token_signed_response_alg="RS512")
endpoint_context = EndpointContext(conf)
endpoint_context.jwx_def["signing_alg"] = {"id_token": "RS384"}
algs = get_sign_and_encrypt_algorithms(
endpoint_context, client_info, "id_token", sign=True
)
# default signing alg
assert algs == {"sign": True, "encrypt": False, "sign_alg": "RS512"}
def test_available_claims(self):
session_id = self._create_session(AREQ)
grant = self.session_manager[session_id]
grant.claims = {"id_token": {"nickname": {"essential": True}}}
_token = self.endpoint_context.idtoken.make(session_id=session_id)
assert _token
client_keyjar = KeyJar()
_jwks = self.endpoint_context.keyjar.export_jwks()
client_keyjar.import_jwks(_jwks, self.endpoint_context.issuer)
_jwt = JWT(key_jar=client_keyjar, iss="client_1")
res = _jwt.unpack(_token)
assert "nickname" in res
def test_no_available_claims(self):
session_id = self._create_session(AREQ)
grant = self.session_manager[session_id]
grant.claims = {"id_token": {"foobar": None}}
req = {"client_id": "client_1"}
_token = self.endpoint_context.idtoken.make(session_id=session_id)
assert _token
client_keyjar = KeyJar()
_jwks = self.endpoint_context.keyjar.export_jwks()
client_keyjar.import_jwks(_jwks, self.endpoint_context.issuer)
_jwt = JWT(key_jar=client_keyjar, iss="client_1")
res = _jwt.unpack(_token)
assert "foobar" not in res
def test_client_claims(self):
session_id = self._create_session(AREQ)
grant = self.session_manager[session_id]
self.endpoint_context.idtoken.kwargs["enable_claims_per_client"] = True
self.endpoint_context.cdb["client_1"]["id_token_claims"] = {"address": None}
_claims = self.endpoint_context.claims_interface.get_claims(
session_id=session_id, scopes=AREQ["scope"], usage="id_token")
grant.claims = {'id_token': _claims}
_token = self.endpoint_context.idtoken.make(session_id=session_id)
assert _token
client_keyjar = KeyJar()
_jwks = self.endpoint_context.keyjar.export_jwks()
client_keyjar.import_jwks(_jwks, self.endpoint_context.issuer)
_jwt = JWT(key_jar=client_keyjar, iss="client_1")
res = _jwt.unpack(_token)
assert "address" in res
assert "nickname" not in res
def test_client_claims_with_default(self):
session_id = self._create_session(AREQ)
grant = self.session_manager[session_id]
# self.endpoint_context.cdb["client_1"]["id_token_claims"] = {"address": None}
# self.endpoint_context.idtoken.enable_claims_per_client = True
_claims = self.endpoint_context.claims_interface.get_claims(
session_id=session_id, scopes=AREQ["scope"], usage="id_token")
grant.claims = {"id_token": _claims}
_token = self.endpoint_context.idtoken.make(session_id=session_id)
assert _token
client_keyjar = KeyJar()
_jwks = self.endpoint_context.keyjar.export_jwks()
client_keyjar.import_jwks(_jwks, self.endpoint_context.issuer)
_jwt = JWT(key_jar=client_keyjar, iss="client_1")
res = _jwt.unpack(_token)
# No user info claims should be there
assert "address" not in res
assert "nickname" not in res
def test_client_claims_scopes(self):
session_id = self._create_session(AREQS)
grant = self.session_manager[session_id]
self.endpoint_context.idtoken.kwargs["add_claims_by_scope"] = True
_claims = self.endpoint_context.claims_interface.get_claims(
session_id=session_id, scopes=AREQS["scope"], usage="id_token")
grant.claims = {"id_token": _claims}
_token = self.endpoint_context.idtoken.make(session_id=session_id)
assert _token
client_keyjar = KeyJar()
_jwks = self.endpoint_context.keyjar.export_jwks()
client_keyjar.import_jwks(_jwks, self.endpoint_context.issuer)
_jwt = JWT(key_jar=client_keyjar, iss="client_1")
res = _jwt.unpack(_token)
assert "address" in res
assert "email" in res
assert "nickname" not in res
def test_client_claims_scopes_and_request_claims_no_match(self):
session_id = self._create_session(AREQRC)
grant = self.session_manager[session_id]
self.endpoint_context.idtoken.kwargs["add_claims_by_scope"] = True
_claims = self.endpoint_context.claims_interface.get_claims(
session_id=session_id, scopes=AREQRC["scope"], usage="id_token")
grant.claims = {"id_token": _claims}
_token = self.endpoint_context.idtoken.make(session_id=session_id)
assert _token
client_keyjar = KeyJar()
_jwks = self.endpoint_context.keyjar.export_jwks()
client_keyjar.import_jwks(_jwks, self.endpoint_context.issuer)
_jwt = JWT(key_jar=client_keyjar, iss="client_1")
res = _jwt.unpack(_token)
# User information, from scopes -> claims
assert "address" in res
assert "email" in res
# User info, requested by claims parameter
assert "nickname" in res
def test_client_claims_scopes_and_request_claims_one_match(self):
_req = AREQS.copy()
_req["claims"] = {"id_token": {"email": {"value": "<EMAIL>"}}}
session_id = self._create_session(_req)
grant = self.session_manager[session_id]
self.endpoint_context.idtoken.kwargs["add_claims_by_scope"] = True
_claims = self.endpoint_context.claims_interface.get_claims(
session_id=session_id, scopes=_req["scope"], usage="id_token")
grant.claims = {"id_token": _claims}
_token = self.endpoint_context.idtoken.make(session_id=session_id)
assert _token
client_keyjar = KeyJar()
_jwks = self.endpoint_context.keyjar.export_jwks()
client_keyjar.import_jwks(_jwks, self.endpoint_context.issuer)
_jwt = JWT(key_jar=client_keyjar, iss="client_1")
res = _jwt.unpack(_token)
# Email didn't match
assert "email" not in res
# Scope -> claims
assert "address" in res | en | 0.661348 | # Constructing an authorization code is now done # 5 minutes from now # 15 minutes from now # Means the token (tok) was used to mint this token # default signing alg # default signing alg # default signing alg # default signing alg # self.endpoint_context.cdb["client_1"]["id_token_claims"] = {"address": None} # self.endpoint_context.idtoken.enable_claims_per_client = True # No user info claims should be there # User information, from scopes -> claims # User info, requested by claims parameter # Email didn't match # Scope -> claims | 2.060839 | 2 |
abc229/abc229_a.py | Vermee81/practice-coding-contests | 0 | 6622783 | <gh_stars>0
# https://atcoder.jp/contests/abc229/tasks/abc229_a
# X is answered "Yes" when it lies in the half-open wrap-around interval
# [S, T) on a 24-hour clock.
S, T, X = map(int, input().split())
if S == T:
    print("No")
elif S > T:
    # Interval wraps past midnight: [S, 24) union [0, T)
    print("Yes" if X >= S or X < T else "No")
else:
    print("Yes" if S <= X < T else "No")
| # https://atcoder.jp/contests/abc229/tasks/abc229_a
S, T, X = map(int, input().split())
if S == T:
print("No")
exit()
if S > T:
ans = "Yes" if X >= S or X < T else "No"
print(ans)
exit()
ans = "Yes" if S <= X < T else "No"
print(ans) | en | 0.425791 | # https://atcoder.jp/contests/abc229/tasks/abc229_a | 3.359666 | 3 |
vcfModifier/vcfIntersector.py | ccgenomics/somaticseq | 2 | 6622784 | #!/usr/bin/env python3
import sys, os, argparse, gzip, re, subprocess, uuid
MY_DIR = os.path.dirname(os.path.realpath(__file__))
PRE_DIR = os.path.join(MY_DIR, os.pardir)
sys.path.append( PRE_DIR )
import genomicFileHandler.genomic_file_handlers as genome
def bed_include(infile, inclusion_region, outfile):
    """Write the subset of *infile* overlapping *inclusion_region* to *outfile*.

    Runs bedtools' ``intersectBed`` through the shell.  Returns *outfile*,
    or None when no inclusion_region is given (nothing is written then).
    """
    assert infile != outfile
    if not inclusion_region:
        return None
    command = 'intersectBed -header -a {} -b {} | uniq > {}'.format(
        infile, inclusion_region, outfile)
    assert os.system(command) == 0
    return outfile
def bed_exclude(infile, exclusion_region, outfile):
    """Write the subset of *infile* NOT overlapping *exclusion_region* to *outfile*.

    Runs bedtools' ``intersectBed -v`` through the shell.  Returns *outfile*,
    or None when no exclusion_region is given (nothing is written then).
    """
    assert infile != outfile
    if not exclusion_region:
        return None
    command = 'intersectBed -header -a {} -b {} -v | uniq > {}'.format(
        infile, exclusion_region, outfile)
    assert os.system(command) == 0
    return outfile
def bed_intersector(infile, outfile, inclusion_region=None, exclusion_region=None):
    """Restrict *infile* to *inclusion_region* and/or remove *exclusion_region*.

    Uses bedtools' ``intersectBed`` through the shell.  When neither region
    is given the input is copied verbatim (gunzipped first if it ends in
    .gz).  Returns *outfile*.

    Fix: intermediate files are now removed in a ``finally`` block, so a
    failing bedtools step no longer leaves the temporary file behind.

    NOTE(review): paths are interpolated into a shell command line, so
    arguments containing shell metacharacters are unsafe — confirm callers
    only pass trusted paths.
    """
    assert infile != outfile
    from shutil import copyfile
    # Input name without its extension, plus the extension itself; used to
    # build a unique temporary file name next to the input.
    infile_noext = re.sub(r'\.\w+$', '', infile)
    file_ext = re.search(r'\.\w+$', infile).group()
    temp_files = []
    try:
        if inclusion_region:
            included_temp_file = infile_noext + uuid.uuid4().hex + file_ext
            exit_code = os.system( 'intersectBed -header -a {} -b {} | uniq > {}'.format(infile, inclusion_region, included_temp_file) )
            assert exit_code == 0
            # The included subset becomes the input for any exclusion step.
            infile = included_temp_file
            temp_files.append( included_temp_file )
        if exclusion_region:
            exit_code = os.system( 'intersectBed -header -a {} -b {} -v | uniq > {}'.format(infile, exclusion_region, outfile) )
            assert exit_code == 0
        if inclusion_region and not exclusion_region:
            copyfile(included_temp_file, outfile)
        elif not (inclusion_region or exclusion_region):
            # No filtering requested: plain copy (decompressing .gz input).
            if infile.endswith('.gz'):
                exit_code = os.system( 'gunzip -c {} > {}'.format(infile, outfile) )
                assert exit_code == 0
            else:
                copyfile(infile, outfile)
    finally:
        # Clean up intermediates even if a bedtools step failed.
        for file_i in temp_files:
            os.remove( file_i )
    return outfile
# Sort a VCF into reference contig order with bedtools.
# Historical alternative: utilities/vcfsorter.pl fa.dict unsorted.vcf > sorted.vcf
def vcfsorter(ref, vcfin, vcfout):
    """Sort *vcfin* by the contig order of *ref*'s .fai index into *vcfout*."""
    #vcfsort = '{}/utilities/vcfsorter.pl'.format(PRE_DIR)
    #os.system( '{} {} {} > {}'.format(vcfsort, hg_dict, vcfin, vcfout ) )
    faidx = ref + '.fai'
    command = 'bedtools sort -faidx {} -header -i {} > {}'.format(faidx, vcfin, vcfout)
    assert os.system(command) == 0
| #!/usr/bin/env python3
import sys, os, argparse, gzip, re, subprocess, uuid
MY_DIR = os.path.dirname(os.path.realpath(__file__))
PRE_DIR = os.path.join(MY_DIR, os.pardir)
sys.path.append( PRE_DIR )
import genomicFileHandler.genomic_file_handlers as genome
def bed_include(infile, inclusion_region, outfile):
assert infile != outfile
if inclusion_region:
exit_code = os.system( 'intersectBed -header -a {} -b {} | uniq > {}'.format(infile, inclusion_region, outfile) )
assert exit_code == 0
else:
outfile = None
return outfile
def bed_exclude(infile, exclusion_region, outfile):
assert infile != outfile
if exclusion_region:
exit_code = os.system( 'intersectBed -header -a {} -b {} -v | uniq > {}'.format(infile, exclusion_region, outfile) )
assert exit_code == 0
else:
outfile = None
return outfile
def bed_intersector(infile, outfile, inclusion_region=None, exclusion_region=None):
assert infile != outfile
from shutil import copyfile
# Get the input file name minus the extention, and also get the extension
infile_noext = re.sub(r'\.\w+$', '', infile)
file_ext = re.search(r'\.\w+$', infile).group()
temp_files = []
if inclusion_region:
included_temp_file = infile_noext + uuid.uuid4().hex + file_ext
exit_code = os.system( 'intersectBed -header -a {} -b {} | uniq > {}'.format(infile, inclusion_region, included_temp_file) )
assert exit_code == 0
infile = included_temp_file
temp_files.append( included_temp_file )
if exclusion_region:
exit_code = os.system( 'intersectBed -header -a {} -b {} -v | uniq > {}'.format(infile, exclusion_region, outfile) )
assert exit_code == 0
if inclusion_region and not exclusion_region:
copyfile(included_temp_file, outfile)
elif not (inclusion_region or exclusion_region):
if infile.endswith('.gz'):
exit_code = os.system( 'gunzip -c {} > {}'.format(infile, outfile) )
assert exit_code == 0
else:
copyfile(infile, outfile)
for file_i in temp_files:
os.remove( file_i )
return outfile
# Use utilities/vcfsorter.pl fa.dict unsorted.vcf > sorted.vcf
def vcfsorter(ref, vcfin, vcfout):
#vcfsort = '{}/utilities/vcfsorter.pl'.format(PRE_DIR)
#os.system( '{} {} {} > {}'.format(vcfsort, hg_dict, vcfin, vcfout ) )
fai = ref + '.fai'
exit_code = os.system('bedtools sort -faidx {} -header -i {} > {}'.format(fai, vcfin, vcfout))
assert exit_code == 0
| en | 0.446376 | #!/usr/bin/env python3 # Get the input file name minus the extention, and also get the extension # Use utilities/vcfsorter.pl fa.dict unsorted.vcf > sorted.vcf #vcfsort = '{}/utilities/vcfsorter.pl'.format(PRE_DIR) #os.system( '{} {} {} > {}'.format(vcfsort, hg_dict, vcfin, vcfout ) ) | 2.279131 | 2 |
Test/LambOseenTest.py | gforsyth/pyfmm | 11 | 6622785 | <reponame>gforsyth/pyfmm<gh_stars>10-100
"""
Fast Multipole Method test.
Solving Vortex Blob Method for Lamb Oseen test case.
The number of particles of the problem depends on the
size of the computational domain
Usage:
-l <number> Number of levels for the FMM
-p <number> Truncation Number for the FMM
-n <number> Size of the computational domain. Values 0 to 90
-d <number> Particles distribution:
0: Lattice distribution
1: Triangular distribution
2: Single particle distribution
-i <number> Vorticity initialization
0: Lamb Oseen
1: Random vorticity
2: Single particle
-h Show help
"""
from numpy import *
import profile
import pstats
import os
import time
import getopt
import csv
import sys
## Import local modules ----------------------------------------------
from pyFMM.FastMultipole.fastMultipoleMethod import FMMevalVelocity
from velocityBlobs import *
from simlogger import *
from particleDistribution import *
## Constants
# particles distribution
LATTICE_DIST = 0
TRIANGULAR_DIST = 1
QUASI_RAND_DIST = 2
# Vorticity initialization
LAMBOSEEN_INI = 0
RAMDOM_VORT_INI = 1
SINGLE_PART_INI = 2
# general constants
EPS = 10**(-20) # epsilon machine
SIM_DOMAIN_SIZE = mgrid[1.:10.1:.1]
# function that computes the Lamb Oseen Vorticity
def lambOseen(gamma, nu, z, t):
    """Vorticity of a Lamb-Oseen vortex centred at the origin.

    omega(r, t) = gamma / (4 pi nu t) * exp(-r^2 / (4 nu t))

    gamma : total circulation of the vortex
    nu    : kinematic viscosity
    z     : complex position(s) x + i y (scalar or numpy array)
    t     : evaluation time (> 0)

    Fix: the original referenced ``math.pi``, but this module never imports
    ``math`` (``from numpy import *`` does not provide it).  numpy's ``pi``
    is used instead — the value is identical.
    """
    #centerLambOseen = 0.1 - 0.1j
    centerLambOseen = 0j
    r = abs(centerLambOseen - z)
    c0 = 4. * nu * t          # viscous core scale, 4*nu*t
    c1 = gamma / pi
    vort = (c1 / c0) * exp(- r**2 / c0)
    return vort
# function that computes the Lamb Oseen Velocity
def lambOseenVel(gamma, nu, z, t):
    """Velocity field of a Lamb-Oseen vortex centred at the origin.

    |u|(r, t) = gamma / (2 pi r) * (1 - exp(-r^2 / (4 nu t)))

    Returns the complex velocity u + i v at position(s) *z*.  EPS (module
    constant) regularises the r = 0 singularity.

    Fix: the original referenced ``math.pi``, but this module never imports
    ``math`` (``from numpy import *`` does not provide it).  numpy's ``pi``
    is used instead — the value is identical.
    """
    centerLambOseen = 0j
    r = abs(z - centerLambOseen) + EPS
    nr2 = - r**2
    c0 = gamma / (2. * pi * r**2)
    c1 = nr2 / (4. * nu * t)
    vel = c0 * (1. - exp (c1))
    # Rotate the radial unit vector by 90 degrees to get the tangential one.
    vel = (-z.imag + z.real * 1j) * vel
    return vel
def main():
    """Run one Lamb-Oseen FMM experiment.

    Builds the particle field, evaluates the induced velocities with the
    FMM, compares against the analytic Lamb-Oseen velocity (or the direct
    N-body sum), then prints and optionally logs timings and errors.
    Command-line options (see the module docstring) override the defaults
    set below.  Python 2 script.
    """
    ## Default parameters
    ## FMM Parameters -----------------------------------------------
    level_param = 3
    p_param = 5
    # NOTE(review): h1_param / h2_param are parsed from -u / -v below but
    # never used afterwards in this function.
    h1_param = 0
    h2_param = 0
    # simulation parameters
    save_log = False # save sim data in the log file
    save_run = True # save sim information
    simulation_size = 0
    compare_analytical = 1 # compare FMM vs analytical problem
    ## input program parameters -----------------------------------
    particle_distribution = LATTICE_DIST # default distribution of the particles
    vorticity_distribution = LAMBOSEEN_INI # default distribution of the initial vorticity
    gamma = 1. # gamma parameter of the lamb Oseen
    nu = 0.0005 # viscosity value
    k = 2. # blob parameter k
    sigma = 0.02 # particle radius
    overlap = 0.8 # overlap ratio
    tini = 4. # initial time for the simulation
    tend = 4. # end time of the simulation
    dt = 0.02 # time step of the simulation
    steps = int((tend - tini) / dt) # number of simulation steps
    s_grid = SIM_DOMAIN_SIZE[simulation_size] # the side of the grid
    print "TADA:::: ", s_grid/2
    ## Variables calculations
    h = overlap * sigma # spacing of the particles
    sigma2 = sigma**2
    t = tini # time for the first step
    time_antiDiff = sigma2 / (2. * nu) # Time "anti-diffusion" correction
    noise = 1. * h # noise value is X times the lattice step
    ## parse command line options------------------------------------
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'sghp:l:u:v:n:d:i:', ['help', 'saveGraph', 'level:'])
    except getopt.error, msg:
        print msg
        print "for help use --help"
        sys.exit(2)
    # process options
    for o, a in opts:
        # process help option
        if o in ('-h', '--help'):
            print __doc__
            sys.exit(0)
        # process truncation parameter
        if o in ('-p'):
            p_param = int(a)
        # process level parameter
        if o in ('-l', '--level'):
            level_param = int(a)
        # process FMM h1 shift parameter
        if o in ('-u'):
            h1_param = int(a)
        # process FMM h2 parameter
        if o in ('-v'):
            h2_param = int(a)
        # process simulation size parameter (-n out of range keeps the default)
        if o in ('-n'):
            simulation_size = int(a)
            if (simulation_size < len(SIM_DOMAIN_SIZE)):
                s_grid = SIM_DOMAIN_SIZE[simulation_size]
        # process particle distribution parameter
        if o in ('-d'):
            particle_distribution = int(a)
        # process vorticity distribution parameter
        if o in ('-i'):
            vorticity_distribution = int(a)
    ## Initialization -----------------------------------------------
    # create the grid that contains the coordinates x,y
    z = ''
    if particle_distribution == LATTICE_DIST:
        z = latticeDistribution(-s_grid/2, s_grid/2, -s_grid/2, s_grid/2, h)
    elif particle_distribution == TRIANGULAR_DIST:
        z = triangleDistribution(-s_grid/2, s_grid/2, -s_grid/2, s_grid/2, h)
    elif particle_distribution == QUASI_RAND_DIST:
        z = quasiRandomDistribution(-s_grid/2, s_grid/2, -s_grid/2, s_grid/2, h, noise)
    # Initialize the particles circulation
    w = lambOseen(gamma, nu, z, t - time_antiDiff) # vorticity of the blob with time shifting fix
    circulation = w * h**2
    vel = zeros((len(z)), complex)
    print '\tNumber of blobs: ' + str(len(vel))
    print '\tTotal circulation in particles: ' + str(sum(circulation))
    ################## Experiment:
    # NOTE(review): time.clock() was removed in Python 3.8; fine for the
    # Python 2 era this script targets.
    timeTotal = time.clock()
    print 'FMM Started'
    # Calculate velocity FMM
    fmmTime = time.clock()
    circulation, z, tree, vel = FMMevalVelocity(level_param, p_param, circulation, z, vel, sigma2, k)
    fmmTime = time.clock() - fmmTime
    print 'FMM Finished'
    vel2 = []
    directTime = 0
    if (compare_analytical == 0):
        print 'Direct Started'
        # computation of the error against the DIRECT calculation
        directTime = time.clock()
        vel2 = evalVelocityArr(circulation, z, sigma2, k)
        directTime = time.clock() - directTime
        print 'Direct Finished'
    else:
        # computation of error against the ANALYTIC solution
        vel2 = lambOseenVel(gamma, nu, z, t)
    # Relative error per particle (log10), normalised by the peak reference
    # speed; EPS avoids log10(0) and division by zero.
    error = vel - vel2
    errorRel = log10(EPS + abs(error) / (max(abs(vel2)) + EPS))
    errorL2 = linalg.norm(error) / linalg.norm(vel2)
    # computes the total time
    timeTotal = time.clock() - timeTotal
    ############### End Experiment
    print 'Problem: LambOseen'
    print 'Vortex Blob Method:'
    print '\tNumber of blobs: ' + str(len(vel))
    print 'Fast Multipole Method:'
    print '\tNumber of levels: ' + str(level_param)
    print '\tTruncation number: ' + str(p_param)
    print '\tParticles per box: ' + str(len(vel)/(4**level_param))
    print '\tTotal circulation in particles: ' + str(sum(circulation))
    # Time Measuring
    print 'Time:'
    print '\t Direct: ' + str(directTime)
    print '\t FMM: ' + str(fmmTime)
    print '\t Total: ' + str(timeTotal)
    print 'Velocity:'
    print '\tMax vel: ' + str(max(abs(vel)))
    print '\tMin vel: ' + str(min(abs(vel)))
    print '\tMean vel: ' + str(mean(abs(vel)))
    print 'Relative Error:'
    print '\tLog Max error: ' + str(max(errorRel))
    print '\tLog Min error: ' + str(min(errorRel))
    print '\tError L2: ' + str(errorL2)
    # Locate the particle with the worst relative error for reporting.
    maxError = -100
    maxErrorZ = 0j
    errorPosition = 0
    for i in range(len(errorRel)):
        if errorRel[i] > maxError:
            errorPosition = i
            maxError = errorRel[i]
            maxErrorZ = z[i]
    print 'Pos Max Error Rel: ' + str(maxErrorZ)
    print 'Direct Value:\t' + str(vel2[errorPosition])
    print 'FMM Value:\t' + str(vel[errorPosition])
    if save_run:
        sim_str = 'N' + str(len(vel)) + '_L' + str(level_param) + '_P' + str(p_param)
        print sim_str
        # Create a new Logger
        simulation_folder = 'Batch_' +'VORT' + str(vorticity_distribution) + '_DIST' + str(particle_distribution)
        logger = SimLogger('SimulationLogger' + '_N' + str(len(vel)), simulation_folder)
        # Save run info
        runInfo = logger.csvOutputLog('runData')
        runInfo.addElement(len(vel)) # Number of Blobs
        runInfo.addElement(level_param) # FMM setup - Levels
        runInfo.addElement(p_param) # FMM setup - Truncation
        # Comparision against DIRECT calculation
        runInfo.addElement(max(errorRel)) # save max velocity error
        runInfo.addElement(min(errorRel)) # save min velocity error
        runInfo.addElement(errorL2) # save L-2 norm velocity error
        # Time results
        runInfo.addElement(fmmTime) # save time elapsed for FMM
        runInfo.addElement(directTime) # save time elapsed for DIRECT calculation
        runInfo.addElement(directTime/fmmTime) # acceleration ratio
        runInfo.flushRow()
        # Save error data
        logger.saveData(sim_str + '_simulation_Error',
            POINTS_DIST_SCATTER, [z.real, z.imag, errorRel]) # Data from error
# Script entry point: run the experiment only when executed directly.
if __name__ == "__main__":
    main()
| """
Fast Multipole Method test.
Solving Vortex Blob Method for Lamb Oseen test case.
The number of particles of the problem depends on the
size of the computational domain
Usage:
-l <number> Number of levels for the FMM
-p <number> Truncation Number for the FMM
-n <number> Size of the computational domain. Values 0 to 90
-d <number> Particles distribution:
0: Lattice distribution
1: Triangular distribution
2: Single particle distribution
-i <number> Vorticity initialization
0: Lamb Oseen
1: Random vorticity
2: Single particle
-h Show help
"""
from numpy import *
import profile
import pstats
import os
import time
import getopt
import csv
import sys
## Import local modules ----------------------------------------------
from pyFMM.FastMultipole.fastMultipoleMethod import FMMevalVelocity
from velocityBlobs import *
from simlogger import *
from particleDistribution import *
## Constants
# particle distribution selectors (value of the -d command-line flag)
LATTICE_DIST = 0
TRIANGULAR_DIST = 1
QUASI_RAND_DIST = 2
# vorticity initialization selectors (value of the -i command-line flag)
LAMBOSEEN_INI = 0
RAMDOM_VORT_INI = 1  # NOTE(review): "RAMDOM" typo kept -- renaming would break callers
SINGLE_PART_INI = 2
# general constants
EPS = 10**(-20) # epsilon machine (guards divisions/logs against zero)
# 91 candidate grid side lengths 1.0..10.0 in steps of 0.1; indexed by the
# -n flag ("Values 0 to 90" per the module docstring)
SIM_DOMAIN_SIZE = mgrid[1.:10.1:.1]
# function that computes the Lamb Oseen Vorticity
def lambOseen(gamma, nu, z, t):
    """Return the Lamb-Oseen vorticity at complex position(s) *z* at time *t*.

    gamma -- total circulation of the vortex
    nu    -- kinematic viscosity
    z     -- complex coordinate(s) x + i*y (scalar or numpy array)
    t     -- evaluation time (must be > 0)
    """
    # The vortex core is pinned at the origin (an off-centre variant was
    # left commented out in the original source).
    core = 0j
    radius = abs(core - z)
    spread = 4. * nu * t  # diffusive core area, grows linearly in time
    return (gamma / math.pi) / spread * exp(-radius**2 / spread)
# function that computes the Lamb Oseen Velocity
def lambOseenVel(gamma, nu, z, t):
    """Return the analytic Lamb-Oseen velocity at complex position(s) *z*.

    The flow is purely azimuthal; the result is the complex number
    u + i*v obtained by rotating the position vector by +90 degrees and
    scaling it with the tangential speed profile.
    """
    core = 0j
    # EPS (module constant) keeps the on-axis evaluation finite.
    radius = abs(z - core) + EPS
    swirl = gamma / (2. * math.pi * radius**2) * (1. - exp(-radius**2 / (4. * nu * t)))
    # (-y + i*x) rotates the radial direction into the swirl direction.
    return (-z.imag + z.real * 1j) * swirl
def main():
    """Run one Lamb-Oseen FMM experiment and report/log error and timing.

    Parses command-line flags (see the module docstring), builds a vortex
    blob distribution, evaluates the induced velocity with the FMM,
    compares it against either a direct summation or the analytic
    solution, prints a summary and optionally logs the run to CSV.

    NOTE(review): Python 2 source (print statements, old-style
    ``except getopt.error, msg`` and ``time.clock``, removed in Py3.8+).
    """
    ## Default parameters
    ## FMM Parameters -----------------------------------------------
    level_param = 3
    p_param = 5
    h1_param = 0
    h2_param = 0
    # simulation parameters
    save_log = False # save sim data in the log file
    save_run = True # save sim information
    simulation_size = 0
    compare_analytical = 1 # compare FMM vs analytical problem
    ## input program parameters -----------------------------------
    particle_distribution = LATTICE_DIST # default distribution of the particles
    vorticity_distribution = LAMBOSEEN_INI # default distribution of the initial vorticity
    gamma = 1. # gamma parameter of the lamb Oseen
    nu = 0.0005 # viscosity value
    k = 2. # blob parameter k
    sigma = 0.02 # particle radius
    overlap = 0.8 # overlap ratio
    tini = 4. # initial time for the simulation
    tend = 4. # end time of the simulation
    dt = 0.02 # time step of the simulation
    steps = int((tend - tini) / dt) # number of simulation steps (unused below)
    s_grid = SIM_DOMAIN_SIZE[simulation_size] # the side of the grid
    print "TADA:::: ", s_grid/2
    ## Variables calculations
    h = overlap * sigma # spacing of the particles
    sigma2 = sigma**2
    t = tini # time for the first step
    time_antiDiff = sigma2 / (2. * nu) # Time "anti-diffusion" correction
    noise = 1. * h # noise value is X times the lattice step
    ## parse command line options------------------------------------
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'sghp:l:u:v:n:d:i:', ['help', 'saveGraph', 'level:'])
    except getopt.error, msg:
        print msg
        print "for help use --help"
        sys.exit(2)
    # process options
    for o, a in opts:
        # process help option
        if o in ('-h', '--help'):
            print __doc__
            sys.exit(0)
        # process truncation parameter
        if o in ('-p'):
            p_param = int(a)
        # process level parameter
        if o in ('-l', '--level'):
            level_param = int(a)
        # process FMM h1 shift parameter
        if o in ('-u'):
            h1_param = int(a)
        # process FMM h2 parameter
        if o in ('-v'):
            h2_param = int(a)
        # process simulation size parameter
        if o in ('-n'):
            simulation_size = int(a)
            # out-of-range sizes silently keep the default grid side
            if (simulation_size < len(SIM_DOMAIN_SIZE)):
                s_grid = SIM_DOMAIN_SIZE[simulation_size]
        # process particle distribution parameter
        if o in ('-d'):
            particle_distribution = int(a)
        # process vorticity distribution parameter
        if o in ('-i'):
            vorticity_distribution = int(a)
    ## Initialization -----------------------------------------------
    # create the grid that contains the coordinates x,y
    z = ''
    if particle_distribution == LATTICE_DIST:
        z = latticeDistribution(-s_grid/2, s_grid/2, -s_grid/2, s_grid/2, h)
    elif particle_distribution == TRIANGULAR_DIST:
        z = triangleDistribution(-s_grid/2, s_grid/2, -s_grid/2, s_grid/2, h)
    elif particle_distribution == QUASI_RAND_DIST:
        z = quasiRandomDistribution(-s_grid/2, s_grid/2, -s_grid/2, s_grid/2, h, noise)
    # Initialize the particles circulation
    w = lambOseen(gamma, nu, z, t - time_antiDiff) # vorticity of the blob with time shifting fix
    circulation = w * h**2
    vel = zeros((len(z)), complex)
    print '\tNumber of blobs: ' + str(len(vel))
    print '\tTotal circulation in particles: ' + str(sum(circulation))
    ################## Experiment:
    timeTotal = time.clock()
    print 'FMM Started'
    # Calculate velocity FMM
    fmmTime = time.clock()
    circulation, z, tree, vel = FMMevalVelocity(level_param, p_param, circulation, z, vel, sigma2, k)
    fmmTime = time.clock() - fmmTime
    print 'FMM Finished'
    vel2 = []
    directTime = 0
    if (compare_analytical == 0):
        print 'Direct Started'
        # computation of the error against the DIRECT calculation
        directTime = time.clock()
        vel2 = evalVelocityArr(circulation, z, sigma2, k)
        directTime = time.clock() - directTime
        print 'Direct Finished'
    else:
        # computation of error agains the ANALYTIC solution
        vel2 = lambOseenVel(gamma, nu, z, t)
    # per-particle log10 relative error and global L2 error norm
    error = vel - vel2
    errorRel = log10(EPS + abs(error) / (max(abs(vel2)) + EPS))
    errorL2 = linalg.norm(error) / linalg.norm(vel2)
    # computes the total time
    timeTotal = time.clock() - timeTotal
    ############### End Experiment
    print 'Problem: LambOseen'
    print 'Vortex Blob Method:'
    print '\tNumber of blobs: ' + str(len(vel))
    print 'Fast Multipole Method:'
    print '\tNumber of levels: ' + str(level_param)
    print '\tTruncation number: ' + str(p_param)
    print '\tParticles per box: ' + str(len(vel)/(4**level_param))
    print '\tTotal circulation in particles: ' + str(sum(circulation))
    # Time Measuring
    print 'Time:'
    print '\t Direct: ' + str(directTime)
    print '\t FMM: ' + str(fmmTime)
    print '\t Total: ' + str(timeTotal)
    print 'Velocity:'
    print '\tMax vel: ' + str(max(abs(vel)))
    print '\tMin vel: ' + str(min(abs(vel)))
    print '\tMean vel: ' + str(mean(abs(vel)))
    print 'Relative Error:'
    print '\tLog Max error: ' + str(max(errorRel))
    print '\tLog Min error: ' + str(min(errorRel))
    print '\tError L2: ' + str(errorL2)
    # locate the particle with the worst relative error
    maxError = -100
    maxErrorZ = 0j
    errorPosition = 0
    for i in range(len(errorRel)):
        if errorRel[i] > maxError:
            errorPosition = i
            maxError = errorRel[i]
            maxErrorZ = z[i]
    print 'Pos Max Error Rel: ' + str(maxErrorZ)
    print 'Direct Value:\t' + str(vel2[errorPosition])
    print 'FMM Value:\t' + str(vel[errorPosition])
    if save_run:
        sim_str = 'N' + str(len(vel)) + '_L' + str(level_param) + '_P' + str(p_param)
        print sim_str
        # Create a new Logger
        simulation_folder = 'Batch_' +'VORT' + str(vorticity_distribution) + '_DIST' + str(particle_distribution)
        logger = SimLogger('SimulationLogger' + '_N' + str(len(vel)), simulation_folder)
        # Save run info
        runInfo = logger.csvOutputLog('runData')
        runInfo.addElement(len(vel)) # Number of Blobs
        runInfo.addElement(level_param) # FMM setup - Levels
        runInfo.addElement(p_param) # FMM setup - Truncation
        # Comparision against DIRECT calculation
        runInfo.addElement(max(errorRel)) # save max velocity error
        runInfo.addElement(min(errorRel)) # save min velocity error
        runInfo.addElement(errorL2) # save L-2 norm velocity error
        # Time results
        runInfo.addElement(fmmTime) # save time elapsed for FMM
        runInfo.addElement(directTime) # save time elapsed for DIRECT calculation
        runInfo.addElement(directTime/fmmTime) # acceleration ratio
        runInfo.flushRow()
        # Save error data
        logger.saveData(sim_str + '_simulation_Error',
            POINTS_DIST_SCATTER, [z.real, z.imag, errorRel]) # Data from error
# Script entry point: run the experiment only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
Solving Vortex Blob Method for Lamb Oseen test case.
The number of particles of the problem depends on the
size of the computational domain
Usage:
-l <number> Number of levels for the FMM
-p <number> Truncation Number for the FMM
-n <number> Size of the computational domain. Values 0 to 90
-d <number> Particles distribution:
0: Lattice distribution
1: Triangular distribution
2: Single particle distribution
-i <number> Vorticity initialization
0: Lamb Oseen
1: Random vorticity
2: Single particle
-h Show help ## Import local modules ---------------------------------------------- ## Constants # particles distribution # Vorticity initialization # general constants # epsilon machine # function that computes the Lamb Oseen Vorticity #centerLambOseen = 0.1 - 0.1j # function that computes the Lamb Oseen Velocity ## Default parameters ## FMM Parameters ----------------------------------------------- # simulation parameters # save sim data in the log file # save sim information # compare FMM vs analytical problem ## input program parameters ----------------------------------- # default distribution of the particles # default distribution of the initial vorticity # gamma parameter of the lamb Oseen # viscosity value # blob parameter k # particle radius # overlap ratio # initial time for the simulation # end time of the simulation # time step of the simulation # number of simulation steps # the side of the grid ## Variables calculations # spacing of the particles # time for the first step # Time "anti-diffusion" correction # noise value is X times the lattice step ## parse command line options------------------------------------ # process options # process help option # process truncation parameter # process level parameter # process FMM h1 shift parameter # process FMM h2 parameter # process simulation size parameter # process particle distribution parameter # process vorticity distribution parameter ## Initialization ----------------------------------------------- # create the grid that contains the coordinates x,y # Initialize the particles circulation # vorticity of the blob with time shifting fix ################## Experiment: # Calculate velocity FMM # computation of the error against the DIRECT calculation # computation of error agains the ANALYTIC solution # computes the total time ############### End Experiment # Time Measuring # Create a new Logger # Save run info # Number of Blobs # FMM setup - Levels # FMM setup - Truncation # Comparision against DIRECT 
calculation # save max velocity error # save min velocity error # save L-2 norm velocity error # Time results # save time elapsed for FMM # save time elapsed for DIRECT calculation # acceleration ratio # Save error data # Data from error # Run Main function | 2.675387 | 3 |
problem0385.py | kmarcini/Project-Euler-Python | 0 | 6622786 | <filename>problem0385.py
###########################
#
# #385 Ellipses inside triangles - Project Euler
# https://projecteuler.net/problem=385
#
# Code by <NAME>
#
###########################
| <filename>problem0385.py
###########################
#
# #385 Ellipses inside triangles - Project Euler
# https://projecteuler.net/problem=385
#
# Code by <NAME>
#
###########################
| de | 0.34224 | ########################### # # #385 Ellipses inside triangles - Project Euler # https://projecteuler.net/problem=385 # # Code by <NAME> # ########################### | 1.996838 | 2 |
gpio.py | Heych88/Intel_edision_rotor_control | 0 | 6622787 | <reponame>Heych88/Intel_edision_rotor_control
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import mraa
class gpio():
    """Thin wrapper around a single mraa GPIO pin (Intel Edison).

    Provides pin validation, direction / pull-resistor configuration,
    digital read/write and edge-interrupt registration.
    """

    def __init__(self, pin, dir=mraa.DIR_OUT, resistor=None):
        """Configure *pin* (0-13) with the given direction and resistor."""
        self.pin_num = pin
        self.set_pin(pin)
        self.resistor = resistor
        self.set_dir(dir, resistor=resistor)
        # Interrupt edge types (aliases of the mraa constants)
        self.RISING = mraa.EDGE_RISING
        self.FALLING = mraa.EDGE_FALLING
        self.BOTH = mraa.EDGE_BOTH
        # IO direction / resistor types
        self.OUT = mraa.DIR_OUT
        self.IN = mraa.DIR_IN
        self.PULL_UP = mraa.DIR_OUT_HIGH
        self.PULL_DOWN = mraa.DIR_OUT_LOW

    def set_pin(self, pin):
        """
        Set the IO pin used for Gpio
        :param pin: pin number (0 to 13)
        :raises Exception: if *pin* is outside the supported range
        """
        if pin not in range(0, 14):
            raise Exception("Incorrect pin {} selected. Pins available (0 to 13)".format(pin))
        else:
            self.pin = pin
            self.gpio_pin = mraa.Gpio(pin)

    def set_dir(self, dir, resistor=None):
        """
        Set the IO pins direction of use as either input or output.
        If resistor is not None, only an output direction on the pin can be used.
        :param dir: input (IN) or output (OUT) direction
        :param resistor: None -> do not use a pull up resistor,
                         'UP' -> use a pull up resistor,
                         'DOWN' -> use a pull down resistor
        """
        self.IN = mraa.DIR_IN
        self.OUT = mraa.DIR_OUT
        self.PULL_UP = mraa.DIR_OUT_HIGH
        self.PULL_DOWN = mraa.DIR_OUT_LOW
        if dir not in (mraa.DIR_OUT, mraa.DIR_IN):
            # incorrect arguments passed in
            raise Exception("Incorrect pin direction dir={}. Use 'gpio.IN' or 'gpio.OUT'".format(dir))
        elif resistor not in (None, self.PULL_UP, self.PULL_DOWN):
            # incorrect arguments passed in
            raise Exception("Incorrect resistor={}. Use 'UP' or 'Down'".format(resistor))
        elif dir is self.IN:
            self.dir = dir
            self.gpio_pin.dir(self.IN)
            if resistor is not None:
                # NOTE(review): raising Warning aborts the caller; warnings.warn
                # may have been intended -- original behaviour kept.
                raise Warning('default', 'Pin dir is {} but should be \'None\' when using resistor'.format(dir))
        elif resistor is not None:
            self.resistor = resistor
            self.dir = dir
            # output driven high (pull-up) or low (pull-down)
            if resistor is self.PULL_UP:
                self.gpio_pin.dir(mraa.DIR_OUT_HIGH)
            else:
                self.gpio_pin.dir(mraa.DIR_OUT_LOW)
        else:
            self.resistor = resistor
            self.dir = dir
            # default to only output
            self.gpio_pin.dir(mraa.DIR_OUT)

    def set_high(self):
        """Drive the pin high."""
        self.gpio_pin.write(1)

    def set_low(self):
        """Drive the pin low."""
        self.gpio_pin.write(0)

    def read(self):
        """Return the pin's current digital value.

        Fix: the original called mraa but discarded the value read.
        """
        return self.gpio_pin.read()

    def read_dir(self):
        """Return the pin's configured direction.

        Fix: the original called mraa but discarded the returned value.
        """
        return self.gpio_pin.readDir()

    def isr_catch(self, *args):
        """Default interrupt handler (placeholder)."""
        print('Hello')

    def interrupt(self, edge, *args):
        """Register the default handler for *edge* transitions on this pin.

        :param edge: gpio.BOTH, gpio.FALLING or gpio.RISING
        :raises Exception: on an unsupported edge value
        """
        if edge not in (self.BOTH, self.FALLING, self.RISING):
            # incorrect arguments passed in
            raise Exception("Incorrect edge supplied. edge={}. Use gpio.BOTH, gpio.FALLING or gpio.RISING".format(edge))
        else:
            # Fix: pass the handler itself; the original invoked it immediately
            # and registered its None return value as the callback.
            self.gpio_pin.isr(edge, self.isr_catch, args)
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import mraa
class gpio():
    """Thin wrapper around a single mraa GPIO pin (Intel Edison).

    Provides pin validation, direction / pull-resistor configuration,
    digital read/write and edge-interrupt registration.
    """

    def __init__(self, pin, dir=mraa.DIR_OUT, resistor=None):
        """Configure *pin* (0-13) with the given direction and resistor."""
        self.pin_num = pin
        self.set_pin(pin)
        self.resistor = resistor
        self.set_dir(dir, resistor=resistor)
        # Interrupt edge types (aliases of the mraa constants)
        self.RISING = mraa.EDGE_RISING
        self.FALLING = mraa.EDGE_FALLING
        self.BOTH = mraa.EDGE_BOTH
        # IO direction / resistor types
        self.OUT = mraa.DIR_OUT
        self.IN = mraa.DIR_IN
        self.PULL_UP = mraa.DIR_OUT_HIGH
        self.PULL_DOWN = mraa.DIR_OUT_LOW

    def set_pin(self, pin):
        """
        Set the IO pin used for Gpio
        :param pin: pin number (0 to 13)
        :raises Exception: if *pin* is outside the supported range
        """
        if pin not in range(0, 14):
            raise Exception("Incorrect pin {} selected. Pins available (0 to 13)".format(pin))
        else:
            self.pin = pin
            self.gpio_pin = mraa.Gpio(pin)

    def set_dir(self, dir, resistor=None):
        """
        Set the IO pins direction of use as either input or output.
        If resistor is not None, only an output direction on the pin can be used.
        :param dir: input (IN) or output (OUT) direction
        :param resistor: None -> do not use a pull up resistor,
                         'UP' -> use a pull up resistor,
                         'DOWN' -> use a pull down resistor
        """
        self.IN = mraa.DIR_IN
        self.OUT = mraa.DIR_OUT
        self.PULL_UP = mraa.DIR_OUT_HIGH
        self.PULL_DOWN = mraa.DIR_OUT_LOW
        if dir not in (mraa.DIR_OUT, mraa.DIR_IN):
            # incorrect arguments passed in
            raise Exception("Incorrect pin direction dir={}. Use 'gpio.IN' or 'gpio.OUT'".format(dir))
        elif resistor not in (None, self.PULL_UP, self.PULL_DOWN):
            # incorrect arguments passed in
            raise Exception("Incorrect resistor={}. Use 'UP' or 'Down'".format(resistor))
        elif dir is self.IN:
            self.dir = dir
            self.gpio_pin.dir(self.IN)
            if resistor is not None:
                # NOTE(review): raising Warning aborts the caller; warnings.warn
                # may have been intended -- original behaviour kept.
                raise Warning('default', 'Pin dir is {} but should be \'None\' when using resistor'.format(dir))
        elif resistor is not None:
            self.resistor = resistor
            self.dir = dir
            # output driven high (pull-up) or low (pull-down)
            if resistor is self.PULL_UP:
                self.gpio_pin.dir(mraa.DIR_OUT_HIGH)
            else:
                self.gpio_pin.dir(mraa.DIR_OUT_LOW)
        else:
            self.resistor = resistor
            self.dir = dir
            # default to only output
            self.gpio_pin.dir(mraa.DIR_OUT)

    def set_high(self):
        """Drive the pin high."""
        self.gpio_pin.write(1)

    def set_low(self):
        """Drive the pin low."""
        self.gpio_pin.write(0)

    def read(self):
        """Return the pin's current digital value.

        Fix: the original called mraa but discarded the value read.
        """
        return self.gpio_pin.read()

    def read_dir(self):
        """Return the pin's configured direction.

        Fix: the original called mraa but discarded the returned value.
        """
        return self.gpio_pin.readDir()

    def isr_catch(self, *args):
        """Default interrupt handler (placeholder)."""
        print('Hello')

    def interrupt(self, edge, *args):
        """Register the default handler for *edge* transitions on this pin.

        :param edge: gpio.BOTH, gpio.FALLING or gpio.RISING
        :raises Exception: on an unsupported edge value
        """
        if edge not in (self.BOTH, self.FALLING, self.RISING):
            # incorrect arguments passed in
            raise Exception("Incorrect edge supplied. edge={}. Use gpio.BOTH, gpio.FALLING or gpio.RISING".format(edge))
        else:
            # Fix: pass the handler itself; the original invoked it immediately
            # and registered its None return value as the callback.
            self.gpio_pin.isr(edge, self.isr_catch, args)
plotting.py | harryturr/analysis | 0 | 6622788 | #!/usr/bin/env python2
# <NAME> 2019
# @harryturr
import numpy as np
import os
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# NOTE(review): the two lines below are unfilled templates -- the bare '%s'
# placeholders must be replaced with the actual file numbers and labels
# before this script is syntactically valid / runnable. TODO confirm the
# intended values against the acquisition run.
file_number = np.array([%s]) % #number of ifle
label_list = np.array([%s]) % #label
# data files are named: filename_prefix + zero-padded number + filename_suffix
filename_prefix = 'prefix'
filename_suffix = 'suffix'
# CSV column indices in the acquired data files (see genfromtxt usage below)
vcolumn = 1     # bias voltage
dfcolumn = 2    # frequency shift
disscolumn = 3  # dissipation
ampcolumn = 4   # amplitude
def smooth(y, box_pts):
    """Boxcar-average *y* over a window of *box_pts* samples.

    Implemented as a same-length convolution with a normalised
    rectangular kernel, so values near the edges are averaged against
    implicit zero padding.
    """
    kernel = np.full(box_pts, 1.0 / box_pts)
    return np.convolve(y, kernel, mode='same')
def minimum(y, pts):
    """Return the index of the smallest value of the smoothed signal.

    The trace is boxcar-smoothed over *pts* samples first so that noise
    does not select a spurious dip; the minimum value and its index are
    also echoed to stdout (Python 2 print statement).
    """
    val, idx = min((val, idx) for (idx, val) in enumerate(smooth(y,pts)))
    print val, idx
    return idx
# Main processing loop: for each data file, split the bias sweep at the
# frequency-shift minimum, compare the left/right halves and plot
# frequency shift, amplitude and dissipation.  (Python 2 print statements.)
index = 0
for f_number in file_number:
    fnum = str(f_number).zfill(3)
    filename = filename_prefix + fnum + filename_suffix
    print filename
    # extracting data
    data = np.genfromtxt(filename, delimiter=',')
    Vbias = data[:, vcolumn]
    df = data[:, dfcolumn]
    amp = data[:, ampcolumn]
    diss = data[:, disscolumn]
    # keep only the first half of the sweep (forward direction)
    h_Vbias = Vbias[:len(Vbias)/2]
    h_df = df[:len(Vbias)/2] * 50 # scaling to Hz (50 hz/V)
    h_amp = amp[:len(Vbias)/2]
    h_diss = diss[:len(Vbias)/2]
    # determining where to split the data (minimum of the smoothed df)
    idx = minimum(h_df,50)
    print len(h_df)
    # splitting freq shift; the right half is reversed so both halves
    # run in the same bias direction
    h_df_l = h_df[:idx]
    h_df_r = h_df[idx:2*idx]
    h_df_r = list(reversed(h_df_r))
    # calculating difference
    # NOTE(review): assumes 2*idx <= len(h_df) so both halves have idx
    # samples; otherwise this raises IndexError -- TODO confirm.
    h_df_diff = [h_df_l[i] - h_df_r[i] for i in range(0,len(h_df_l))]
    # splitting dissipation the same way
    h_diss_l = h_diss[:idx]
    h_diss_r = h_diss[idx:2*idx]
    h_diss_r = list(reversed(h_diss_r))
    h_diss_diff = [h_diss_l[i] - h_diss_r[i] for i in range(0,len(h_diss_l))]
    # define figure environment
    fig1 = plt.figure(1)
    fig1.set_figheight(6.8)
    fig1.set_figwidth(8.5)
    # plotting dissipation vs freq shift
    ax0=fig1.add_subplot(1,1,1)
    ax0.plot(h_df[:idx], h_diss[:idx],color = 'lime', label = 'left')
    ax0.plot(h_df[idx:], h_diss[idx:], color = 'orange',label = 'right')
    ax0.set_title("")
    ax0.set_xlabel('frequency shift (V)',fontsize=16)
    ax0.set_ylabel('dissipation (V)',fontsize=16)
    ax0.tick_params(direction='in', length=6, width=2)
    ax0.legend(loc='upper right', shadow=True, fontsize='large')
    ax0.set_title('')
    # figure 2: df / amplitude / dissipation vs bias, raw + smoothed
    fig2 = plt.figure(2)
    fig2.set_figheight(11)
    fig2.set_figwidth(8.5)
    ax1=fig2.add_subplot(3,1,1)
    ax2=fig2.add_subplot(3,1,2,sharex=ax1)
    ax3=fig2.add_subplot(3,1,3,sharex=ax1)
    # fitting
    # a = np.polyfit(h_Vbias, h_df, 2)
    # b = np.poly1d(a)
    ax1.plot(h_Vbias,h_df, label = label_list[index])
    ax1.plot(h_Vbias,smooth(h_df,100), label = 'smooth')
    ax2.plot(h_Vbias,h_amp,label = label_list[index])
    ax2.plot(h_Vbias,smooth(h_amp,5), label = 'smooth')
    ax3.plot(h_Vbias,h_diss,label = label_list[index])
    ax3.plot(h_Vbias,smooth(h_diss,5),label = 'smooth')
    ax1.legend(loc='upper right', shadow=True, fontsize='large')
    ax1.set_title("")
    ax1.set_xlabel('')
    ax1.set_ylabel('frequency shift (hz)', fontsize = 16)
    ax1.tick_params(direction='in', length=6, width=2)
    ax2.set_title("")
    ax2.set_xlabel('')
    ax2.set_ylabel('amplitude (V)', fontsize = 16)
    ax2.tick_params(direction='in', length=6, width=2)
    ax3.set_title("")
    ax3.set_xlabel('')
    ax3.set_ylabel('dissipation (V)', fontsize = 16)
    ax3.tick_params(direction='in', length=6, width=2)
    fig2.subplots_adjust(hspace=0, right = 0.8)
    fig1.subplots_adjust(hspace=0, right = 0.8)
    ax1.set_title('')
    # figure 3: left/right half overlap with residuals on twin axes
    fig3 = plt.figure(3)
    fig3.set_figheight(6.8)
    fig3.set_figwidth(8.5)
    # plotting left and right overlap ~~~ ~~~ ~~~
    ax6=fig3.add_subplot(2,1,1)
    ax7 = fig3.add_subplot(2,1,2,sharex=ax6)
    ax6.plot(h_Vbias,h_df, label = 'full')
    ax6.plot(h_Vbias[:len(h_df_l)], h_df_l,color='lime',label = 'right')
    ax6.plot(h_Vbias[:len(h_df_r)], h_df_r, color='orange', label = 'left')
    ax6.set_ylabel('df (hz)', fontsize=16)
    ax6.tick_params(direction='in', length=6, width=2)
    ax6.legend(loc='upper right', shadow=True, fontsize='large')
    ax60 = ax6.twinx()
    ax60.plot(h_Vbias[:len(h_df_l)], h_df_diff,'r',alpha=0.1)
    ax60.set_ylabel('residuals', color='r')
    ax60.tick_params('y', colors='r', direction='in')
    ax60.set_ylim(-30, 10)
    ax60.set_ylabel('residuals', color='r', fontsize=16)
    ax60.tick_params(direction='in', length=6, width=2)
    ax7.plot(h_Vbias,h_diss, label = 'full')
    ax7.plot(h_Vbias[:len(h_diss_l)], h_diss_l, color = 'lime',label = 'left')
    ax7.plot(h_Vbias[:len(h_diss_r)], h_diss_r, color='orange',label = 'right')
    ax7.set_ylabel('dissipation (V)', fontsize=16)
    ax7.set_xlabel('bias (V))', fontsize=16)
    ax7.tick_params(direction='in', length=6, width=2)
    ax7.legend(loc='upper right', shadow=True, fontsize='large')
    ax70 = ax7.twinx()
    ax70.plot(h_Vbias[:len(h_diss_l)], h_diss_diff, 'r', alpha=0.1)
    ax70.set_ylabel('residuals', color='r', fontsize=16)
    ax70.tick_params(direction='in', length=6, width=2)
    ax70.tick_params('y', colors='r', direction='in')
    index = index +1
# NOTE(review): indentation was lost in this dump -- the two lines below are
# placed after the loop, so they use the *last* processed file's data.
np.savetxt("df_qd.csv", np.column_stack((h_Vbias, h_df)), delimiter=",", fmt='%s')
plt.show()
| #!/usr/bin/env python2
# <NAME> 2019
# @harryturr
import numpy as np
import os
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
file_number = np.array([%s]) % #number of ifle
label_list = np.array([%s]) % #label
filename_prefix = 'prefix'
filename_suffix = 'suffix'
vcolumn = 1
dfcolumn = 2
disscolumn = 3
ampcolumn = 4
# moving average box by convolution
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
def minimum(y, pts):
val, idx = min((val, idx) for (idx, val) in enumerate(smooth(y,pts)))
print val, idx
return idx
index = 0
for f_number in file_number:
fnum = str(f_number).zfill(3)
filename = filename_prefix + fnum + filename_suffix
print filename
# extracting data
data = np.genfromtxt(filename, delimiter=',')
Vbias = data[:, vcolumn]
df = data[:, dfcolumn]
amp = data[:, ampcolumn]
diss = data[:, disscolumn]
h_Vbias = Vbias[:len(Vbias)/2]
h_df = df[:len(Vbias)/2] * 50 # scaling to Hz (50 hz/V)
h_amp = amp[:len(Vbias)/2]
h_diss = diss[:len(Vbias)/2]
# determining where to split the data
idx = minimum(h_df,50)
print len(h_df)
# splitting freq shift
h_df_l = h_df[:idx]
h_df_r = h_df[idx:2*idx]
h_df_r = list(reversed(h_df_r))
# calculating difference
h_df_diff = [h_df_l[i] - h_df_r[i] for i in range(0,len(h_df_l))]
# splitting dissipation
h_diss_l = h_diss[:idx]
h_diss_r = h_diss[idx:2*idx]
h_diss_r = list(reversed(h_diss_r))
h_diss_diff = [h_diss_l[i] - h_diss_r[i] for i in range(0,len(h_diss_l))]
# define figure environment
fig1 = plt.figure(1)
fig1.set_figheight(6.8)
fig1.set_figwidth(8.5)
# plotting dissipation vs freq shift
ax0=fig1.add_subplot(1,1,1)
ax0.plot(h_df[:idx], h_diss[:idx],color = 'lime', label = 'left')
ax0.plot(h_df[idx:], h_diss[idx:], color = 'orange',label = 'right')
ax0.set_title("")
ax0.set_xlabel('frequency shift (V)',fontsize=16)
ax0.set_ylabel('dissipation (V)',fontsize=16)
ax0.tick_params(direction='in', length=6, width=2)
ax0.legend(loc='upper right', shadow=True, fontsize='large')
ax0.set_title('')
fig2 = plt.figure(2)
fig2.set_figheight(11)
fig2.set_figwidth(8.5)
ax1=fig2.add_subplot(3,1,1)
ax2=fig2.add_subplot(3,1,2,sharex=ax1)
ax3=fig2.add_subplot(3,1,3,sharex=ax1)
# fitting
# a = np.polyfit(h_Vbias, h_df, 2)
# b = np.poly1d(a)
ax1.plot(h_Vbias,h_df, label = label_list[index])
ax1.plot(h_Vbias,smooth(h_df,100), label = 'smooth')
ax2.plot(h_Vbias,h_amp,label = label_list[index])
ax2.plot(h_Vbias,smooth(h_amp,5), label = 'smooth')
ax3.plot(h_Vbias,h_diss,label = label_list[index])
ax3.plot(h_Vbias,smooth(h_diss,5),label = 'smooth')
ax1.legend(loc='upper right', shadow=True, fontsize='large')
ax1.set_title("")
ax1.set_xlabel('')
ax1.set_ylabel('frequency shift (hz)', fontsize = 16)
ax1.tick_params(direction='in', length=6, width=2)
ax2.set_title("")
ax2.set_xlabel('')
ax2.set_ylabel('amplitude (V)', fontsize = 16)
ax2.tick_params(direction='in', length=6, width=2)
ax3.set_title("")
ax3.set_xlabel('')
ax3.set_ylabel('dissipation (V)', fontsize = 16)
ax3.tick_params(direction='in', length=6, width=2)
fig2.subplots_adjust(hspace=0, right = 0.8)
fig1.subplots_adjust(hspace=0, right = 0.8)
ax1.set_title('')
fig3 = plt.figure(3)
fig3.set_figheight(6.8)
fig3.set_figwidth(8.5)
# plotting left and right overlap ~~~ ~~~ ~~~
ax6=fig3.add_subplot(2,1,1)
ax7 = fig3.add_subplot(2,1,2,sharex=ax6)
ax6.plot(h_Vbias,h_df, label = 'full')
ax6.plot(h_Vbias[:len(h_df_l)], h_df_l,color='lime',label = 'right')
ax6.plot(h_Vbias[:len(h_df_r)], h_df_r, color='orange', label = 'left')
ax6.set_ylabel('df (hz)', fontsize=16)
ax6.tick_params(direction='in', length=6, width=2)
ax6.legend(loc='upper right', shadow=True, fontsize='large')
ax60 = ax6.twinx()
ax60.plot(h_Vbias[:len(h_df_l)], h_df_diff,'r',alpha=0.1)
ax60.set_ylabel('residuals', color='r')
ax60.tick_params('y', colors='r', direction='in')
ax60.set_ylim(-30, 10)
ax60.set_ylabel('residuals', color='r', fontsize=16)
ax60.tick_params(direction='in', length=6, width=2)
ax7.plot(h_Vbias,h_diss, label = 'full')
ax7.plot(h_Vbias[:len(h_diss_l)], h_diss_l, color = 'lime',label = 'left')
ax7.plot(h_Vbias[:len(h_diss_r)], h_diss_r, color='orange',label = 'right')
ax7.set_ylabel('dissipation (V)', fontsize=16)
ax7.set_xlabel('bias (V))', fontsize=16)
ax7.tick_params(direction='in', length=6, width=2)
ax7.legend(loc='upper right', shadow=True, fontsize='large')
ax70 = ax7.twinx()
ax70.plot(h_Vbias[:len(h_diss_l)], h_diss_diff, 'r', alpha=0.1)
ax70.set_ylabel('residuals', color='r', fontsize=16)
ax70.tick_params(direction='in', length=6, width=2)
ax70.tick_params('y', colors='r', direction='in')
index = index +1
np.savetxt("df_qd.csv", np.column_stack((h_Vbias, h_df)), delimiter=",", fmt='%s')
plt.show()
| en | 0.7306 | #!/usr/bin/env python2 # <NAME> 2019 # @harryturr #number of ifle #label # moving average box by convolution # extracting data # scaling to Hz (50 hz/V) # determining where to split the data # splitting freq shift # calculating difference # splitting dissipation # define figure environment # plotting dissipation vs freq shift # fitting # a = np.polyfit(h_Vbias, h_df, 2) # b = np.poly1d(a) # plotting left and right overlap ~~~ ~~~ ~~~ | 2.474503 | 2 |
pYadivForm.py | alanphys/pYadiv | 1 | 6622789 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pYadivForm.ui',
# licensing of 'pYadivForm.ui' applies.
#
# Created: Mon Jul 15 11:15:34 2019
# by: pyside2-uic running on PySide2 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_pYadivForm(object):
    """UI builder for the pYadiv main window.

    Auto-generated by pyside2-uic from 'pYadivForm.ui' (see the file
    header): do not edit by hand -- regenerate from the .ui file instead,
    as any manual changes will be lost.
    """

    def setupUi(self, pYadivForm):
        """Build all widgets, menus, toolbar and actions on *pYadivForm*."""
        pYadivForm.setObjectName("pYadivForm")
        pYadivForm.resize(528, 605)
        pYadivForm.setAcceptDrops(True)
        # central widget with a grid layout holding the image label
        self.centralwidget = QtWidgets.QWidget(pYadivForm)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
        self.centralwidget.setSizePolicy(sizePolicy)
        self.centralwidget.setAcceptDrops(True)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        # image display label (accepts drag-and-drop)
        self.qlImage = QtWidgets.QLabel(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.qlImage.sizePolicy().hasHeightForWidth())
        self.qlImage.setSizePolicy(sizePolicy)
        self.qlImage.setAcceptDrops(True)
        self.qlImage.setText("")
        self.qlImage.setScaledContents(False)
        self.qlImage.setObjectName("qlImage")
        self.gridLayout.addWidget(self.qlImage, 0, 0, 1, 1)
        pYadivForm.setCentralWidget(self.centralwidget)
        # menu bar: File, Tools, Help
        self.menubar = QtWidgets.QMenuBar(pYadivForm)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 528, 22))
        self.menubar.setObjectName("menubar")
        self.menu_File = QtWidgets.QMenu(self.menubar)
        self.menu_File.setObjectName("menu_File")
        self.menu_Help = QtWidgets.QMenu(self.menubar)
        self.menu_Help.setObjectName("menu_Help")
        self.menuTools = QtWidgets.QMenu(self.menubar)
        self.menuTools.setObjectName("menuTools")
        pYadivForm.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(pYadivForm)
        self.statusbar.setObjectName("statusbar")
        pYadivForm.setStatusBar(self.statusbar)
        self.toolBar = QtWidgets.QToolBar(pYadivForm)
        self.toolBar.setObjectName("toolBar")
        pYadivForm.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        # actions with icons loaded from the compiled resource file
        self.actionOpen = QtWidgets.QAction(pYadivForm)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/Images/Icons/ImageOpen.xpm"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionOpen.setIcon(icon)
        self.actionOpen.setObjectName("actionOpen")
        self.actionExit = QtWidgets.QAction(pYadivForm)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(":/Images/Icons/exit.xpm"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionExit.setIcon(icon1)
        self.actionExit.setObjectName("actionExit")
        self.actionAbout = QtWidgets.QAction(pYadivForm)
        self.actionAbout.setObjectName("actionAbout")
        self.actionInvert = QtWidgets.QAction(pYadivForm)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(":/Images/Icons/invert.xpm"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionInvert.setIcon(icon2)
        self.actionInvert.setObjectName("actionInvert")
        self.actionAuto_Window = QtWidgets.QAction(pYadivForm)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(":/Images/Icons/window.xpm"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionAuto_Window.setIcon(icon3)
        self.actionAuto_Window.setObjectName("actionAuto_Window")
        # populate menus and toolbar
        self.menu_File.addAction(self.actionOpen)
        self.menu_File.addAction(self.actionExit)
        self.menu_Help.addAction(self.actionAbout)
        self.menuTools.addAction(self.actionInvert)
        self.menuTools.addAction(self.actionAuto_Window)
        self.menubar.addAction(self.menu_File.menuAction())
        self.menubar.addAction(self.menuTools.menuAction())
        self.menubar.addAction(self.menu_Help.menuAction())
        self.toolBar.addAction(self.actionOpen)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionInvert)
        self.toolBar.addAction(self.actionAuto_Window)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionExit)
        self.retranslateUi(pYadivForm)
        # old-style signal/slot connection generated by pyside2-uic
        QtCore.QObject.connect(self.actionExit, QtCore.SIGNAL("triggered()"), pYadivForm.close)
        QtCore.QMetaObject.connectSlotsByName(pYadivForm)

    def retranslateUi(self, pYadivForm):
        """Set all translatable UI strings (titles, menu and action texts)."""
        pYadivForm.setWindowTitle(QtWidgets.QApplication.translate("pYadivForm", "pYadiv Form", None, -1))
        self.menu_File.setTitle(QtWidgets.QApplication.translate("pYadivForm", "Fi&le", None, -1))
        self.menu_Help.setTitle(QtWidgets.QApplication.translate("pYadivForm", "&Help", None, -1))
        self.menuTools.setTitle(QtWidgets.QApplication.translate("pYadivForm", "&Tools", None, -1))
        self.toolBar.setWindowTitle(QtWidgets.QApplication.translate("pYadivForm", "toolBar", None, -1))
        self.actionOpen.setText(QtWidgets.QApplication.translate("pYadivForm", "&Open", None, -1))
        self.actionExit.setText(QtWidgets.QApplication.translate("pYadivForm", "E&xit", None, -1))
        self.actionAbout.setText(QtWidgets.QApplication.translate("pYadivForm", "&About", None, -1))
        self.actionInvert.setText(QtWidgets.QApplication.translate("pYadivForm", "&Invert", None, -1))
        self.actionAuto_Window.setText(QtWidgets.QApplication.translate("pYadivForm", "Auto &Window", None, -1))
import pYadivForm_rc
| # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pYadivForm.ui',
# licensing of 'pYadivForm.ui' applies.
#
# Created: Mon Jul 15 11:15:34 2019
# by: pyside2-uic running on PySide2 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_pYadivForm(object):
def setupUi(self, pYadivForm):
pYadivForm.setObjectName("pYadivForm")
pYadivForm.resize(528, 605)
pYadivForm.setAcceptDrops(True)
self.centralwidget = QtWidgets.QWidget(pYadivForm)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setAcceptDrops(True)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.qlImage = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.qlImage.sizePolicy().hasHeightForWidth())
self.qlImage.setSizePolicy(sizePolicy)
self.qlImage.setAcceptDrops(True)
self.qlImage.setText("")
self.qlImage.setScaledContents(False)
self.qlImage.setObjectName("qlImage")
self.gridLayout.addWidget(self.qlImage, 0, 0, 1, 1)
pYadivForm.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(pYadivForm)
self.menubar.setGeometry(QtCore.QRect(0, 0, 528, 22))
self.menubar.setObjectName("menubar")
self.menu_File = QtWidgets.QMenu(self.menubar)
self.menu_File.setObjectName("menu_File")
self.menu_Help = QtWidgets.QMenu(self.menubar)
self.menu_Help.setObjectName("menu_Help")
self.menuTools = QtWidgets.QMenu(self.menubar)
self.menuTools.setObjectName("menuTools")
pYadivForm.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(pYadivForm)
self.statusbar.setObjectName("statusbar")
pYadivForm.setStatusBar(self.statusbar)
self.toolBar = QtWidgets.QToolBar(pYadivForm)
self.toolBar.setObjectName("toolBar")
pYadivForm.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.actionOpen = QtWidgets.QAction(pYadivForm)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/Images/Icons/ImageOpen.xpm"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionOpen.setIcon(icon)
self.actionOpen.setObjectName("actionOpen")
self.actionExit = QtWidgets.QAction(pYadivForm)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/Images/Icons/exit.xpm"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionExit.setIcon(icon1)
self.actionExit.setObjectName("actionExit")
self.actionAbout = QtWidgets.QAction(pYadivForm)
self.actionAbout.setObjectName("actionAbout")
self.actionInvert = QtWidgets.QAction(pYadivForm)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/Images/Icons/invert.xpm"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionInvert.setIcon(icon2)
self.actionInvert.setObjectName("actionInvert")
self.actionAuto_Window = QtWidgets.QAction(pYadivForm)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/Images/Icons/window.xpm"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionAuto_Window.setIcon(icon3)
self.actionAuto_Window.setObjectName("actionAuto_Window")
self.menu_File.addAction(self.actionOpen)
self.menu_File.addAction(self.actionExit)
self.menu_Help.addAction(self.actionAbout)
self.menuTools.addAction(self.actionInvert)
self.menuTools.addAction(self.actionAuto_Window)
self.menubar.addAction(self.menu_File.menuAction())
self.menubar.addAction(self.menuTools.menuAction())
self.menubar.addAction(self.menu_Help.menuAction())
self.toolBar.addAction(self.actionOpen)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionInvert)
self.toolBar.addAction(self.actionAuto_Window)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionExit)
self.retranslateUi(pYadivForm)
QtCore.QObject.connect(self.actionExit, QtCore.SIGNAL("triggered()"), pYadivForm.close)
QtCore.QMetaObject.connectSlotsByName(pYadivForm)
def retranslateUi(self, pYadivForm):
pYadivForm.setWindowTitle(QtWidgets.QApplication.translate("pYadivForm", "pYadiv Form", None, -1))
self.menu_File.setTitle(QtWidgets.QApplication.translate("pYadivForm", "Fi&le", None, -1))
self.menu_Help.setTitle(QtWidgets.QApplication.translate("pYadivForm", "&Help", None, -1))
self.menuTools.setTitle(QtWidgets.QApplication.translate("pYadivForm", "&Tools", None, -1))
self.toolBar.setWindowTitle(QtWidgets.QApplication.translate("pYadivForm", "toolBar", None, -1))
self.actionOpen.setText(QtWidgets.QApplication.translate("pYadivForm", "&Open", None, -1))
self.actionExit.setText(QtWidgets.QApplication.translate("pYadivForm", "E&xit", None, -1))
self.actionAbout.setText(QtWidgets.QApplication.translate("pYadivForm", "&About", None, -1))
self.actionInvert.setText(QtWidgets.QApplication.translate("pYadivForm", "&Invert", None, -1))
self.actionAuto_Window.setText(QtWidgets.QApplication.translate("pYadivForm", "Auto &Window", None, -1))
import pYadivForm_rc
| en | 0.782074 | # -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'pYadivForm.ui', # licensing of 'pYadivForm.ui' applies. # # Created: Mon Jul 15 11:15:34 2019 # by: pyside2-uic running on PySide2 5.12.1 # # WARNING! All changes made in this file will be lost! | 1.422722 | 1 |
topfarm/tests/test_files/xy3tb.py | DTUWindEnergy/TopFarm2 | 4 | 6622790 | <filename>topfarm/tests/test_files/xy3tb.py
import numpy as np
from topfarm.cost_models.dummy import DummyCost, DummyCostPlotComp
from topfarm.easy_drivers import EasyScipyOptimizeDriver
from topfarm._topfarm import TopFarmProblem
from topfarm.plotting import NoPlot
from topfarm.constraint_components.spacing import SpacingConstraint
from topfarm.constraint_components.capacity import CapacityConstraint, CapacityComp
import topfarm
from topfarm.constraint_components.boundary import XYBoundaryConstraint
initial = np.array([[6, 0], [6, -8], [1, 1]]) # initial turbine layouts
optimal = np.array([[2.5, -3], [6, -7], [4.5, -3]]) # optimal turbine layouts
boundary = np.array([(0, 0), (6, 0), (6, -10), (0, -10)]) # turbine boundaries
desired = np.array([[3, -3], [7, -7], [4, -3]]) # desired turbine layouts
desvars = {topfarm.x_key: initial[:, 0], topfarm.y_key: initial[:, 1]}
capacit = {"max_capacity": 5000, "rated_power_array": [3000, 1000, 500]}
def get_tf(plot=False, **kwargs):
k = {'cost_comp': DummyCost(desired[:, :2], [topfarm.x_key, topfarm.y_key]),
'design_vars': {topfarm.x_key: initial[:, 0], topfarm.y_key: initial[:, 1]},
'driver': EasyScipyOptimizeDriver(disp=False, tol=1e-8),
'plot_comp': NoPlot(),
'constraints': [SpacingConstraint(2), XYBoundaryConstraint(boundary), CapacityConstraint(**capacit)]}
if plot:
k['plot_comp'] = DummyCostPlotComp(desired)
k.update(kwargs)
return TopFarmProblem(**k)
| <filename>topfarm/tests/test_files/xy3tb.py
import numpy as np
from topfarm.cost_models.dummy import DummyCost, DummyCostPlotComp
from topfarm.easy_drivers import EasyScipyOptimizeDriver
from topfarm._topfarm import TopFarmProblem
from topfarm.plotting import NoPlot
from topfarm.constraint_components.spacing import SpacingConstraint
from topfarm.constraint_components.capacity import CapacityConstraint, CapacityComp
import topfarm
from topfarm.constraint_components.boundary import XYBoundaryConstraint
initial = np.array([[6, 0], [6, -8], [1, 1]]) # initial turbine layouts
optimal = np.array([[2.5, -3], [6, -7], [4.5, -3]]) # optimal turbine layouts
boundary = np.array([(0, 0), (6, 0), (6, -10), (0, -10)]) # turbine boundaries
desired = np.array([[3, -3], [7, -7], [4, -3]]) # desired turbine layouts
desvars = {topfarm.x_key: initial[:, 0], topfarm.y_key: initial[:, 1]}
capacit = {"max_capacity": 5000, "rated_power_array": [3000, 1000, 500]}
def get_tf(plot=False, **kwargs):
k = {'cost_comp': DummyCost(desired[:, :2], [topfarm.x_key, topfarm.y_key]),
'design_vars': {topfarm.x_key: initial[:, 0], topfarm.y_key: initial[:, 1]},
'driver': EasyScipyOptimizeDriver(disp=False, tol=1e-8),
'plot_comp': NoPlot(),
'constraints': [SpacingConstraint(2), XYBoundaryConstraint(boundary), CapacityConstraint(**capacit)]}
if plot:
k['plot_comp'] = DummyCostPlotComp(desired)
k.update(kwargs)
return TopFarmProblem(**k)
| en | 0.601439 | # initial turbine layouts # optimal turbine layouts # turbine boundaries # desired turbine layouts | 2.114615 | 2 |
tests/storage/cases/test_KT1TguwcxJYaEYSMhXJFjHzXmro8cP37K13N.py | juztin/pytezos-1 | 1 | 6622791 | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1TguwcxJYaEYSMhXJFjHzXmro8cP37K13N(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/carthagenet/KT1TguwcxJYaEYSMhXJFjHzXmro8cP37K13N.json')
def test_storage_encoding_KT1TguwcxJYaEYSMhXJFjHzXmro8cP37K13N(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1TguwcxJYaEYSMhXJFjHzXmro8cP37K13N(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1TguwcxJYaEYSMhXJFjHzXmro8cP37K13N(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
| from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1TguwcxJYaEYSMhXJFjHzXmro8cP37K13N(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/carthagenet/KT1TguwcxJYaEYSMhXJFjHzXmro8cP37K13N.json')
def test_storage_encoding_KT1TguwcxJYaEYSMhXJFjHzXmro8cP37K13N(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1TguwcxJYaEYSMhXJFjHzXmro8cP37K13N(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1TguwcxJYaEYSMhXJFjHzXmro8cP37K13N(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
| none | 1 | 2.483882 | 2 | |
properties/p.py | anandjoshi91/pythonpropertyfileloader | 3 | 6622792 | <gh_stars>1-10
import re
from collections import OrderedDict
class Property:
""" A class similar to Properties class in Java
Reads variables/properties defined in a file
Allows cross referencing of variables
"""
def __init__(self, assign_token: str = '=', comment_token: str = '#', line_append_token: str = '\\',
ordered: bool =True):
""" optional parameters
A standard property file follows the convention
= is used to assign a variable or property
# for comments in the property file
\ a long variable definition can span across multiple lines. Use \ to continue to next line
override them if your property file uses different convention
True is ordered
"""
self.__props = OrderedDict() if ordered else dict()
self.__assign_token = assign_token
self.__comment_token = comment_token
self.__line_append_token = line_append_token
def load_property_files(self, *argv):
"""
:param argv: Takes variable length of arguments. Enter the property files to be loaded.
:return: Dictionary. Key value pair of properties after their evaluation
"""
self.__read_property_files(*argv)
for key in self.__props.keys():
self.__props[key] = self.__evaluate_properties(key)
return self.__props
def __read_property_files(self, *argv):
""" Reads one or more property files
Takes in the input path of the file as a String
"""
if len(argv) < 1:
print('Please provide a property file to be loaded.')
else:
try:
for prop_file in argv:
line = ''
with open(prop_file, 'rt') as f:
for single_line in f:
l = single_line.strip()
if l and not l.startswith(self.__comment_token):
if l.endswith(self.__line_append_token):
# Property descriptions spans multiple lines. Append new line with previous lines
line += l
line = line[:-1] # Strip \ from the line
else:
if len(line) > 0:
line += l
l = line.strip()
index_of_separator = l.find('=')
key = l[:index_of_separator].strip()
value = l[index_of_separator+1:].strip()
if key not in self.__props.keys():
self.__props[key] = value
line = ''
else:
print('Property : ', key, ' = ', value, ' already defined !')
except Exception as error:
raise IOError('Error in loading property file. Check file(s) = ', argv, ' ', error)
def __evaluate_properties(self, key):
""" Private method.
Recursively evaluates a property defined
in terms of other properties
"""
if key in self.__props.keys():
val = self.__props[key]
else:
val = key
evalset = set(re.findall(r'(?<={)[^}]*(?=})', val))
try:
# If the set is empty. This is the final value. return it
if not evalset:
return val
else:
for token in evalset:
replace_this = '${' + token + '}'
replace_with = self.__props[token]
val = val.replace(replace_this, replace_with)
return self.__evaluate_properties(val)
except Exception as error:
raise ValueError('Please check property files. Some property might not be defined. Check ', token, ' ',
error)
| import re
from collections import OrderedDict
class Property:
""" A class similar to Properties class in Java
Reads variables/properties defined in a file
Allows cross referencing of variables
"""
def __init__(self, assign_token: str = '=', comment_token: str = '#', line_append_token: str = '\\',
ordered: bool =True):
""" optional parameters
A standard property file follows the convention
= is used to assign a variable or property
# for comments in the property file
\ a long variable definition can span across multiple lines. Use \ to continue to next line
override them if your property file uses different convention
True is ordered
"""
self.__props = OrderedDict() if ordered else dict()
self.__assign_token = assign_token
self.__comment_token = comment_token
self.__line_append_token = line_append_token
def load_property_files(self, *argv):
"""
:param argv: Takes variable length of arguments. Enter the property files to be loaded.
:return: Dictionary. Key value pair of properties after their evaluation
"""
self.__read_property_files(*argv)
for key in self.__props.keys():
self.__props[key] = self.__evaluate_properties(key)
return self.__props
def __read_property_files(self, *argv):
""" Reads one or more property files
Takes in the input path of the file as a String
"""
if len(argv) < 1:
print('Please provide a property file to be loaded.')
else:
try:
for prop_file in argv:
line = ''
with open(prop_file, 'rt') as f:
for single_line in f:
l = single_line.strip()
if l and not l.startswith(self.__comment_token):
if l.endswith(self.__line_append_token):
# Property descriptions spans multiple lines. Append new line with previous lines
line += l
line = line[:-1] # Strip \ from the line
else:
if len(line) > 0:
line += l
l = line.strip()
index_of_separator = l.find('=')
key = l[:index_of_separator].strip()
value = l[index_of_separator+1:].strip()
if key not in self.__props.keys():
self.__props[key] = value
line = ''
else:
print('Property : ', key, ' = ', value, ' already defined !')
except Exception as error:
raise IOError('Error in loading property file. Check file(s) = ', argv, ' ', error)
def __evaluate_properties(self, key):
""" Private method.
Recursively evaluates a property defined
in terms of other properties
"""
if key in self.__props.keys():
val = self.__props[key]
else:
val = key
evalset = set(re.findall(r'(?<={)[^}]*(?=})', val))
try:
# If the set is empty. This is the final value. return it
if not evalset:
return val
else:
for token in evalset:
replace_this = '${' + token + '}'
replace_with = self.__props[token]
val = val.replace(replace_this, replace_with)
return self.__evaluate_properties(val)
except Exception as error:
raise ValueError('Please check property files. Some property might not be defined. Check ', token, ' ',
error) | en | 0.809609 | A class similar to Properties class in Java
Reads variables/properties defined in a file
Allows cross referencing of variables optional parameters
A standard property file follows the convention
= is used to assign a variable or property
# for comments in the property file
\ a long variable definition can span across multiple lines. Use \ to continue to next line
override them if your property file uses different convention
True is ordered :param argv: Takes variable length of arguments. Enter the property files to be loaded.
:return: Dictionary. Key value pair of properties after their evaluation Reads one or more property files
Takes in the input path of the file as a String # Property descriptions spans multiple lines. Append new line with previous lines # Strip \ from the line Private method.
Recursively evaluates a property defined
in terms of other properties # If the set is empty. This is the final value. return it | 3.939425 | 4 |
python/PyPedals.py | enok82/VLCPedals | 0 | 6622793 | '''
Created on 11 apr. 2017
@author: stefan
'''
from telnetlib import Telnet
from serial import Serial
from time import sleep
class VlcControl:
def __init__(self):
self.telnetConnection = Telnet("localhost", 4212)
telnetBuffer = ""
while not telnetBuffer.endswith("Password: "):
telnetBuffer = self.telnetConnection.read_eager()
telnetBuffer = telnetBuffer.decode("utf-8")
if telnetBuffer != '':
print(telnetBuffer)
self.sendCommand(b"python\n")
telnetBuffer = ""
while not telnetBuffer.endswith("> "):
telnetBuffer = self.telnetConnection.read_eager()
telnetBuffer = telnetBuffer.decode("utf-8")
if telnetBuffer != '':
print(telnetBuffer)
def sendCommand(self, cmd):
self.telnetConnection.write(cmd)
class SerialControl:
def __init__(self, port = "COM10"):
self.serialConnection = Serial(port, 9600, timeout = 0)
if __name__ == '__main__':
vlcControl = VlcControl()
serialControl = SerialControl()
playbackRate = 1
while True:
sleep(0.2)
receivedCommand = serialControl.serialConnection.readline().strip().decode("utf-8")
if receivedCommand == "#0+0":
vlcControl.sendCommand(b"seek +15\n")
print(receivedCommand)
elif receivedCommand == "#1+0":
vlcControl.sendCommand(b"pause\n")
print(receivedCommand)
elif receivedCommand == "#2+0":
vlcControl.sendCommand(b"seek -15\n")
print(receivedCommand)
elif receivedCommand == "#0+1":
playbackRate += 0.1
vlcControl.sendCommand(str.encode("rate {}\n".format(playbackRate)))
print(receivedCommand)
elif receivedCommand == "#1+1":
playbackRate = 1
vlcControl.sendCommand(str.encode("rate {}\n".format(playbackRate)))
print(receivedCommand)
elif receivedCommand == "#2+1":
playbackRate -= 0.1
vlcControl.sendCommand(str.encode("rate {}\n".format(playbackRate)))
print(receivedCommand)
elif receivedCommand != "":
print(receivedCommand)
receivedCommand = ""
| '''
Created on 11 apr. 2017
@author: stefan
'''
from telnetlib import Telnet
from serial import Serial
from time import sleep
class VlcControl:
def __init__(self):
self.telnetConnection = Telnet("localhost", 4212)
telnetBuffer = ""
while not telnetBuffer.endswith("Password: "):
telnetBuffer = self.telnetConnection.read_eager()
telnetBuffer = telnetBuffer.decode("utf-8")
if telnetBuffer != '':
print(telnetBuffer)
self.sendCommand(b"python\n")
telnetBuffer = ""
while not telnetBuffer.endswith("> "):
telnetBuffer = self.telnetConnection.read_eager()
telnetBuffer = telnetBuffer.decode("utf-8")
if telnetBuffer != '':
print(telnetBuffer)
def sendCommand(self, cmd):
self.telnetConnection.write(cmd)
class SerialControl:
def __init__(self, port = "COM10"):
self.serialConnection = Serial(port, 9600, timeout = 0)
if __name__ == '__main__':
vlcControl = VlcControl()
serialControl = SerialControl()
playbackRate = 1
while True:
sleep(0.2)
receivedCommand = serialControl.serialConnection.readline().strip().decode("utf-8")
if receivedCommand == "#0+0":
vlcControl.sendCommand(b"seek +15\n")
print(receivedCommand)
elif receivedCommand == "#1+0":
vlcControl.sendCommand(b"pause\n")
print(receivedCommand)
elif receivedCommand == "#2+0":
vlcControl.sendCommand(b"seek -15\n")
print(receivedCommand)
elif receivedCommand == "#0+1":
playbackRate += 0.1
vlcControl.sendCommand(str.encode("rate {}\n".format(playbackRate)))
print(receivedCommand)
elif receivedCommand == "#1+1":
playbackRate = 1
vlcControl.sendCommand(str.encode("rate {}\n".format(playbackRate)))
print(receivedCommand)
elif receivedCommand == "#2+1":
playbackRate -= 0.1
vlcControl.sendCommand(str.encode("rate {}\n".format(playbackRate)))
print(receivedCommand)
elif receivedCommand != "":
print(receivedCommand)
receivedCommand = ""
| en | 0.442336 | Created on 11 apr. 2017 @author: stefan | 2.580562 | 3 |
Code/src/models/networks/AE_ResNet18_dual.py | antoine-spahr/X-ray-Anomaly-Detection | 2 | 6622794 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models.utils
from src.models.networks.ResNetBlocks import DownResBlock, UpResBlock
class ResNet18_Encoder(nn.Module):
"""
Combine multiple Residual block to form a ResNet18 up to the Average poolong
layer. The size of the embeding dimension can be different than the one from
ResNet18. The ResNet18 part can be initialized with pretrained weights on ImageNet.
"""
def __init__(self, pretrained=False):
"""
Build the Encoder from the layer's specification. The encoder is composed
of an initial 7x7 convolution that halves the input dimension (h and w)
followed by several layers of residual blocks. Each layer is composed of
k Residual blocks. The first one reduce the input height and width by a
factor 2 while the number of channel is increased by 2.
----------
INPUT
|---- pretrained (bool) whether the ResNet18 should be loaded with
| pretrained weights on Imagenet.
OUTPUT
|---- None
"""
nn.Module.__init__(self)
# First convolution
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64, affine=False)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# Residual layers
self.layer1 = nn.Sequential(DownResBlock(64, 64, downsample=False),
DownResBlock(64, 64, downsample=False))
self.layer2 = nn.Sequential(DownResBlock(64, 128, downsample=True),
DownResBlock(128, 128, downsample=False))
self.layer3 = nn.Sequential(DownResBlock(128, 256, downsample=True),
DownResBlock(256, 256, downsample=False))
self.layer4 = nn.Sequential(DownResBlock(256, 512, downsample=True),
DownResBlock(512, 512, downsample=False))
if pretrained: self.load_pretrain()
def forward(self, x):
"""
Forward pass of the Encoder.
----------
INPUT
|---- x (torch.Tensor) the input tensor (B x C x H x W). The input
| image can be grayscale or RGB. If it's grayscale it will
| be converted to RGB by stacking 3 copy.
OUTPUT
|---- out (torch.Tensor) the embedding of the image x in embed_dim
| vector dimension.
"""
# if grayscale (1 channel) convert to to RGB by duplicating on 3 channel
# assuming shape : (... x C x H x W)
if x.shape[-3] == 1:
x = torch.cat([x]*3, dim=1)
# first 1x1 convolution
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
# 4 layers
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def load_pretrain(self):
"""
Initialize the Encoder's weights with the weights pretrained on ImageNet.
----------
INPUT
|---- None
OUTPUT
|---- None
"""
# download ResNet18 trained on ImageNet state dict
pretrainResNet18_state_dict = torchvision.models.utils.load_state_dict_from_url('https://download.pytorch.org/models/resnet18-5c106cde.pth')
# Get the modified ResNet Encoder state dict
model_state_dict = self.state_dict()
# keep only matching keys
pretrained_dict = {k: v for k, v in pretrainResNet18_state_dict.items() if k in model_state_dict}
# upadte state dict
model_state_dict.update(pretrained_dict)
self.load_state_dict(model_state_dict)
class ResNet18_Decoder(nn.Module):
"""
Combine multiple Up Residual Blocks to form a ResNet18 like decoder.
"""
def __init__(self, output_channels=3):
"""
Build the ResNet18-like decoder. The decoder is composed of a Linear layer.
The linear layer is interpolated (bilinear) to 512x16x16 which is then
processed by several Up-layer of Up Residual Blocks. Each Up-layer is
composed of k Up residual blocks. The first ones are without up sampling.
The last one increase the input size (h and w) by a factor 2 and reduce
the number of channels by a factor 2.
---------
INPUT
|---- output_size (tuple) the decoder output size. (C x H x W)
OUTPUT
|---- None
"""
nn.Module.__init__(self)
self.uplayer1 = nn.Sequential(UpResBlock(512, 512, upsample=False),
UpResBlock(512, 256, upsample=True))
self.uplayer2 = nn.Sequential(UpResBlock(256, 256, upsample=False),
UpResBlock(256, 128, upsample=True))
self.uplayer3 = nn.Sequential(UpResBlock(128, 128, upsample=False),
UpResBlock(128, 64, upsample=True))
self.uplayer4 = nn.Sequential(UpResBlock(64, 64, upsample=False),
UpResBlock(64, 64, upsample=True))
self.uplayer_final = nn.Sequential(nn.Upsample(mode='bilinear', scale_factor=2, align_corners=True),
nn.Conv2d(64, output_channels, kernel_size=1, stride=1, bias=False))
self.final_activation = nn.Tanh()
def forward(self, x):
"""
Forward pass of the decoder.
----------
INPUT
|---- x (torch.Tensor) the input with dimension (B x embed_dim).
OUTPUT
|---- out (torch.Tensor) the reconstructed image (B x C x H x W).
"""
x = self.uplayer1(x)
x = self.uplayer2(x)
x = self.uplayer3(x)
x = self.uplayer4(x)
x = self.uplayer_final(x)
x = self.final_activation(x)
return x
class AE_SVDD_Hybrid(nn.Module):
"""
Autoencoder based on the ResNet18. The Encoder is a ResNet18 up to the
average pooling layer, and the decoder is a mirrored ResNet18. The embedding
is additionnaly processed through two convolutional layers to generate a
smaller embedding for the DeepSVDD data representation.
"""
def __init__(self, pretrain_ResNetEnc=False, output_channels=3, return_svdd_embed=True):
"""
Build the ResNet18 Autoencoder.The Encoder can be initialized with
weights pretrained on ImageNet.
----------
INPUT
|---- pretrain_ResNetEnc (bool) whether to use pretrained weights on
| ImageNet for the encoder initialization.
|---- out_channel (int) the output channel of the reconstructed image.
|---- return_embed (bool) whether to return the DeepSVDD embedding
| in the forward
OUTPUT
|---- None
"""
nn.Module.__init__(self)
self.return_svdd_embed = return_svdd_embed
self.encoder = ResNet18_Encoder(pretrained=pretrain_ResNetEnc)
self.conv_svdd = nn.Sequential(nn.AvgPool2d(kernel_size=3, stride=2),
nn.Conv2d(512, 256, 3, stride=1, bias=False),
nn.BatchNorm2d(256, affine=False),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(256, 128, 1, stride=1, bias=False),
nn.BatchNorm2d(128, affine=False))
self.decoder = ResNet18_Decoder(output_channels=output_channels)
def forward(self, input):
"""
Foward pass of the Autoencoder to reconstruct the provided image.
----------
INPUT
|---- input (torch.Tensor) the input image (Grayscale or RGB) with
| dimension (B x C x H x W).
OUTPUT
|---- rec (torch.Tensor) the reconstructed image (B x C' x H x W)
"""
ae_embedding = self.encoder(input)
rec = self.decoder(ae_embedding)
svdd_embedding = None
if self.return_svdd_embed:
svdd_embedding = self.conv_svdd(ae_embedding)
svdd_embedding = torch.flatten(svdd_embedding, start_dim=1)
return rec, svdd_embedding
class AE_ResNet18(nn.Module):
"""
Define an autoencoder with a ResNet18 backbone : The encoder is similar to a
ResNet18 up to the last convolutional layer to which an AvgPool2d layer is
added to divide the output dimension by a factor 2. The decoder is an Upsample
layer followed by a mirrored ResNet18. The deconvolution is performed by
upsampling + convolution.
"""
def __init__(self, pretrain_ResNetEnc=False, output_channels=3):
"""
Build the ResNet18 Autoencoder.The Encoder can be initialized with
weights pretrained on ImageNet.
----------
INPUT
|---- pretrain_ResNetEnc (bool) whether to use pretrained weights on
| ImageNet for the encoder initialization.
|---- out_channel (int) the output channel of the reconstructed image.
OUTPUT
|---- None
"""
nn.Module.__init__(self)
self.encoder = ResNet18_Encoder(pretrained=pretrain_ResNetEnc)
#self.avg_pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
#self.interp = nn.Upsample(mode='bilinear', scale_factor=2, align_corners=True)
self.decoder = ResNet18_Decoder(output_channels=output_channels)
self.encoding_only = False
def forward(self, input):
"""
Foward pass of the Autoencoder to reconstruct the provided image.
----------
INPUT
|---- input (torch.Tensor) the input image (Grayscale or RGB) with
| dimension (B x C x H x W).
OUTPUT
|---- rec (torch.Tensor) the reconstructed image (B x C' x H x W)
"""
ae_embedding = self.encoder(input)
#ae_embedding = self.avg_pool(ae_embedding)
rec = None
if not self.encoding_only:
rec = self.decoder(ae_embedding) #self.interp(ae_embedding))
return rec, ae_embedding
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models.utils
from src.models.networks.ResNetBlocks import DownResBlock, UpResBlock
class ResNet18_Encoder(nn.Module):
    """
    ResNet18 feature extractor: the standard ResNet18 stem plus the four
    residual layers, truncated before the average-pooling / fully-connected
    head. The trunk can optionally be initialized from the torchvision
    ResNet18 checkpoint pretrained on ImageNet (only parameters whose names
    match are loaded; see `load_pretrain`).
    """
    def __init__(self, pretrained=False):
        """
        Build the Encoder from the layer's specification. The encoder is composed
        of an initial 7x7 convolution that halves the input dimension (h and w)
        followed by several layers of residual blocks. Each layer is composed of
        k Residual blocks. The first one reduce the input height and width by a
        factor 2 while the number of channel is increased by 2.
        ----------
        INPUT
            |---- pretrained (bool) whether the ResNet18 should be loaded with
            |           pretrained weights on Imagenet.
        OUTPUT
            |---- None
        """
        nn.Module.__init__(self)
        # Stem: 7x7 stride-2 convolution (halves H and W), BatchNorm without
        # affine parameters (so the checkpoint's bn weight/bias are simply
        # absent here and skipped by load_pretrain), ReLU and a stride-2
        # max-pool that halves H and W again.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine=False)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Residual layers: two DownResBlocks each; the first block of layers
        # 2-4 downsamples (stride 2) and doubles the channel count.
        self.layer1 = nn.Sequential(DownResBlock(64, 64, downsample=False),
                                    DownResBlock(64, 64, downsample=False))
        self.layer2 = nn.Sequential(DownResBlock(64, 128, downsample=True),
                                    DownResBlock(128, 128, downsample=False))
        self.layer3 = nn.Sequential(DownResBlock(128, 256, downsample=True),
                                    DownResBlock(256, 256, downsample=False))
        self.layer4 = nn.Sequential(DownResBlock(256, 512, downsample=True),
                                    DownResBlock(512, 512, downsample=False))
        if pretrained: self.load_pretrain()
    def forward(self, x):
        """
        Forward pass of the Encoder.
        ----------
        INPUT
            |---- x (torch.Tensor) the input tensor (B x C x H x W). The input
            |           image can be grayscale or RGB. If it's grayscale it will
            |           be converted to RGB by stacking 3 copies.
        OUTPUT
            |---- out (torch.Tensor) the bottleneck feature map produced by
            |           the last residual layer (B x 512 x H' x W').
        """
        # if grayscale (1 channel) convert to RGB by duplicating on 3 channels.
        # NOTE(review): the channel test uses shape[-3] (any leading dims) but
        # the cat uses dim=1, which assumes a 4-D (B x C x H x W) input --
        # confirm callers never pass extra leading dimensions.
        if x.shape[-3] == 1:
            x = torch.cat([x]*3, dim=1)
        # stem: 7x7 convolution, BN, ReLU, max-pool
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # 4 residual layers
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
    def load_pretrain(self):
        """
        Initialize the Encoder's weights with the weights pretrained on ImageNet.
        Only entries whose keys exist in this module's state_dict are copied;
        everything else in the checkpoint (fc head, affine BN params, any
        non-matching block names) is ignored.

        NOTE(review): `torchvision.models.utils.load_state_dict_from_url` was
        removed in newer torchvision releases (moved to `torch.hub`) -- this
        only works on the torchvision version the project pins.
        ----------
        INPUT
            |---- None
        OUTPUT
            |---- None
        """
        # download ResNet18 trained on ImageNet state dict
        pretrainResNet18_state_dict = torchvision.models.utils.load_state_dict_from_url('https://download.pytorch.org/models/resnet18-5c106cde.pth')
        # Get the modified ResNet Encoder state dict
        model_state_dict = self.state_dict()
        # keep only keys present in both (name match only; a same-named key
        # with a different shape would make load_state_dict raise)
        pretrained_dict = {k: v for k, v in pretrainResNet18_state_dict.items() if k in model_state_dict}
        # update state dict and load it back
        model_state_dict.update(pretrained_dict)
        self.load_state_dict(model_state_dict)
class ResNet18_Decoder(nn.Module):
    """
    Mirror of the ResNet18 encoder built from up-sampling residual blocks.

    Four up-layers of two UpResBlocks each progressively double the spatial
    resolution while shrinking the channel count, followed by a final
    bilinear upsampling and a 1x1 convolution mapping to the requested
    number of output channels, squashed through a Tanh.
    """
    def __init__(self, output_channels=3):
        """
        Assemble the decoder stages.
        ----------
        INPUT
            |---- output_channels (int) number of channels of the
            |           reconstructed image.
        OUTPUT
            |---- None
        """
        nn.Module.__init__(self)
        # Attribute names (and registration order) are kept stable so the
        # state_dict keys of existing checkpoints still match.
        self.uplayer1 = nn.Sequential(UpResBlock(512, 512, upsample=False),
                                      UpResBlock(512, 256, upsample=True))
        self.uplayer2 = nn.Sequential(UpResBlock(256, 256, upsample=False),
                                      UpResBlock(256, 128, upsample=True))
        self.uplayer3 = nn.Sequential(UpResBlock(128, 128, upsample=False),
                                      UpResBlock(128, 64, upsample=True))
        self.uplayer4 = nn.Sequential(UpResBlock(64, 64, upsample=False),
                                      UpResBlock(64, 64, upsample=True))
        self.uplayer_final = nn.Sequential(nn.Upsample(mode='bilinear', scale_factor=2, align_corners=True),
                                           nn.Conv2d(64, output_channels, kernel_size=1, stride=1, bias=False))
        self.final_activation = nn.Tanh()
    def forward(self, x):
        """
        Decode a bottleneck feature map back into an image.
        ----------
        INPUT
            |---- x (torch.Tensor) bottleneck features (B x 512 x H' x W').
        OUTPUT
            |---- out (torch.Tensor) reconstructed image in [-1, 1]
            |           (B x output_channels x H x W).
        """
        # Run the stages in registration order, then squash with Tanh.
        for stage in (self.uplayer1, self.uplayer2, self.uplayer3,
                      self.uplayer4, self.uplayer_final):
            x = stage(x)
        return self.final_activation(x)
class AE_SVDD_Hybrid(nn.Module):
    """
    ResNet18-based autoencoder with an auxiliary DeepSVDD head.

    The encoder is the ResNet18 trunk (up to the last residual layer) and
    the decoder mirrors it. A small convolutional head (`conv_svdd`) maps
    the bottleneck features to a compact flattened embedding used as the
    DeepSVDD data representation.
    """
    def __init__(self, pretrain_ResNetEnc=False, output_channels=3, return_svdd_embed=True):
        """
        Assemble encoder, SVDD head and decoder.
        ----------
        INPUT
            |---- pretrain_ResNetEnc (bool) initialize the encoder with
            |           ImageNet-pretrained ResNet18 weights.
            |---- output_channels (int) channels of the reconstructed image.
            |---- return_svdd_embed (bool) whether forward() also computes
            |           and returns the DeepSVDD embedding.
        OUTPUT
            |---- None
        """
        nn.Module.__init__(self)
        self.return_svdd_embed = return_svdd_embed
        # Attribute names/order kept stable so state_dict keys do not change.
        self.encoder = ResNet18_Encoder(pretrained=pretrain_ResNetEnc)
        self.conv_svdd = nn.Sequential(nn.AvgPool2d(kernel_size=3, stride=2),
                                       nn.Conv2d(512, 256, 3, stride=1, bias=False),
                                       nn.BatchNorm2d(256, affine=False),
                                       nn.ReLU(),
                                       nn.MaxPool2d(kernel_size=3, stride=2),
                                       nn.Conv2d(256, 128, 1, stride=1, bias=False),
                                       nn.BatchNorm2d(128, affine=False))
        self.decoder = ResNet18_Decoder(output_channels=output_channels)
    def forward(self, input):
        """
        Reconstruct the input and (optionally) compute the SVDD embedding.
        ----------
        INPUT
            |---- input (torch.Tensor) image batch (B x C x H x W).
        OUTPUT
            |---- rec (torch.Tensor) reconstruction (B x C' x H x W).
            |---- svdd_embedding (torch.Tensor or None) flattened DeepSVDD
            |           embedding (B x D), or None when disabled.
        """
        latent = self.encoder(input)
        rec = self.decoder(latent)
        svdd_embedding = None
        if self.return_svdd_embed:
            # Flatten everything except the batch dimension.
            svdd_embedding = torch.flatten(self.conv_svdd(latent), start_dim=1)
        return rec, svdd_embedding
class AE_ResNet18(nn.Module):
    """
    Plain ResNet18 autoencoder: ResNet18 trunk as encoder, mirrored ResNet18
    (upsampling + convolution) as decoder. Setting `encoding_only` to True
    skips decoding so the module acts as a pure feature extractor.
    """
    def __init__(self, pretrain_ResNetEnc=False, output_channels=3):
        """
        Assemble the encoder/decoder pair.
        ----------
        INPUT
            |---- pretrain_ResNetEnc (bool) initialize the encoder with
            |           ImageNet-pretrained ResNet18 weights.
            |---- output_channels (int) channels of the reconstructed image.
        OUTPUT
            |---- None
        """
        nn.Module.__init__(self)
        self.encoder = ResNet18_Encoder(pretrained=pretrain_ResNetEnc)
        self.decoder = ResNet18_Decoder(output_channels=output_channels)
        # Flip to True to skip decoding in forward().
        self.encoding_only = False
    def forward(self, input):
        """
        Encode the input and, unless `encoding_only` is set, decode it back.
        ----------
        INPUT
            |---- input (torch.Tensor) image batch (B x C x H x W).
        OUTPUT
            |---- rec (torch.Tensor or None) reconstruction, or None when
            |           `encoding_only` is True.
            |---- ae_embedding (torch.Tensor) bottleneck feature map.
        """
        ae_embedding = self.encoder(input)
        rec = None if self.encoding_only else self.decoder(ae_embedding)
        return rec, ae_embedding
| en | 0.798081 | Combine multiple Residual block to form a ResNet18 up to the Average poolong layer. The size of the embeding dimension can be different than the one from ResNet18. The ResNet18 part can be initialized with pretrained weights on ImageNet. Build the Encoder from the layer's specification. The encoder is composed of an initial 7x7 convolution that halves the input dimension (h and w) followed by several layers of residual blocks. Each layer is composed of k Residual blocks. The first one reduce the input height and width by a factor 2 while the number of channel is increased by 2. ---------- INPUT |---- pretrained (bool) whether the ResNet18 should be loaded with | pretrained weights on Imagenet. OUTPUT |---- None # First convolution # Residual layers Forward pass of the Encoder. ---------- INPUT |---- x (torch.Tensor) the input tensor (B x C x H x W). The input | image can be grayscale or RGB. If it's grayscale it will | be converted to RGB by stacking 3 copy. OUTPUT |---- out (torch.Tensor) the embedding of the image x in embed_dim | vector dimension. # if grayscale (1 channel) convert to to RGB by duplicating on 3 channel # assuming shape : (... x C x H x W) # first 1x1 convolution # 4 layers Initialize the Encoder's weights with the weights pretrained on ImageNet. ---------- INPUT |---- None OUTPUT |---- None # download ResNet18 trained on ImageNet state dict # Get the modified ResNet Encoder state dict # keep only matching keys # upadte state dict Combine multiple Up Residual Blocks to form a ResNet18 like decoder. Build the ResNet18-like decoder. The decoder is composed of a Linear layer. The linear layer is interpolated (bilinear) to 512x16x16 which is then processed by several Up-layer of Up Residual Blocks. Each Up-layer is composed of k Up residual blocks. The first ones are without up sampling. The last one increase the input size (h and w) by a factor 2 and reduce the number of channels by a factor 2. 
--------- INPUT |---- output_size (tuple) the decoder output size. (C x H x W) OUTPUT |---- None Forward pass of the decoder. ---------- INPUT |---- x (torch.Tensor) the input with dimension (B x embed_dim). OUTPUT |---- out (torch.Tensor) the reconstructed image (B x C x H x W). Autoencoder based on the ResNet18. The Encoder is a ResNet18 up to the average pooling layer, and the decoder is a mirrored ResNet18. The embedding is additionnaly processed through two convolutional layers to generate a smaller embedding for the DeepSVDD data representation. Build the ResNet18 Autoencoder.The Encoder can be initialized with weights pretrained on ImageNet. ---------- INPUT |---- pretrain_ResNetEnc (bool) whether to use pretrained weights on | ImageNet for the encoder initialization. |---- out_channel (int) the output channel of the reconstructed image. |---- return_embed (bool) whether to return the DeepSVDD embedding | in the forward OUTPUT |---- None Foward pass of the Autoencoder to reconstruct the provided image. ---------- INPUT |---- input (torch.Tensor) the input image (Grayscale or RGB) with | dimension (B x C x H x W). OUTPUT |---- rec (torch.Tensor) the reconstructed image (B x C' x H x W) Define an autoencoder with a ResNet18 backbone : The encoder is similar to a ResNet18 up to the last convolutional layer to which an AvgPool2d layer is added to divide the output dimension by a factor 2. The decoder is an Upsample layer followed by a mirrored ResNet18. The deconvolution is performed by upsampling + convolution. Build the ResNet18 Autoencoder.The Encoder can be initialized with weights pretrained on ImageNet. ---------- INPUT |---- pretrain_ResNetEnc (bool) whether to use pretrained weights on | ImageNet for the encoder initialization. |---- out_channel (int) the output channel of the reconstructed image. 
OUTPUT |---- None #self.avg_pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) #self.interp = nn.Upsample(mode='bilinear', scale_factor=2, align_corners=True) Foward pass of the Autoencoder to reconstruct the provided image. ---------- INPUT |---- input (torch.Tensor) the input image (Grayscale or RGB) with | dimension (B x C x H x W). OUTPUT |---- rec (torch.Tensor) the reconstructed image (B x C' x H x W) #ae_embedding = self.avg_pool(ae_embedding) #self.interp(ae_embedding)) | 3.067917 | 3 |
core/tests/test_models.py | Pmtague/recipe-app-api | 0 | 6622795 | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Unit tests for the custom user model and its manager."""
    # unittest discovery requires every test method name to start with `test_`.

    def test_create_user_with_email_successful(self):
        """A user created with an email and password stores both correctly."""
        new_user = get_user_model().objects.create_user(
            email='<EMAIL>',
            password='<PASSWORD>',
        )
        self.assertEqual(new_user.email, '<EMAIL>')
        self.assertTrue(new_user.check_password('<PASSWORD>'))

    def test_new_user_email_normalized(self):
        """The email address of a new user is stored lower-cased."""
        address = '<EMAIL>'
        new_user = get_user_model().objects.create_user(address, 'test123')
        self.assertEqual(new_user.email, address.lower())

    def test_new_user_invalid_email(self):
        """Creating a user without an email raises a ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')

    def test_create_new_superuser(self):
        """create_superuser grants both the superuser and staff flags."""
        new_user = get_user_model().objects.create_superuser(
            '<EMAIL>',
            'test123',
        )
        # is_superuser is provided by the PermissionsMixin.
        self.assertTrue(new_user.is_superuser)
        self.assertTrue(new_user.is_staff)
| from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Unit tests for the custom user model and its manager."""
    # Test method names must all start with test_ for unittest discovery.
    def test_create_user_with_email_successful(self):
        """Test creating a new user with an email is successful"""
        # Test user info (placeholder credentials)
        email = '<EMAIL>'
        password = '<PASSWORD>'
        # Create test user
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        # Assert that email and password created match those above
        # (check_password verifies against the stored hash).
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))
    def test_new_user_email_normalized(self):
        """Test the email for a new user is normalized"""
        # Test user info
        email = '<EMAIL>'
        # Create test user
        user = get_user_model().objects.create_user(email, 'test123')
        # Assert that the email address is stored in all lowercase
        self.assertEqual(user.email, email.lower())
    def test_new_user_invalid_email(self):
        """Test creating user with no email raises error"""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')
    def test_create_new_superuser(self):
        """Test creating a new superuser"""
        user = get_user_model().objects.create_superuser(
            '<EMAIL>',
            'test123'
        )
        # is_superuser is provided as part of the permissions mixin
        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)
| en | 0.911789 | # Test method names must all start with test_ Test creating a new user with an email is successful # Test user info # Create test user # Assert that email and password created match those above Test the email for a new user is normalized # Test user info # Create test user # Assert that the email address is stored in all lowercase Test creating user with no email raises error Test creating a new superuser # Superuser is provided as part of the permissions mixin | 3.034855 | 3 |
sigopt-beats-vegas/predictor/features.py | meghanaravikumar/sigopt-examples | 213 | 6622796 | <filename>sigopt-beats-vegas/predictor/features.py
from collections import namedtuple
FeatureSet = namedtuple(
    "FeatureSet",
    [
        "PTSpm",    # Points per minute
        "OREBpm",   # Offensive rebounds per minute
        "DREBpm",   # Defensive rebounds per minute
        "STLpm",    # Steals per minute
        "BLKpm",    # Blocks per minute
        "ASTpm",    # Assists per minute
        "PIPpm",    # Points in paint per minute
        "SCPpm",    # Second chance points per minute
        "FBPpm",    # Fast break points per minute
        "LCpm",     # Lead changes per minute
        "TTpm",     # Times tied per minute
        "LLpg",     # Largest lead per game
        "PDIFFpg",  # Point differential per game
        "FGApm",    # Field goal attempts per minute
        "FGMpm",    # Field goals made per minute
        "FTApm",    # Free throw attempts per minute
        "FTMpm",    # Free throws made per minute
        "TPFGApm",  # Three point field goals attempted per minute
        "TPFGMpm",  # Three point field goals made per minute
        "Q1PTSpg",  # First quarter points per game
        "Q2PTSpg",  # Second quarter points per game
        "Q3PTSpg",  # Third quarter points per game
        "Q4PTSpg",  # Fourth quarter points per game
    ],
)


def calculate_features_from_boxscore(box_score, is_home_game):
    """Build a FeatureSet for one team from a raw box score.

    ----------
    INPUT
        |---- box_score (dict) raw box score with a 'resultSets' list;
        |           result sets 1 (line score), 5 (team stats) and
        |           6 (misc team stats) are used.
        |---- is_home_game (bool) True for the home team (row 0),
        |           False for the away team (row 1).
    OUTPUT
        |---- (FeatureSet or None) per-minute / per-game features, or None
        |           when the minutes field is empty or a row is missing.
    """
    team_id = 0 if is_home_game else 1
    stats = box_score['resultSets']
    try:
        # Hoist the three rows used below instead of re-indexing
        # stats[...]['rowSet'][team_id] for every field; a missing row
        # raises IndexError, handled as "no data" for this team.
        team_row = stats[5]['rowSet'][team_id]
        misc_row = stats[6]['rowSet'][team_id]
        line_row = stats[1]['rowSet'][team_id]
        time = team_row[5]
        if not time:
            # Empty minutes field: nothing to normalize by.
            return None
        minutes = float(time.split(':')[0])
        return FeatureSet(
            int(team_row[23]) / minutes,   # PTSpm
            int(team_row[15]) / minutes,   # OREBpm
            int(team_row[16]) / minutes,   # DREBpm
            int(team_row[19]) / minutes,   # STLpm
            int(team_row[20]) / minutes,   # BLKpm
            int(team_row[18]) / minutes,   # ASTpm
            int(misc_row[5]) / minutes,    # PIPpm
            int(misc_row[6]) / minutes,    # SCPpm
            int(misc_row[7]) / minutes,    # FBPpm
            int(misc_row[9]) / minutes,    # LCpm
            int(misc_row[10]) / minutes,   # TTpm
            # NOTE(review): index 9 is reused here for LLpg after already
            # serving as LCpm above -- looks suspicious; confirm against the
            # NBA stats result-set layout before changing.
            int(misc_row[9]),              # LLpg
            int(team_row[24]),             # PDIFFpg
            int(team_row[7]) / minutes,    # FGApm
            int(team_row[6]) / minutes,    # FGMpm
            int(team_row[13]) / minutes,   # FTApm
            int(team_row[12]) / minutes,   # FTMpm
            int(team_row[10]) / minutes,   # TPFGApm
            int(team_row[9]) / minutes,    # TPFGMpm
            int(line_row[7]),              # Q1PTSpg
            int(line_row[8]),              # Q2PTSpg
            int(line_row[9]),              # Q3PTSpg
            int(line_row[10]),             # Q4PTSpg
        )
    except IndexError:
        # Missing some crucial data, so skip this record.
        return None
| <filename>sigopt-beats-vegas/predictor/features.py
from collections import namedtuple
FeatureSet = namedtuple(
    "FeatureSet",
    [
        "PTSpm",  # Points per minute
        "OREBpm",  # Offensive rebounds per minute
        "DREBpm",  # Defensive rebounds per minute
        "STLpm",  # Steals per minute
        "BLKpm",  # Blocks per minute
        "ASTpm",  # Assists per minute
        "PIPpm",  # Points in paint per minute
        "SCPpm",  # Second chance points per minute
        "FBPpm",  # Fast break points per minute
        "LCpm",  # Lead changes per minute
        "TTpm",  # Times tied per minute
        "LLpg",  # Largest lead per game
        "PDIFFpg",  # Point differential per game
        "FGApm",  # Field goal attempts per minute
        "FGMpm",  # Field goals made per minute
        "FTApm",  # Free throw attempts per minute
        "FTMpm",  # Free throws made per minute
        "TPFGApm",  # Three point field goals attempted per minute
        "TPFGMpm",  # Three point field goals made per minute
        "Q1PTSpg",  # First quarter points per game
        "Q2PTSpg",  # Second quarter points per game
        "Q3PTSpg",  # Third quarter points per game
        "Q4PTSpg",  # Fourth quarter points per game
    ],
)
def calculate_features_from_boxscore(box_score, is_home_game):
    """Returns a FeatureSet given stats and a team_id.
    box_score - raw box score dict with a 'resultSets' list; result sets
                1 (line score), 5 (team stats) and 6 (misc stats) are used
    is_home_game - True selects row 0 (home), False row 1 (away)
    Returns None when the minutes field is empty or a row is missing.
    """
    if is_home_game:
        team_id = 0
    else:
        team_id = 1
    stats = box_score['resultSets']
    try:
        # Minutes played, e.g. "240:00"; only the part before ':' is used.
        time = stats[5]['rowSet'][team_id][5]
        if time:
            minutes = float(time.split(':')[0])
            # Per-minute rates from result set 5 (team stats) and 6 (misc),
            # per-game values from result sets 1 (quarter line score), 5, 6.
            # NOTE(review): stats[6][...][9] is used both for LCpm (rate)
            # and LLpg (raw) below -- looks like a copy-paste slip; confirm
            # against the NBA stats result-set layout.
            return FeatureSet(
                int(stats[5]['rowSet'][team_id][23]) / minutes,
                int(stats[5]['rowSet'][team_id][15]) / minutes,
                int(stats[5]['rowSet'][team_id][16]) / minutes,
                int(stats[5]['rowSet'][team_id][19]) / minutes,
                int(stats[5]['rowSet'][team_id][20]) / minutes,
                int(stats[5]['rowSet'][team_id][18]) / minutes,
                int(stats[6]['rowSet'][team_id][5]) / minutes,
                int(stats[6]['rowSet'][team_id][6]) / minutes,
                int(stats[6]['rowSet'][team_id][7]) / minutes,
                int(stats[6]['rowSet'][team_id][9]) / minutes,
                int(stats[6]['rowSet'][team_id][10]) / minutes,
                int(stats[6]['rowSet'][team_id][9]),
                int(stats[5]['rowSet'][team_id][24]),
                int(stats[5]['rowSet'][team_id][7]) / minutes,
                int(stats[5]['rowSet'][team_id][6]) / minutes,
                int(stats[5]['rowSet'][team_id][13]) / minutes,
                int(stats[5]['rowSet'][team_id][12]) / minutes,
                int(stats[5]['rowSet'][team_id][10]) / minutes,
                int(stats[5]['rowSet'][team_id][9]) / minutes,
                int(stats[1]['rowSet'][team_id][7]),
                int(stats[1]['rowSet'][team_id][8]),
                int(stats[1]['rowSet'][team_id][9]),
                int(stats[1]['rowSet'][team_id][10]),
            )
        else:
            return None
    except IndexError:
        # Missing some crucial data, so skip this record
        return None
| en | 0.726981 | # Points per minute # Offensive rebounds per minute # Defensive rebounds per minute # Steals per minute # Blocks per minute # Assists per minute # Points in paint per minute # Second chance points per minute # Fast break points per minute # Lead changes per minute # Times tied per minute # Largest lead per game # Point differential per game # Field goal attempts per minute # Field goals made per minute # Free throw attempts per minute # Free throws made per minute # Three point field goals attempted per minute # Three point field goals made per minute # First quarter points per game # Second quarter points per game # Third quarter points per game # Fourth quarter points per game Returns a FeatureSet given stats and a team_id. stats - a list of result sets from a game team_id - 0 for home, 1 for away # Missing some crucial data, so skip this record | 2.551422 | 3 |
lldb/test/API/functionalities/dyld-exec-linux/TestDyldExecLinux.py | ornata/llvm-project | 0 | 6622797 | """
Test that LLDB can launch a linux executable and then execs into the dynamic
loader into this program again.
"""
import lldb
import os
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestLinux64ExecViaDynamicLoader(TestBase):
    """Launch a Linux executable that re-execs itself through the dynamic
    loader, and verify the debugger survives the exec and stops in main."""

    mydir = TestBase.compute_mydir(__file__)

    @skipIf(oslist=no_match(['linux']))
    @no_debug_info_test
    @skipIf(oslist=["linux"], archs=["arm"])
    def test(self):
        self.build()

        # Extract the dynamic loader path from the executable's .interp
        # section. Without it the scenario cannot be exercised; skip
        # explicitly instead of the original silent `return`, which made
        # the test pass vacuously.
        exe = self.getBuildArtifact("a.out")
        spec = lldb.SBModuleSpec()
        spec.SetFileSpec(lldb.SBFileSpec(exe))
        interp_section = lldb.SBModule(spec).FindSection(".interp")
        if not interp_section:
            self.skipTest("a.out has no .interp section")
        section_data = interp_section.GetSectionData()
        error = lldb.SBError()
        dyld_path = section_data.GetString(error, 0)
        if error.Fail():
            self.skipTest("could not read the dynamic loader path from .interp")

        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)

        # Set a breakpoint in the main function that will get hit after the
        # program exec's via the dynamic loader. The breakpoint will only get
        # hit if we can successfully read the shared library lists in
        # DynamicLoaderPOSIXDYLD.cpp when we exec into the dynamic loader.
        breakpoint_main = target.BreakpointCreateBySourceRegex(
            "// Break here", lldb.SBFileSpec("main.cpp"))
        self.assertTrue(breakpoint_main.IsValid())

        # Launch a.out with the dynamic loader path as its only argument --
        # presumably main.cpp uses it to exec the loader; confirm there.
        launch_info = lldb.SBLaunchInfo([dyld_path])
        error = lldb.SBError()
        process = target.Launch(launch_info, error)
        self.assertSuccess(error)

        threads = lldbutil.get_stopped_threads(process, lldb.eStopReasonExec)
        self.assertEqual(len(threads), 1, "We got a thread stopped for exec.")

        process.Continue()

        # We should now be stopped at the breakpoint in main.
        self.assertEqual(process.GetState(), lldb.eStateStopped)
        thread = process.GetSelectedThread()
        self.assertIn("main", thread.GetFrameAtIndex(0).GetDisplayFunctionName())
| """
Test that LLDB can launch a linux executable and then execs into the dynamic
loader into this program again.
"""
import lldb
import os
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestLinux64ExecViaDynamicLoader(TestBase):
    """Launch a Linux executable that re-execs itself through the dynamic
    loader, and verify the debugger survives the exec and stops in main."""
    mydir = TestBase.compute_mydir(__file__)
    @skipIf(oslist=no_match(['linux']))
    @no_debug_info_test
    @skipIf(oslist=["linux"], archs=["arm"])
    def test(self):
        self.build()
        # Extracts path of the interpreter (dynamic loader) from the
        # executable's .interp section. NOTE(review): the bare `return`s
        # below make the test pass vacuously when this fails; a skipTest
        # would be more honest.
        exe = self.getBuildArtifact("a.out")
        spec = lldb.SBModuleSpec()
        spec.SetFileSpec(lldb.SBFileSpec(exe))
        interp_section = lldb.SBModule(spec).FindSection(".interp")
        if not interp_section:
            return
        section_data = interp_section.GetSectionData()
        error = lldb.SBError()
        dyld_path = section_data.GetString(error,0)
        if error.Fail():
            return
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        # Set a breakpoint in the main function that will get hit after the
        # program exec's via the dynamic loader. The breakpoint will only get
        # hit if we can successfully read the shared library lists in the
        # DynamicLoaderPOSIXDYLD.cpp when we exec into the dynamic loader.
        breakpoint_main = target.BreakpointCreateBySourceRegex("// Break here", lldb.SBFileSpec("main.cpp"))
        # Launch a.out with the dynamic loader path as its only argument --
        # presumably main.cpp uses it to exec the loader; confirm there.
        launch_info = lldb.SBLaunchInfo([dyld_path])
        error = lldb.SBError()
        process = target.Launch(launch_info, error)
        self.assertSuccess(error)
        threads = lldbutil.get_stopped_threads(process, lldb.eStopReasonExec)
        self.assertEqual(len(threads), 1, "We got a thread stopped for exec.")
        # NOTE(review): stray trailing semicolon below is harmless but
        # un-Pythonic.
        process.Continue();
        # Stopped on main here.
        self.assertEqual(process.GetState(), lldb.eStateStopped)
        thread = process.GetSelectedThread()
        self.assertIn("main", thread.GetFrameAtIndex(0).GetDisplayFunctionName())
| en | 0.874424 | Test that LLDB can launch a linux executable and then execs into the dynamic loader into this program again. # Extracts path of the interpreter. # Set a breakpoint in the main function that will get hit after the # program exec's via the dynamic loader. The breakpoint will only get # hit if we can successfully read the shared library lists in the # DynamicLoaderPOSIXDYLD.cpp when we exec into the dynamic loader. # Setup our launch info to supply the dynamic loader path to the # program so it gets two args: # - path to a.out # - path to dynamic loader # Stopped on main here. | 2.082911 | 2 |
learning/process/distributed/job.py | seasonfif/python | 0 | 6622798 | #!/usr/bin/env python
# coding=utf-8
class Job:
    """A unit of work identified by `job_id`."""

    def __init__(self, job_id):
        """Store the identifier that distinguishes this job.

        :param job_id: identifier for this job (opaque to this class).
        """
        self.job_id = job_id

    def __repr__(self):
        # Useful when jobs are logged or inspected; %-formatting keeps the
        # file compatible with the Python 2 style of the surrounding code.
        return "Job(job_id=%r)" % (self.job_id,)
| #!/usr/bin/env python
# coding=utf-8
class Job:
    """A unit of work identified by `job_id`."""
    def __init__(self, job_id):
        # Opaque identifier distinguishing this job from others.
        self.job_id = job_id
| en | 0.244401 | #!/usr/bin/env python # coding=utf-8 | 2.410032 | 2 |
setup.py | kokokuo/pydomain | 1 | 6622799 | from setuptools import setup, find_packages
# Packaging configuration for the pydomain DDD building-blocks library.
setup(
    name='pydomain',
    version='0.1',
    author="kokokuo",
    author_email="<EMAIL>",
    # Fixed typo: "tatical" -> "tactical".
    description="This is a domain-driven design tactical building blocks package for python.",
    packages=find_packages(exclude=["docs", "tests*"]),
    install_requires=[
        "sutoppu"
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent"
    ]
)
| from setuptools import setup, find_packages
setup(
name='pydomain',
version='0.1',
author="kokokuo",
author_email="<EMAIL>",
description="This is a domain-driven design tatical building blocks package for python.",
packages=find_packages(exclude=["docs", "tests*"]),
install_requires=[
"sutoppu"
],
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent"
]
)
| none | 1 | 1.290903 | 1 | |
pycrypto_tid_demo.py | nwinds/demoServer | 0 | 6622800 | from Crypto.Hash import SHA256
from Crypto.Hash import HMAC
from binascii import b2a_hex
from binascii import a2b_hex
import base64
def hmac(key, data):
    """Return the raw HMAC-SHA256 digest of `data` under `key`.

    NOTE(review): keys longer than 32 bytes only print a warning here but
    are still used; HMAC itself handles over-long keys by hashing them
    first, so the check is informational only.
    """
    if len(key) > 32:
        print("Warning")
    return HMAC.new(key, data, digestmod=SHA256).digest()

# --- Demo (Python 2 syntax below: `print` statement, str/bytes mixing) ---
cid = 'ClientId00000001'
nonce = a2b_hex('0101010101010101')
authKey = 'ClientAuthKey001ClientAuthKey001'
# Python 2 only: a2b_hex returns a str, so str + str works; on Python 3
# this concatenation (str + bytes) raises TypeError.
message = bytes(cid + nonce).encode('utf-8')
secret = bytes(authKey).encode('utf-8')
# Hex-encodes the Base64 of the MAC (double encoding) -- presumably the
# transport format the server expects; confirm against the server side.
print b2a_hex(base64.b64encode(hmac(secret, message)))
| from Crypto.Hash import SHA256
from Crypto.Hash import HMAC
from binascii import b2a_hex
from binascii import a2b_hex
import base64
def hmac(key, data):
    """Return the raw HMAC-SHA256 digest of `data` under `key` (warns on
    keys longer than 32 bytes but still uses them)."""
    if len(key) > 32:
        print("Warning")
    return HMAC.new(key, data, digestmod=SHA256).digest()

# Demo of deriving a client authentication tag (Python 2 syntax below).
cid = 'ClientId00000001'
nonce = a2b_hex('0101010101010101')
authKey = 'ClientAuthKey001ClientAuthKey001'
message = bytes(cid + nonce).encode('utf-8')
secret = bytes(authKey).encode('utf-8')
# Double encoding (hex of base64) -- presumably the expected wire format.
print b2a_hex(base64.b64encode(hmac(secret, message)))
| none | 1 | 2.982124 | 3 | |
rasa_core/version.py | htonthat/rasa_core | 1 | 6622801 |
# Package release identifier (PEP 440 pre-release: 0.13.0, alpha 5).
__version__ = '0.13.0a5'
|
# Package release identifier (PEP 440 pre-release: 0.13.0, alpha 5).
__version__ = '0.13.0a5'
| none | 1 | 1.056348 | 1 | |
src/wp_cli/db_check.py | wpsmith/wp-cli-python | 0 | 6622802 | from wp.command import WPCommand
class DBCheck(WPCommand):
    """Wrapper for the WP-CLI `wp db check` command.

    NOTE(review): `wp db check` delegates to mysqlcheck, but the attribute
    comments originally referenced mysqldump -- they look copy-pasted from
    the db export wrapper; confirm the intended semantics of `fields`.
    """
    command = ['db', 'check']
    # Extra pass-through arguments for the underlying MySQL tool
    # (presumably mysqlcheck here -- see class note above).
    fields = []
    # When set, load the environment's MySQL option files; WP-CLI skips
    # them by default to avoid failures due to misconfiguration.
    defaults = []
    def __init__(self, **args):
        # Pull optional overrides out of the keyword arguments, falling
        # back to the (empty) class-level defaults.
        super().__init__(**args)
        self.fields = self.get_arg_value(key="fields", default_value=self.fields)
        self.defaults = self.get_arg_value(key="defaults", default_value=self.defaults)
    def params(self):
        # `wp db check` takes no positional parameters.
        return []
| from wp.command import WPCommand
class DBCheck(WPCommand):
    """Wrapper for the WP-CLI `wp db check` command."""
    command = ['db', 'check']
    # Extra arguments passed through to the underlying MySQL tool.
    # NOTE(review): this comment originally said mysqldump, but `wp db
    # check` delegates to mysqlcheck -- confirm.
    fields = []
    # Loads the environment's MySQL option files.
    # Default behavior is to skip loading them to avoid failures due to misconfiguration.
    defaults = []
    def __init__(self, **args):
        super().__init__(**args)
        self.fields = self.get_arg_value(key="fields", default_value=self.fields)
        self.defaults = self.get_arg_value(key="defaults", default_value=self.defaults)
    def params(self):
        # `wp db check` takes no positional parameters.
        return []
| en | 0.754352 | # Extra arguments to pass to mysqldump. Refer to mysqldump docs. # Loads the environment’s MySQL option files. # Default behavior is to skip loading them to avoid failures due to misconfiguration. | 2.280333 | 2 |
models.py | JJendryka/Chores | 0 | 6622803 | <reponame>JJendryka/Chores<filename>models.py
from flask_sqlalchemy import SQLAlchemy
from authlib.integrations.sqla_oauth2 import OAuth2ClientMixin, OAuth2TokenMixin, OAuth2AuthorizationCodeMixin
from flask_bcrypt import generate_password_hash, check_password_hash
# Module-level database handle shared by all models; bound to the Flask
# application in init_app().
db = SQLAlchemy()
def init_app(app):
    """Bind the shared SQLAlchemy instance to the Flask application."""
    db.init_app(app)
class User(db.Model):
    """Application account; passwords are stored as bcrypt hashes."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(60), unique=True, nullable=False)
    # Bcrypt hash of the password, never the plaintext (see set_password).
    password = db.Column(db.Text, nullable=False)
    is_admin = db.Column(db.Boolean, nullable=False)
    counters = db.relationship('Counter', backref='user', lazy=True)
    def get_user_id(self):
        """Return the primary key (presumably the hook Authlib's OAuth2
        integration calls to identify the resource owner -- confirm)."""
        return self.id
    def check_password(self, password):
        """Return True if `password` matches the stored bcrypt hash."""
        return check_password_hash(self.password, password)
    def set_password(self, password):
        """Hash `password` with bcrypt and store the digest."""
        self.password = generate_password_hash(password)
class Chore(db.Model):
    """A recurring household task."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(60), unique=True, nullable=False)
    # Recurrence period in days (per the column name).
    period_of_days = db.Column(db.Integer, nullable=False)
    # NOTE(review): units/semantics of cooldown_time and minimum_point are
    # not evident from this file -- confirm against the application logic.
    cooldown_time = db.Column(db.Integer)
    minimum_point = db.Column(db.Integer)
    # One Counter per user tracking this chore (see Counter.chore_id).
    counters = db.relationship('Counter', backref='chore', lazy=True)
class Counter(db.Model):
    """Per-user tally for a single chore."""
    id = db.Column(db.Integer, primary_key=True)
    value = db.Column(db.Integer, nullable=False)
    # Optional weighting factor; semantics not shown in this file.
    multiplier = db.Column(db.Float)
    # Deleting the referenced chore or user cascades to its counters.
    chore_id = db.Column(db.Integer, db.ForeignKey('chore.id', ondelete='CASCADE'), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='CASCADE'), nullable=False)
class Client(db.Model, OAuth2ClientMixin):
    """Registered OAuth2 client; OAuth2ClientMixin supplies the client
    metadata columns, this table adds the owning user."""
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='CASCADE'))
    user = db.relationship('User')
class Token(db.Model, OAuth2TokenMixin):
    """Issued OAuth2 token; OAuth2TokenMixin supplies the token columns,
    this table adds the owning user."""
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='CASCADE'))
    user = db.relationship('User')
class AuthorizationCode(db.Model, OAuth2AuthorizationCodeMixin):
    """Short-lived OAuth2 authorization code (authorization-code grant);
    the mixin supplies the code columns, this table adds the owning user."""
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(
        db.Integer, db.ForeignKey('user.id', ondelete='CASCADE')
    )
    user = db.relationship('User')
| from flask_sqlalchemy import SQLAlchemy
from authlib.integrations.sqla_oauth2 import OAuth2ClientMixin, OAuth2TokenMixin, OAuth2AuthorizationCodeMixin
from flask_bcrypt import generate_password_hash, check_password_hash
# Shared SQLAlchemy handle; bound to the Flask app in init_app().
db = SQLAlchemy()
def init_app(app):
    """Bind the shared SQLAlchemy instance to the Flask application."""
    db.init_app(app)
class User(db.Model):
    """Application account; passwords are stored as bcrypt hashes."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(60), unique=True, nullable=False)
    # Bcrypt hash, never the plaintext (see set_password).
    password = db.Column(db.Text, nullable=False)
    is_admin = db.Column(db.Boolean, nullable=False)
    counters = db.relationship('Counter', backref='user', lazy=True)
    def get_user_id(self):
        """Return the primary key."""
        return self.id
    def check_password(self, password):
        """Return True if `password` matches the stored bcrypt hash."""
        return check_password_hash(self.password, password)
    def set_password(self, password):
        """Hash `password` with bcrypt and store the digest."""
        self.password = generate_password_hash(password)
class Chore(db.Model):
    """A recurring household task."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(60), unique=True, nullable=False)
    # Recurrence period in days (per the column name).
    period_of_days = db.Column(db.Integer, nullable=False)
    cooldown_time = db.Column(db.Integer)
    minimum_point = db.Column(db.Integer)
    counters = db.relationship('Counter', backref='chore', lazy=True)
class Counter(db.Model):
    """Per-user tally for a single chore."""
    id = db.Column(db.Integer, primary_key=True)
    value = db.Column(db.Integer, nullable=False)
    multiplier = db.Column(db.Float)
    # Deleting the referenced chore or user cascades to its counters.
    chore_id = db.Column(db.Integer, db.ForeignKey('chore.id', ondelete='CASCADE'), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='CASCADE'), nullable=False)
class Client(db.Model, OAuth2ClientMixin):
    """Registered OAuth2 client (Authlib storage model).

    Client metadata columns are supplied by OAuth2ClientMixin; only the
    primary key and the owning user are declared here.
    """
    id = db.Column(db.Integer, primary_key=True)
    # Owner of the client registration; cascade-deleted with the user.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='CASCADE'))
    user = db.relationship('User')
class Token(db.Model, OAuth2TokenMixin):
    """Issued OAuth2 token (Authlib storage model); columns from the mixin."""
    id = db.Column(db.Integer, primary_key=True)
    # Token holder; cascade-deleted with the user.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='CASCADE'))
    user = db.relationship('User')
class AuthorizationCode(db.Model, OAuth2AuthorizationCodeMixin):
    """OAuth2 authorization code (Authlib storage model); columns from the mixin."""
    id = db.Column(db.Integer, primary_key=True)
    # Code owner; cascade-deleted with the user.
    user_id = db.Column(
        db.Integer, db.ForeignKey('user.id', ondelete='CASCADE')
    )
    user = db.relationship('User')
Lesson16_Classes/1-Types.py | StyvenSoft/degree-python | 0 | 6622804 | <filename>Lesson16_Classes/1-Types.py
# Demonstrate runtime type introspection with the built-in type() function.
sample_text = "<NAME>"
sample_int = 12

print(type(sample_text))
# prints "<class 'str'>"
print(type(sample_int))
# prints "<class 'int'>"

# Literals carry their type too.
print(type(5))
# <class 'int'>

empty_dict = {}
print(type(empty_dict))
# <class 'dict'>

empty_list = []
print(type(empty_list))
# <class 'list'>
| <filename>Lesson16_Classes/1-Types.py
a_string = "<NAME>"
an_int = 12
print(type(a_string))
# prints "<class 'str'>"
print(type(an_int))
# prints "<class 'int'>"
print(type(5))
#<class 'int'>
my_dict = {}
print(type(my_dict))
# <class 'dict'>
my_list = []
print(type(my_list))
# <class 'list'>
| en | 0.148824 | # prints "<class 'str'>" # prints "<class 'int'>" #<class 'int'> # <class 'dict'> # <class 'list'> | 3.475384 | 3 |
scripts/examples/ESTELA/ESTELA_PCA.py | teslakit/teslak | 12 | 6622805 | <reponame>teslakit/teslak<filename>scripts/examples/ESTELA/ESTELA_PCA.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Example script: run a Principal Component Analysis on an ESTELA SLP
# predictor dataset from the teslakit test data, store the result as
# netCDF and plot the leading EOFs.
# common
import os
import os.path as op
# pip
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
# DEV: override installed teslakit
import sys
sys.path.insert(0,'../../../')
# teslakit
from teslakit.project_site import PathControl
from teslakit.io.matlab import ReadMatfile
from teslakit.PCA import PCA_EstelaPred
from teslakit.plotting.EOFs import Plot_EOFs_EstelaPred
# --------------------------------------
# Test data storage
pc = PathControl()
p_tests = pc.p_test_data
p_test = op.join(p_tests, 'ESTELA', 'test_estela_PCA')
# --------------------------------------
# use teslakit test data
p_estela_pred = op.join(p_test, 'xds_SLP_estela_pred.nc')
xds_SLP_estela_pred = xr.open_dataset(p_estela_pred)
# Calculate PCA
# PCA_EstelaPred operates on the 'SLP' variable of the predictor dataset.
xds_PCA = PCA_EstelaPred(xds_SLP_estela_pred, 'SLP')
# persist the PCA output next to the input data
xds_PCA.to_netcdf(op.join(p_test, 'xds_SLP_PCA.nc'))
print(xds_PCA)
# Plot EOFs
# plot the first n_plot Empirical Orthogonal Functions into p_save
n_plot = 3
p_save = op.join(p_test, 'Plot_EOFs_EstelaPred')
Plot_EOFs_EstelaPred(xds_PCA, n_plot, p_save)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# common
import os
import os.path as op
# pip
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
# DEV: override installed teslakit
import sys
sys.path.insert(0,'../../../')
# teslakit
from teslakit.project_site import PathControl
from teslakit.io.matlab import ReadMatfile
from teslakit.PCA import PCA_EstelaPred
from teslakit.plotting.EOFs import Plot_EOFs_EstelaPred
# --------------------------------------
# Test data storage
pc = PathControl()
p_tests = pc.p_test_data
p_test = op.join(p_tests, 'ESTELA', 'test_estela_PCA')
# --------------------------------------
# use teslakit test data
p_estela_pred = op.join(p_test, 'xds_SLP_estela_pred.nc')
xds_SLP_estela_pred = xr.open_dataset(p_estela_pred)
# Calculate PCA
xds_PCA = PCA_EstelaPred(xds_SLP_estela_pred, 'SLP')
xds_PCA.to_netcdf(op.join(p_test, 'xds_SLP_PCA.nc'))
print(xds_PCA)
# Plot EOFs
n_plot = 3
p_save = op.join(p_test, 'Plot_EOFs_EstelaPred')
Plot_EOFs_EstelaPred(xds_PCA, n_plot, p_save) | en | 0.301738 | #!/usr/bin/env python # -*- coding: utf-8 -*- # common # pip # DEV: override installed teslakit # teslakit # -------------------------------------- # Test data storage # -------------------------------------- # use teslakit test data # Calculate PCA # Plot EOFs | 2.290646 | 2 |
Python/Introduction/1HelloWorld.py | phillipemoreira/HackerRank | 0 | 6622806 | <reponame>phillipemoreira/HackerRank
# Problem link: https://www.hackerrank.com/challenges/py-hello-world
# Emit the exact greeting the challenge expects on stdout.
greeting = "Hello, World!"
print(greeting)
# Write your code on the next line.
print("Hello, World!") | en | 0.847457 | #Problem link: https://www.hackerrank.com/challenges/py-hello-world # Write your code on the next line. | 2.921839 | 3 |
cotk/_utils/unordered_hash.py | ishine/cotk | 117 | 6622807 | '''
A module for hash unordered elements
'''
from typing import Union
from collections import OrderedDict
import hashlib
import json
import warnings
class UnorderedSha256:
	'''
	Order-insensitive SHA-256 accumulator.

	Every input is hashed with SHA-256 and the digest bytes are folded
	byte-wise (mod 256) into a 32-byte running state, so the final digest
	does not depend on the order in which items were fed in.
	'''
	def __init__(self):
		# 32 accumulator slots, one per SHA-256 digest byte, all zero.
		self.result = [0 for _ in range(32)]

	def update_data(self, data: Union[bytes, bytearray, memoryview]):
		'''Hash *data* with SHA-256 and fold the digest into the state.'''
		self.update_hash(hashlib.sha256(data).digest())

	def update_hash(self, hashvalue):
		'''Fold a raw 32-byte digest into the state, byte-wise mod 256.'''
		for position, new_byte in enumerate(hashvalue):
			self.result[position] = (self.result[position] + new_byte) & 0xFF

	def digest(self) -> bytes:
		'''Return the current 32-byte order-insensitive digest.'''
		return bytes(self.result)

	def hexdigest(self) -> str:
		'''Return the current digest as a hex string.'''
		return self.digest().hex()
def dumps_json(obj) -> bytes:
	'''Generate bytes identifying *obj* via JSON serialization.

	Simple scalars are rendered with str() directly; everything else goes
	through json.dumps with sorted keys for a deterministic result.
	'''
	is_simple_scalar = isinstance(obj, (str, int, float, bool))
	if not is_simple_scalar:
		return json.dumps(obj, sort_keys=True).encode('utf-8')
	return str(obj).encode('utf-8')
def dumps(obj) -> bytes:
	'''Generate bytes identifying *obj* via a canonicalized repr.'''
	canonical_form = convert_obj(obj)
	return simple_dumps(canonical_form)
def simple_dumps(obj) -> bytes:
	'''Encode the repr of *obj* as UTF-8 bytes.'''
	text = repr(obj)
	return text.encode('utf-8')
def convert_obj(obj):
	'''Recursively normalise *obj* into a structure whose repr is stable.'''
	# OrderedDict is a dict subclass whose insertion order matters, so it
	# must be dispatched before the plain-dict entry in the table below.
	if isinstance(obj, OrderedDict):
		return convert_ordered_dict(obj)
	for handled_type, converter in special_type_processing_functions.items():
		if isinstance(obj, handled_type):
			return converter(obj)
	if not isinstance(obj, common_types):
		warnings.warn("It's unsupported to dumps a %s object. The result may not be expected." % type(obj).__name__)
	return obj
def convert_dict(obj):
	'''Represent a plain dict as (type, key/value pairs sorted by key).'''
	sorted_pairs = sorted(obj.items())
	return type(obj), [(convert_obj(key), convert_obj(value)) for key, value in sorted_pairs]
def convert_ordered_dict(obj):
	'''Represent an OrderedDict as (type, pairs in insertion order).'''
	converted_pairs = [(convert_obj(key), convert_obj(value)) for key, value in obj.items()]
	return type(obj), converted_pairs
def convert_ordered_iterable(obj):
	'''Represent a list/tuple as (type, converted items in original order).'''
	converted_items = [convert_obj(element) for element in obj]
	return type(obj), converted_items
def convert_unordered_iterable(obj):
	'''Represent a set/frozenset as (type, converted items in sorted order).'''
	# Sets have no defined iteration order, so sort for a stable repr.
	return type(obj), [convert_obj(element) for element in sorted(obj)]
# Dispatch table used by convert_obj. Entry order matters for subclasses:
# isinstance checks are run in insertion order (OrderedDict itself is
# handled explicitly in convert_obj before this table is consulted).
special_type_processing_functions = {
    tuple: convert_ordered_iterable,
    list: convert_ordered_iterable,
    set: convert_unordered_iterable,
    frozenset: convert_unordered_iterable,
    dict: convert_dict,
    OrderedDict: convert_ordered_dict
}
# Types that dumps() accepts as-is without a warning.
common_types = (str, int, float, bytes, bytearray, bool, type, type(None))
| '''
A module for hash unordered elements
'''
from typing import Union
from collections import OrderedDict
import hashlib
import json
import warnings
class UnorderedSha256:
'''
Using SHA256 on unordered elements
'''
def __init__(self):
self.result = [0] * 32
def update_data(self, data: Union[bytes, bytearray, memoryview]):
'''update digest by data. type(data)=bytes'''
digest = hashlib.sha256(data).digest()
self.update_hash(digest)
def update_hash(self, hashvalue):
'''update digest by hash. type(hashvalue)=bytes'''
for i, bit in enumerate(list(hashvalue)):
self.result[i] = (self.result[i] + bit) & 0xFF
def digest(self) -> bytes:
'''return unordered hashvalue'''
return bytes(self.result)
def hexdigest(self) -> str:
'''return unordered hashvalue'''
return bytes(self.result).hex()
def dumps_json(obj) -> bytes:
'''Generate bytes to identify the object by json serialization'''
if isinstance(obj, (str, int, float, bool)):
return str(obj).encode('utf-8')
return json.dumps(obj, sort_keys=True).encode('utf-8')
def dumps(obj) -> bytes:
'''Generate bytes to identify the object by repr'''
return simple_dumps(convert_obj(obj))
def simple_dumps(obj) -> bytes:
return repr(obj).encode('utf-8')
def convert_obj(obj):
if isinstance(obj, OrderedDict):
return convert_ordered_dict(obj)
for cls, func in special_type_processing_functions.items():
if isinstance(obj, cls):
return func(obj)
if not isinstance(obj, common_types):
warnings.warn("It's unsupported to dumps a %s object. The result may not be expected." % type(obj).__name__)
return obj
def convert_dict(obj):
return type(obj), [(convert_obj(k), convert_obj(v)) for k, v in sorted(obj.items())]
def convert_ordered_dict(obj):
return type(obj), [(convert_obj(k), convert_obj(v)) for k, v in obj.items()]
def convert_ordered_iterable(obj):
return type(obj), [convert_obj(item) for item in obj]
def convert_unordered_iterable(obj):
# Elements in a set or a frozenset is unordered. Sort them before dumps.
return type(obj), [convert_obj(item) for item in sorted(obj)]
special_type_processing_functions = {
tuple: convert_ordered_iterable,
list: convert_ordered_iterable,
set: convert_unordered_iterable,
frozenset: convert_unordered_iterable,
dict: convert_dict,
OrderedDict: convert_ordered_dict
}
common_types = (str, int, float, bytes, bytearray, bool, type, type(None))
| en | 0.641927 | A module for hash unordered elements Using SHA256 on unordered elements update digest by data. type(data)=bytes update digest by hash. type(hashvalue)=bytes return unordered hashvalue return unordered hashvalue Generate bytes to identify the object by json serialization Generate bytes to identify the object by repr # Elements in a set or a frozenset is unordered. Sort them before dumps. | 3.403936 | 3 |
tests/test_pull.py | cincanproject/cincan-command | 1 | 6622808 | <filename>tests/test_pull.py
import logging
import pytest
from unittest import mock
from cincan.frontend import ToolImage
from cincan.configuration import Configuration
# Test image hosted on Quay; Docker Hub 'cincan/*' names redirect here.
DEFAULT_IMAGE = "quay.io/cincan/test"
# Default tag names come from the cincan client configuration.
DEFAULT_STABLE_TAG = Configuration().default_stable_tag
DEFAULT_DEV_TAG = Configuration().default_dev_tag
def test_image_pull_no_default_tag(caplog):
    """Missing stable tag on a cincan image -> falls back to the dev tag."""
    caplog.set_level(logging.INFO)
    # cincan/test image has only 'dev' tag
    tool = ToolImage(image=DEFAULT_IMAGE, pull=True, rm=False)
    logs = [l.message for l in caplog.records]
    pull_msgs = [
        f"pulling image with tag '{DEFAULT_STABLE_TAG}'...",
        f"Tag 'latest' not found. Trying development tag '{DEFAULT_DEV_TAG}' instead."
    ]
    # Ignore version check messages, get two first
    assert logs[:len(pull_msgs)] == pull_msgs
def test_pull_not_cincan(caplog):
    """Pulling a non-cincan image ('busybox') logs a single plain pull message."""
    caplog.set_level(logging.INFO)
    ToolImage(image="busybox", pull=True, rm=False)
    expected_logs = ["pulling image with tag 'latest'..."]
    observed_logs = [record.message for record in caplog.records]
    assert observed_logs == expected_logs
def test_pull_not_cincan_tag_not_found(caplog):
    """Unknown tag on a non-cincan image -> exit code 1 with a clear log."""
    caplog.set_level(logging.INFO)
    # Busybox is not 'cincan' image, pulling non existing tag
    with pytest.raises(SystemExit) as ex:
        tool = ToolImage(image="busybox:cincan", pull=True, rm=False)
    assert ex.type == SystemExit
    assert ex.value.code == 1
    pull_msgs = [
        "pulling image with tag 'cincan'...",
        "Tag 'cincan' not found. Is it typed correctly?"
    ]
    logs = [l.message for l in caplog.records]
    assert logs == pull_msgs
def test_pull_tag_not_found(caplog):
    """Unknown tag on a cincan image -> exit code 1 with a clear log."""
    caplog.set_level(logging.INFO)
    # Pulling non-existing tag from cincan tool
    with pytest.raises(SystemExit) as ex:
        tool = ToolImage(image=f"{DEFAULT_IMAGE}:not_found", pull=True, rm=False)
    assert ex.type == SystemExit
    assert ex.value.code == 1
    pull_msgs = [
        "pulling image with tag 'not_found'...",
        "Tag 'not_found' not found. Is it typed correctly?"
    ]
    logs = [l.message for l in caplog.records]
    # Version-check messages may follow; compare only the leading entries.
    assert logs[:len(pull_msgs)] == pull_msgs
    caplog.clear()
def test_pull_repository_not_found(caplog):
    """Non-existing repository -> exit code 1 with a 'not found' log."""
    caplog.set_level(logging.INFO)
    # Pulling from non-existing repository 'cincann'
    with pytest.raises(SystemExit) as ex:
        tool = ToolImage(image="cincann/test_not_found", pull=True, rm=False)
    assert ex.type == SystemExit
    assert ex.value.code == 1
    pull_msgs = [
        "pulling image with tag 'latest'...",
        "Repository not found or no access into it. Is it typed correctly?"
    ]
    logs = [l.message for l in caplog.records]
    assert logs == pull_msgs
def test_pull_no_default_tags_no_credentials(caplog):
    """
    Test for pulling non-existing 'cincan' image
    Method behaves differently whether credentials for 'cincan' is found
    (if Docker Hub credentials are found, it attempts to pull both tags)
    """
    # Mock contents of ~/.docker/config.json to have no credentials
    read_data = "{}"
    mock_open = mock.mock_open(read_data=read_data)
    caplog.set_level(logging.INFO)
    # Mock with no credentials/custom config
    with mock.patch("builtins.open", mock_open):
        # Pulling 'cincan' image without default development or stable tag
        with pytest.raises(SystemExit) as ex:
            tool = ToolImage(image="quay.io/cincan/test_not_found", pull=True, rm=False)
        assert ex.type == SystemExit
        assert ex.value.code == 1
    pull_msg = 'Internal Server Error ("unauthorized: access to the requested resource is not authorized")'
    logs = [l.message for l in caplog.records]
    # The server error is expected as the second record, right after the
    # initial 'pulling image...' message.
    assert pull_msg in logs[1]
def test_batch_option_pull(caplog):
    """Test --batch option to disable some properties (version check, pull-progress bar)"""
    caplog.set_level(logging.INFO)
    # batch mode: the pull message is the only log record (no version check)
    tool = ToolImage(image=f"{DEFAULT_IMAGE}:{DEFAULT_DEV_TAG}", pull=True, rm=False, batch=True)
    pull_msgs = [
        f"pulling image with tag '{DEFAULT_DEV_TAG}'...",
    ]
    logs = [l.message for l in caplog.records]
    assert logs == pull_msgs
    # non-batch mode: the version-check message should appear as well
    tool = ToolImage(image=f"cincan/test:{DEFAULT_DEV_TAG}", pull=True, rm=False, batch=False)
    msg = f"No version information available for {DEFAULT_IMAGE}\n"
    logs = [l.message for l in caplog.records]
    assert msg in logs
def test_pull_cincan_tool_from_dockerhub(caplog):
    """Pulling a cincan image by its Docker Hub name is redirected to Quay."""
    caplog.set_level(logging.INFO)
    dockerhub_name = f"cincan/test:{DEFAULT_DEV_TAG}"
    tool = ToolImage(image=dockerhub_name, pull=True, rm=False, batch=True)
    redirected_tag = f"{DEFAULT_IMAGE}:{DEFAULT_DEV_TAG}"
    assert redirected_tag in tool.image.tags
| <filename>tests/test_pull.py
import logging
import pytest
from unittest import mock
from cincan.frontend import ToolImage
from cincan.configuration import Configuration
DEFAULT_IMAGE = "quay.io/cincan/test"
DEFAULT_STABLE_TAG = Configuration().default_stable_tag
DEFAULT_DEV_TAG = Configuration().default_dev_tag
def test_image_pull_no_default_tag(caplog):
caplog.set_level(logging.INFO)
# cincan/test image has only 'dev' tag
tool = ToolImage(image=DEFAULT_IMAGE, pull=True, rm=False)
logs = [l.message for l in caplog.records]
pull_msgs = [
f"pulling image with tag '{DEFAULT_STABLE_TAG}'...",
f"Tag 'latest' not found. Trying development tag '{DEFAULT_DEV_TAG}' instead."
]
# Ignore version check messages, get two first
assert logs[:len(pull_msgs)] == pull_msgs
def test_pull_not_cincan(caplog):
caplog.set_level(logging.INFO)
# Busybox is not 'cincan' image, pulling normally
tool = ToolImage(image="busybox", pull=True, rm=False)
pull_msgs = [
"pulling image with tag 'latest'...",
]
logs = [l.message for l in caplog.records]
assert logs == pull_msgs
def test_pull_not_cincan_tag_not_found(caplog):
caplog.set_level(logging.INFO)
# Busybox is not 'cincan' image, pulling non existing tag
with pytest.raises(SystemExit) as ex:
tool = ToolImage(image="busybox:cincan", pull=True, rm=False)
assert ex.type == SystemExit
assert ex.value.code == 1
pull_msgs = [
"pulling image with tag 'cincan'...",
"Tag 'cincan' not found. Is it typed correctly?"
]
logs = [l.message for l in caplog.records]
assert logs == pull_msgs
def test_pull_tag_not_found(caplog):
caplog.set_level(logging.INFO)
# Pulling non-existing tag from cincan tool
with pytest.raises(SystemExit) as ex:
tool = ToolImage(image=f"{DEFAULT_IMAGE}:not_found", pull=True, rm=False)
assert ex.type == SystemExit
assert ex.value.code == 1
pull_msgs = [
"pulling image with tag 'not_found'...",
"Tag 'not_found' not found. Is it typed correctly?"
]
logs = [l.message for l in caplog.records]
assert logs[:len(pull_msgs)] == pull_msgs
caplog.clear()
def test_pull_repository_not_found(caplog):
caplog.set_level(logging.INFO)
# Pulling from non-existing repository 'cincann'
with pytest.raises(SystemExit) as ex:
tool = ToolImage(image="cincann/test_not_found", pull=True, rm=False)
assert ex.type == SystemExit
assert ex.value.code == 1
pull_msgs = [
"pulling image with tag 'latest'...",
"Repository not found or no access into it. Is it typed correctly?"
]
logs = [l.message for l in caplog.records]
assert logs == pull_msgs
def test_pull_no_default_tags_no_credentials(caplog):
"""
Test for pulling non-existing 'cincan' image
Method behaves differently whether credentials for 'cincan' is found
(if Docker Hub credentials are found, it attempts to pull both tags)
"""
# Mock contents of ~/.docker/config.json to have no credentials
read_data = "{}"
mock_open = mock.mock_open(read_data=read_data)
caplog.set_level(logging.INFO)
# Mock with no credentials/custom config
with mock.patch("builtins.open", mock_open):
# Pulling 'cincan' image without default development or stable tag
with pytest.raises(SystemExit) as ex:
tool = ToolImage(image="quay.io/cincan/test_not_found", pull=True, rm=False)
assert ex.type == SystemExit
assert ex.value.code == 1
pull_msg = 'Internal Server Error ("unauthorized: access to the requested resource is not authorized")'
logs = [l.message for l in caplog.records]
assert pull_msg in logs[1]
def test_batch_option_pull(caplog):
"""Test --batch option to disable some properties (version check, pull-progress bar"""
caplog.set_level(logging.INFO)
tool = ToolImage(image=f"{DEFAULT_IMAGE}:{DEFAULT_DEV_TAG}", pull=True, rm=False, batch=True)
pull_msgs = [
f"pulling image with tag '{DEFAULT_DEV_TAG}'...",
]
logs = [l.message for l in caplog.records]
assert logs == pull_msgs
tool = ToolImage(image=f"cincan/test:{DEFAULT_DEV_TAG}", pull=True, rm=False, batch=False)
msg = f"No version information available for {DEFAULT_IMAGE}\n"
logs = [l.message for l in caplog.records]
assert msg in logs
def test_pull_cincan_tool_from_dockerhub(caplog):
"""Pull cincan image from Docker Hub -> Redirect into Quay expected"""
caplog.set_level(logging.INFO)
tool = ToolImage(image=f"cincan/test:{DEFAULT_DEV_TAG}", pull=True, rm=False, batch=True)
assert f"{DEFAULT_IMAGE}:{DEFAULT_DEV_TAG}" in tool.image.tags
| en | 0.777016 | # cincan/test image has only 'dev' tag # Ignore version check messages, get two first # Busybox is not 'cincan' image, pulling normally # Busybox is not 'cincan' image, pulling non existing tag # Pulling non-existing tag from cincan tool # Pulling from non-existing repository 'cincann' Test for pulling non-existing 'cincan' image Method behaves differently whether credentials for 'cincan' is found (if Docker Hub credentials are found, it attempts to pull both tags) # Mock contents of ~/.docker/config.json to have no credentials # Mock with no credentials/custom config # Pulling 'cincan' image without default development or stable tag Test --batch option to disable some properties (version check, pull-progress bar Pull cincan image from Docker Hub -> Redirect into Quay expected | 2.248144 | 2 |
records/test_talks.py | gridpp/dissem-toolkit | 1 | 6622809 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#...the usual suspects.
import os, inspect
#...for the unit testing.
import unittest
#...for the logging.
import logging as lg
#...for the Pixelman dataset wrapper.
from talks import Talk
class TalkTest(unittest.TestCase):
    """Unit tests for the Talk record wrapper (parses one TSV line)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_create_talk(self):
        """Build a Talk from the test TSV file and check every accessor."""
        tf = open("testdata/testtalk.tsv", "r")
        lines = tf.readlines()
        tf.close()
        ## The test talk (lines[0] is the TSV header row).
        talk = Talk(lines[1])
        # The talk title.
        self.assertEqual(talk.getTalkTitle(), "A recent view of OSSEC and Elasticsearch at Scotgrid Glasgow")
        # The talk date string.
        self.assertEqual(talk.getTalkDateString(), "24 Mar 2015")
        # The talk URL.
        self.assertEqual(talk.getTalkUrl(), "https://indico.cern.ch/event/346931/session/3/contribution/39")
        # The authors.
        self.assertEqual(talk.getAuthors(), "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>")
        # The event URL.
        self.assertEqual(talk.getEventUrl(), "https://indico.cern.ch/event/346931/")
        # The web entry (HTML list item); indentation inside the expected
        # string literal is significant.
        self.assertEqual(talk.getWebEntry(), """<li>
            <a href="https://indico.cern.ch/event/346931/session/3/contribution/39" target="_blank">A recent view of OSSEC and Elasticsearch at Scotgrid Glasgow</a> <br />
            <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <a href="https://indico.cern.ch/event/346931/" target="_blank">HEPiX Spring 2015 Workshop</a>, 24 Mar 2015.
        </li>""")
        # The category.
        self.assertEqual(talk.getCategory(), "Data Storage Management")
if __name__ == "__main__":
    # Write a DEBUG-level log file for the test run, with a banner header,
    # then hand control to the unittest runner.
    lg.basicConfig(filename='log_test_talks.log', filemode='w', level=lg.DEBUG)
    lg.info("")
    lg.info("==========================================")
    lg.info(" Logger output from records/test_talks.py ")
    lg.info("==========================================")
    lg.info("")
    unittest.main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#...the usual suspects.
import os, inspect
#...for the unit testing.
import unittest
#...for the logging.
import logging as lg
#...for the Pixelman dataset wrapper.
from talks import Talk
class TalkTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_create_talk(self):
tf = open("testdata/testtalk.tsv", "r")
lines = tf.readlines()
tf.close()
## The test talk.
talk = Talk(lines[1])
# The talk title.
self.assertEqual(talk.getTalkTitle(), "A recent view of OSSEC and Elasticsearch at Scotgrid Glasgow")
# The talk date string.
self.assertEqual(talk.getTalkDateString(), "24 Mar 2015")
# The talk URL.
self.assertEqual(talk.getTalkUrl(), "https://indico.cern.ch/event/346931/session/3/contribution/39")
# The authors.
self.assertEqual(talk.getAuthors(), "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>")
# The event URL.
self.assertEqual(talk.getEventUrl(), "https://indico.cern.ch/event/346931/")
# The web entry.
self.assertEqual(talk.getWebEntry(), """<li>
<a href="https://indico.cern.ch/event/346931/session/3/contribution/39" target="_blank">A recent view of OSSEC and Elasticsearch at Scotgrid Glasgow</a> <br />
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <a href="https://indico.cern.ch/event/346931/" target="_blank">HEPiX Spring 2015 Workshop</a>, 24 Mar 2015.
</li>""")
# The category.
self.assertEqual(talk.getCategory(), "Data Storage Management")
if __name__ == "__main__":
lg.basicConfig(filename='log_test_talks.log', filemode='w', level=lg.DEBUG)
lg.info("")
lg.info("==========================================")
lg.info(" Logger output from records/test_talks.py ")
lg.info("==========================================")
lg.info("")
unittest.main()
| en | 0.672527 | #!/usr/bin/env python # -*- coding: utf-8 -*- #...the usual suspects. #...for the unit testing. #...for the logging. #...for the Pixelman dataset wrapper. ## The test talk. # The talk title. # The talk date string. # The talk URL. # The authors. # The event URL. # The web entry. <li> <a href="https://indico.cern.ch/event/346931/session/3/contribution/39" target="_blank">A recent view of OSSEC and Elasticsearch at Scotgrid Glasgow</a> <br /> <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <a href="https://indico.cern.ch/event/346931/" target="_blank">HEPiX Spring 2015 Workshop</a>, 24 Mar 2015. </li> # The category. | 2.338108 | 2 |
credentials_replacer/replacer.py | singleton11/aws-credential-replacer | 10 | 6622810 | #!/usr/bin/env python
import os
import sys
import click
from credstash import getSecret, listSecrets
from jinja2 import Environment, FileSystemLoader
def render_with_credentials(file):
    """Render a jinja2 template file with credstash credentials.

    The template is rendered with a context mapping every credstash
    secret name to its decrypted value.

    Args:
        file (str): jinja2 template file path

    Returns:
        str: Rendered string
    """
    template_dir = os.path.dirname(file)
    template_name = os.path.basename(file)
    environment = Environment(loader=FileSystemLoader(template_dir))
    template = environment.get_template(template_name)
    secret_context = {entry['name']: getSecret(entry['name'])
                      for entry in listSecrets()}
    return template.render(**secret_context)
@click.command()
@click.argument('file')
def main(file):
    """Render FILE with credstash credentials and write it to stdout.

    Args:
        file (str): jinja2 template file path
    """
    # write() rather than print() so no extra trailing newline is appended
    sys.stdout.write(render_with_credentials(file))
if __name__ == '__main__':
    main()
| #!/usr/bin/env python
import os
import sys
import click
from credstash import getSecret, listSecrets
from jinja2 import Environment, FileSystemLoader
def render_with_credentials(file):
"""Render file argument with credstash credentials
Load file as jinja2 template and render it with context where keys are
credstash keys and values are credstash values
Args:
file (str): jinja2 template file path
Returns:
str: Rendered string
"""
env = Environment(loader=FileSystemLoader(os.path.dirname(file)))
template = env.get_template(os.path.basename(file))
context = {secret['name']: getSecret(secret['name'])
for secret in listSecrets()}
return template.render(**context)
@click.command()
@click.argument('file')
def main(file):
"""Output rendered template
Args:
file (str): jinja2 template file path
"""
sys.stdout.write(render_with_credentials(file))
if __name__ == '__main__':
main()
| en | 0.51294 | #!/usr/bin/env python Render file argument with credstash credentials Load file as jinja2 template and render it with context where keys are credstash keys and values are credstash values Args: file (str): jinja2 template file path Returns: str: Rendered string Output rendered template Args: file (str): jinja2 template file path | 3.010739 | 3 |
main/Customer/views.py | VikasSherawat/OrderFood | 0 | 6622811 | from django.shortcuts import render
from django.contrib import messages
# Create your views here.
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponseRedirect
from ..models import Shop, User, Customer, FoodItem,Order
def fooditems(request, shop_id):
    """Render the food-item listing page for a single shop.

    Args:
        request: incoming HttpRequest.
        shop_id: primary key of the Shop whose menu is shown.

    Returns:
        HttpResponse rendering 'main/fooditems.html' with the shop in
        context; raises Http404 if the shop does not exist.
    """
    # Fix: a previous `Shop.objects.all()` query was fetched but never
    # used -- removed to avoid a pointless database hit.
    current_shop = get_object_or_404(Shop, pk=shop_id)
    return render(request, 'main/fooditems.html', {'shop': current_shop})
def buy_fooditem(request, fooditem_id):
    """Place an order for one food item on behalf of the logged-in customer.

    Deducts the item price from the customer's balance, credits the shop
    owner, records an Order, and renders a confirmation page.
    Unauthenticated users and customers with insufficient balance are
    redirected home with a flash message.
    """
    # Guard clause: only authenticated users may order.
    if not request.user.is_authenticated:
        messages.add_message(request, messages.INFO, 'You must be logged in to place an order')
        return redirect('home')
    fooditem = get_object_or_404(FoodItem, pk=fooditem_id)
    current_user = get_object_or_404(Customer, user_id=request.user.id)
    if current_user.balance < fooditem.price:
        messages.add_message(request, messages.ERROR, 'Insufficient Balance, Please buy credit to Order Food')
        return redirect('home')
    # Fix: removed leftover debug print() statements from the view.
    # NOTE(review): the balance deduction and owner credit below are not
    # wrapped in a database transaction, so concurrent orders could race;
    # consider transaction.atomic(). Left unchanged here.
    order = Order(isServed=False, customer_id=request.user.customer.id, bill=fooditem.price)
    order.save()
    fooditem.orders.add(order)
    fooditem.save()
    fooditem.shop.shop_owner.credit += fooditem.price
    fooditem.shop.shop_owner.save()
    current_user.balance -= fooditem.price
    current_user.save()
    context = {'fooditem': fooditem, 'user': current_user, 'shop': fooditem.shop_id}
    return render(request, "main/order_confirmation.html", context)
| from django.shortcuts import render
from django.contrib import messages
# Create your views here.
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponseRedirect
from ..models import Shop, User, Customer, FoodItem,Order
def fooditems(request, shop_id):
shops = Shop.objects.all()
current_shop = get_object_or_404(Shop, pk=shop_id)
return render(request, 'main/fooditems.html', {'shop': current_shop})
def buy_fooditem(request, fooditem_id):
if not request.user.is_authenticated:
print("user not authenticated")
messages.add_message(request, messages.INFO, 'You must be logged in to place an order')
return redirect('home')
else:
fooditem = get_object_or_404(FoodItem, pk=fooditem_id)
current_user = get_object_or_404(Customer, user_id=request.user.id)
if current_user.balance < fooditem.price:
messages.add_message(request, messages.ERROR, 'Insufficient Balance, Please buy credit to Order Food')
return redirect('home')
order = Order(isServed=False,customer_id=request.user.customer.id,bill=fooditem.price)
order.save()
fooditem.orders.add(order)
fooditem.save()
fooditem.shop.shop_owner.credit += fooditem.price
fooditem.shop.shop_owner.save()
current_user.balance -= fooditem.price
current_user.save()
print(fooditem.shop_id)
context = {'fooditem': fooditem, 'user': current_user, 'shop': fooditem.shop_id}
return render(request, "main/order_confirmation.html", context)
| en | 0.968116 | # Create your views here. | 2.415996 | 2 |
flaxOptimizersBenchmark/mnist.py | nestordemeure/flaxOptimizersBenchmark | 2 | 6622812 | import tensorflow_datasets as tfds
from .architectures import SimpleCNN
from .training_loop import cross_entropy, accuracy, training_loop, make_training_loop_description, make_problem_description, Experiment
# defines the model
# (10 output classes: the MNIST digits 0-9)
model=SimpleCNN(num_classes=10)
model_name="SimpleCNN"
# defines the training parameters
batch_size=256
nb_epochs=10
def load_mnist(dataset_path, download_if_needed=True):
    """
    gets the dataset from the given path
    this function works ONLY if the dataset has been previously downloaded,
    unless `download_if_needed` is left at its default of True, in which
    case tfds downloads it into `dataset_path` first
    """
    # this download by default, see https://www.tensorflow.org/datasets/api_docs/python/tfds/load
    (train_dataset, test_dataset), info = tfds.load('mnist', data_dir=dataset_path, download=download_if_needed, split=['train','test'], as_supervised=True, shuffle_files=True, with_info=True)
    #nb_classes = info.features['label'].num_classes
    # converts values to floats between 0.0 and 1.0
    # NOTE(review): float(inputs) on a whole image tensor looks suspect --
    # Python float() only works on scalar tensors, so this map presumably
    # should use tf.cast(inputs, tf.float32); confirm by running the pipeline.
    def normalize_picture(inputs,labels): return float(inputs) / 255.0, labels
    train_dataset = train_dataset.map(normalize_picture, deterministic=False)
    test_dataset = test_dataset.map(normalize_picture, deterministic=False)
    return train_dataset, test_dataset
def run_MNIST(dataset_path, optimizer_with_description, test_metrics=None, output_folder="../data", random_seed=None, display=True):
    """
    Runs a SimpleCNN model on the MNIST dataset
    `optimizer_with_description` is an optimizer and its description as produced by `make_optimizer`
    `test_metrics` is a dict of metric functions evaluated on the test dataset at the end of each epoch
        (defaults to {"accuracy": accuracy})
    `output_folder` is the folder where the result of the experiment will be stored
    `random_seed` can be specified to make the experiment deterministic
    `display` can be set to false to hide training informations
    """
    # Fix: avoid a shared mutable default argument -- build the default
    # metrics dict per call instead of once at definition time.
    if test_metrics is None:
        test_metrics = {"accuracy": accuracy}
    # description of the problem
    optimizer, optimizer_description, optimizer_metrics = optimizer_with_description
    training_loop_description = make_training_loop_description(nb_epochs=nb_epochs, batch_size=batch_size, random_seed=random_seed)
    problem_description = make_problem_description(benchmark_name="MNIST", model_name=model_name, training_loop_description=training_loop_description, optimizer_description=optimizer_description)
    experiment = Experiment(problem_description, output_folder)
    # trains the model
    train_dataset, test_dataset = load_mnist(dataset_path)
    return training_loop(experiment, model, cross_entropy, optimizer, optimizer_metrics, test_metrics, train_dataset, test_dataset, display)
| import tensorflow_datasets as tfds
from .architectures import SimpleCNN
from .training_loop import cross_entropy, accuracy, training_loop, make_training_loop_description, make_problem_description, Experiment
# defines the model
model=SimpleCNN(num_classes=10)
model_name="SimpleCNN"
# defines the training parameters
batch_size=256
nb_epochs=10
def load_mnist(dataset_path, download_if_needed=True):
    """
    Loads the MNIST train and test splits from `dataset_path`.

    By default the dataset is downloaded into `dataset_path` when it is not
    already present (pass `download_if_needed=False` to require a previously
    downloaded copy). Returns a `(train_dataset, test_dataset)` pair of
    `(image, label)` supervised datasets with pixel values scaled to [0, 1].
    """
    # tfds.load downloads by default, see https://www.tensorflow.org/datasets/api_docs/python/tfds/load
    (train_dataset, test_dataset), info = tfds.load('mnist', data_dir=dataset_path, download=download_if_needed, split=['train','test'], as_supervised=True, shuffle_files=True, with_info=True)
    #nb_classes = info.features['label'].num_classes
    # converts pixel values to floats between 0.0 and 1.0
    # NOTE(review): float() on a non-scalar Tensor raises inside Dataset.map;
    # this presumably needs tf.cast(inputs, tf.float32) / 255.0 -- confirm.
    def normalize_picture(inputs,labels): return float(inputs) / 255.0, labels
    train_dataset = train_dataset.map(normalize_picture, deterministic=False)
    test_dataset = test_dataset.map(normalize_picture, deterministic=False)
    return train_dataset, test_dataset
def run_MNIST(dataset_path, optimizer_with_description, test_metrics=None, output_folder="../data", random_seed=None, display=True):
    """
    Runs a SimpleCNN model on the MNIST dataset.

    `dataset_path` is where tensorflow-datasets finds (or downloads) MNIST
    `optimizer_with_description` is an (optimizer, description, metrics) triple as produced by `make_optimizer`
    `test_metrics` maps metric names to functions evaluated on the test dataset at the end
        of each epoch; defaults to {"accuracy": accuracy}
    `output_folder` is the folder where the result of the experiment will be stored
    `random_seed` can be specified to make the experiment deterministic
    `display` can be set to False to hide training information
    """
    # BUG FIX: the default used to be a mutable dict shared across all calls;
    # use a None sentinel and build a fresh dict per call instead
    if test_metrics is None:
        test_metrics = {"accuracy": accuracy}
    # description of the problem
    optimizer, optimizer_description, optimizer_metrics = optimizer_with_description
    training_loop_description = make_training_loop_description(nb_epochs=nb_epochs, batch_size=batch_size, random_seed=random_seed)
    problem_description = make_problem_description(benchmark_name="MNIST", model_name=model_name, training_loop_description=training_loop_description, optimizer_description=optimizer_description)
    experiment = Experiment(problem_description, output_folder)
    # trains the model and returns the training-loop result
    train_dataset, test_dataset = load_mnist(dataset_path)
    return training_loop(experiment, model, cross_entropy, optimizer, optimizer_metrics, test_metrics, train_dataset, test_dataset, display)
| en | 0.813575 | # defines the model # defines the training parameters gets the dataset from the given path this function works ONLY if the dataset has been previously downloaded # this download by default, see https://www.tensorflow.org/datasets/api_docs/python/tfds/load #nb_classes = info.features['label'].num_classes # converts values to floats between 0.0 and 1.0 Runs a SimpleCNN model on the MNIST dataset `optimizer_with_description` is an optimizer and its description as produced by `make_optimizer` `test_metrics` is a lost of function that will be evaluated on the test dataset at the end of each epoch `output_folder` is the folder where the result of the experiment will be stored `random_seed` can be specified to make the experiment deterministic `display` can be set to false to hide training informations # description of the problem # trains the model | 3.358751 | 3 |
{{cookiecutter.repository_name}}/{{cookiecutter.package_name}}/problem/__init__.py | Aiwizo/pytorch-lantern-template | 1 | 6622813 | <reponame>Aiwizo/pytorch-lantern-template
from {{cookiecutter.package_name}}.problem.example import Example
from {{cookiecutter.package_name}}.problem.datasets import datasets
| from {{cookiecutter.package_name}}.problem.example import Example
from {{cookiecutter.package_name}}.problem.datasets import datasets | none | 1 | 1.09958 | 1 | |
visualizer/test/generate-memory-overview.py | asavonic/memprof | 0 | 6622814 | #!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import json
import random

# seed with the start time so each run produces a different random walk
current_time = int(time.time())
random.seed(current_time)
memory = 100


def generate_memstamp():
    """Return one synthetic memory sample and advance the global walk.

    Mutates the module-level `current_time` (strictly increasing by 1-5 s)
    and `memory` (random walk, nudged upward when it drops to <= 0).
    The 'bt' backtrace and 'id' fields are fixed placeholder values.
    """
    global memory
    global current_time
    current_time += random.randint(1, 5)
    memory += random.randint(-1, 1) * random.random() * 20
    if memory <= 0:
        memory += random.random() * 200
    backtrace = ["foo::baz()", "foo::bar()", "foo::foo()"]
    return {
        "x": current_time,
        "y": float("%.4f" % memory),
        "bt": backtrace,
        "id": 0xff
    }


memstamps = [generate_memstamp() for _ in range(1000)]
# BUG FIX (style): json.dumps([stamp for stamp in memstamps]) built a
# pointless copy of the list; dump it directly
print(json.dumps(memstamps))
| #!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import json
import random
current_time = int(time.time())
random.seed(current_time)
memory = 100
def generate_memstamp():
global memory
global current_time
current_time += random.randint(1, 5)
memory += random.randint(-1, 1) * random.random() * 20
if memory <= 0:
memory += random.random() * 200
backtrace = ["foo::baz()", "foo::bar()", "foo::foo()"]
return {
"x": current_time,
"y": float("%.4f" % memory),
"bt": backtrace,
"id": 0xff
}
memstamps = [generate_memstamp() for i in range(0, 1000)]
print(json.dumps([stamp for stamp in memstamps]))
| en | 0.756309 | #!/usr/bin/env python # The MIT License (MIT) # # Copyright (c) 2016 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. | 2.414246 | 2 |
app/core/tests/test_admin.py | FaridQattali/recipe-app-api | 0 | 6622815 | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Checks that the custom user model is correctly wired into the Django admin."""

    def setUp(self):
        """Create a logged-in superuser plus a regular user to inspect."""
        self.client = Client()
        user_model = get_user_model()
        self.admin_user = user_model.objects.create_superuser(
            '<EMAIL>',
            'admin123'
        )
        self.client.force_login(self.admin_user)
        self.user = user_model.objects.create_user(
            '<EMAIL>',
            'test123',
            name='farid'
        )

    def test_users_listed(self):
        """The admin change list shows each user's name and email."""
        page_url = reverse('admin:core_user_changelist')
        response = self.client.get(page_url)
        self.assertContains(response, self.user.name)
        self.assertContains(response, self.user.email)

    def test_user_change_page(self):
        """The per-user edit page renders successfully."""
        page_url = reverse('admin:core_user_change', args=[self.user.id])
        response = self.client.get(page_url)
        self.assertEqual(response.status_code, 200)

    def test_user_create_page(self):
        """The add-user page renders successfully."""
        page_url = reverse('admin:core_user_add')
        response = self.client.get(page_url)
        self.assertEqual(response.status_code, 200)
| from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
'<EMAIL>',
'admin123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
'<EMAIL>',
'test123',
name='farid'
)
def test_users_listed(self):
"""Test users are listed in django admin"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test user edit page renders correctlly"""
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_user_create_page(self):
"""Tests if user create page renders correctly"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| en | 0.522682 | Test users are listed in django admin Test user edit page renders correctlly Tests if user create page renders correctly | 2.627995 | 3 |
test/fixtures/projects/printenv/project/action_plugins/look_at_environment.py | valbendan/ansible-runner | 658 | 6622816 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
import os
class ActionModule(ActionBase):
    """Ansible action plugin that reports the controller-side process environment."""

    def run(self, tmp=None, task_vars=None):
        """Return the base action result extended with env vars and the cwd."""
        result = super(ActionModule, self).run(tmp, task_vars)
        result['changed'] = False
        result['failed'] = False
        result['msg'] = ''
        environment = dict(os.environ)
        # one VAR=value line per variable, printenv-style
        lines = ['{0}={1}'.format(key, value) for key, value in environment.items()]
        result['printenv'] = '\n'.join(lines)
        result['environment'] = environment
        result['cwd'] = os.getcwd()
        return result
| from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
import os
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
result['changed'] = result['failed'] = False
result['msg'] = ''
env_dict = dict(os.environ)
result['printenv'] = '\n'.join(
'{0}={1}'.format(k, v) for k, v in env_dict.items()
)
result['environment'] = env_dict
result['cwd'] = os.getcwd()
return result
| none | 1 | 2.166121 | 2 | |
berktempy/core.py | d-chambers/2018_agu_workshop | 0 | 6622817 | """
A library for analogizing Berkley temperature data. Created in the best
practices for open-source software development at AGU-2018.
"""
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import requests
def _generate_berkley_earth_location_url(location: str) -> str:
    """Build the Berkeley Earth TAVG trend-file URL for the given region name."""
    base_url = 'http://berkeleyearth.lbl.gov/auto/Regional/TAVG/Text/'
    filename = f'{location.lower()}-TAVG-Trend.txt'
    return base_url + filename
def download_data(location: str, path=None) -> str:
    """Fetch the Berkeley Earth data for *location*; optionally save it to *path*.

    Returns the downloaded text in any case.
    """
    url = _generate_berkley_earth_location_url(location)
    text = requests.get(url).text
    if path:
        with open(path, 'w') as output_file:
            output_file.write(text)
    return text
def load_data(path_or_str: str):
    """Load a Berkeley Earth text table from a file path or from raw text.

    If *path_or_str* names an existing file, it is parsed with numpy
    (lines starting with '%' are comments). Otherwise the string itself is
    parsed as whitespace-separated rows. Returns a float ndarray.
    """
    path = Path(path_or_str)
    try:
        if path.exists():
            # BUG FIX: this previously loaded the hard-coded "data.txt"
            # instead of the file the caller asked for
            return np.loadtxt(path, comments="%")
    except OSError:
        # path_or_str is not a usable filesystem path; fall through and
        # treat it as raw text content
        pass
    rows = [line.rstrip().split() for line in path_or_str.splitlines()
            if not line.startswith('%') and line.rstrip()]
    return np.array(rows).astype(float)
# In[79]:
# test for loading text
# Extract the monthly temperature anomaly and calculate an approximate "decimal year" to use in plotting.
# In[4]:
# Plot the data so we can see what it's like.
# In[5]:
# NOTE(review): `data` is never defined in this module (this looks like a
# notebook export); presumably a `data = load_data(...)` call is missing
# above -- confirm before running this module as a script.
# columns: 0 = year, 1 = month (1-12), 2 = temperature anomaly
decimal_year = data[:, 0] + 1 / 12 * (data[:, 1] - 1)
temp_anomaly = data[:, 2]
plt.figure(figsize=(10, 6))
plt.title("Temperature anomaly for Australia")
plt.plot(decimal_year, temp_anomaly)
plt.xlabel('year')
plt.ylabel('temperature anomaly (C)')
plt.grid()
plt.xlim(decimal_year.min(), decimal_year.max())
# In[80]:
def moving_avg(temp_anomaly):
    """Return a 12-sample moving average of *temp_anomaly*.

    Each output element i (for 6 <= i < size - 6) is the mean of the window
    temp_anomaly[i - 6:i + 6] (12 samples, slightly asymmetric around i);
    elements near the edges are left as NaN.
    """
    # BUG FIX: the original filled a local array (shadowing the function
    # name) and then fell off the end without returning it
    averaged = np.full(temp_anomaly.size, np.nan)
    for i in range(6, averaged.size - 6):
        averaged[i] = np.mean(temp_anomaly[i - 6:i + 6])
    return averaged
# In[7]:
plt.figure(figsize=(10, 6))
plt.title("Temperature anomaly for Australia")
plt.plot(decimal_year, temp_anomaly, label="anomaly")
# NOTE(review): `moving_avg` here is the *function* object defined above
# (which also returns no value); this plot presumably needs the array from
# calling moving_avg(temp_anomaly) -- confirm.
plt.plot(decimal_year, moving_avg, label="12-month moving average", linewidth=3)
plt.xlabel('year')
plt.ylabel('temperature anomaly (C)')
plt.legend()
plt.grid()
plt.xlim(decimal_year.min(), decimal_year.max())
| """
A library for analogizing Berkley temperature data. Created in the best
practices for open-source software development at AGU-2018.
"""
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import requests
def _generate_berkley_earth_location_url(location: str) -> str:
" "
base = 'http://berkeleyearth.lbl.gov/auto/Regional/TAVG/Text/'
location = f'{location.lower()}-TAVG-Trend.txt'
# Download the content of the URL
return base + location
def download_data(location: str, path=None) -> str:
url = _generate_berkley_earth_location_url(location)
response = requests.get(url)
# Save it to a file
if path:
with open(path, 'w') as fi:
fi.write(response.text)
return response.text
def load_data(path_or_str: str):
path = Path(path_or_str)
try:
if path.exists():
return np.loadtxt("data.txt", comments="%")
except OSError:
pass
pstring = [x.rstrip().split() for x in path_or_str.splitlines()
if not x.startswith('%') and x.rstrip()]
return np.array(pstring).astype(float)
# In[79]:
# test for loading text
# Extract the monthly temperature anomaly and calculate an approximate "decimal year" to use in plotting.
# In[4]:
# Plot the data so we can see what it's like.
# In[5]:
decimal_year = data[:, 0] + 1 / 12 * (data[:, 1] - 1)
temp_anomaly = data[:, 2]
plt.figure(figsize=(10, 6))
plt.title("Temperature anomaly for Australia")
plt.plot(decimal_year, temp_anomaly)
plt.xlabel('year')
plt.ylabel('temperature anomaly (C)')
plt.grid()
plt.xlim(decimal_year.min(), decimal_year.max())
# The data are kind of noisy at this scale so let's calculate a 12-month moving average for a smoother time series.
# In[80]:
def moving_avg(temp_anomaly):
moving_avg = np.full(temp_anomaly.size, np.nan)
for i in range(6, moving_avg.size - 6):
moving_avg[i] = np.mean(temp_anomaly[i - 6:i + 6])
# In[7]:
plt.figure(figsize=(10, 6))
plt.title("Temperature anomaly for Australia")
plt.plot(decimal_year, temp_anomaly, label="anomaly")
plt.plot(decimal_year, moving_avg, label="12-month moving average", linewidth=3)
plt.xlabel('year')
plt.ylabel('temperature anomaly (C)')
plt.legend()
plt.grid()
plt.xlim(decimal_year.min(), decimal_year.max())
| en | 0.823271 | A library for analogizing Berkley temperature data. Created in the best practices for open-source software development at AGU-2018. # Download the content of the URL # Save it to a file # In[79]: # test for loading text # Extract the monthly temperature anomaly and calculate an approximate "decimal year" to use in plotting. # In[4]: # Plot the data so we can see what it's like. # In[5]: # The data are kind of noisy at this scale so let's calculate a 12-month moving average for a smoother time series. # In[80]: # In[7]: | 3.503273 | 4 |
tests/example_helpers.py | aenglander/python-cose | 0 | 6622818 | <filename>tests/example_helpers.py
from os import path
# Absolute path of the "example_data" directory that sits next to this module;
# test modules use it to locate example/fixture files.
EXAMPLE_ROOT = path.join(path.dirname(__file__), "example_data")
| <filename>tests/example_helpers.py
from os import path
EXAMPLE_ROOT = path.join(path.dirname(__file__), "example_data")
| none | 1 | 1.597717 | 2 | |
output.py | yuyobit/decode | 2 | 6622819 | <reponame>yuyobit/decode
import csv
import datetime
import settings
import sqlite3
# CSV output provides a simple dumping of decoded values
# advanced functions like correcting data according to bulletin modifiers will not be done
# as the CSV file is newly created every time
# also station information is not saved in the CSV file
def writeCsvOutput():
    """Dump every decoded observation from settings.decodedData to the CSV
    file named by settings.output.

    The nested 'modifier' and 'precipitation' structures are flattened into
    scalar columns; the CSV schema can hold only one precipitation entry per
    record. Exits the process if the output file cannot be opened.
    """
    # local import: 'sys' was used below but never imported at module level,
    # so the original except-branch raised NameError instead of exiting
    import sys
    try:
        print()
        print('Writing to CSV output file ' + settings.output + '...')
        # 'with' guarantees the handle is closed (the original leaked it)
        with open(settings.output, 'w') as outputFile:
            writer = csv.DictWriter(outputFile, fieldnames=['bulletin_id', 'bulletin_issuer', 'station_id',
                'timestamp', 'modifier_type', 'modifier_sequence', 'temperature', 'dew_point_temperature',
                'rel_humidity', 'wind_direction', 'wind_speed', 'gust_speed', 'station_pressure', 'pressure',
                'cloud_cover', 'sun_duration', 'precipitation_amount', 'precipitation_duration', 'current_weather', 'snow_depth'],
                quoting=csv.QUOTE_ALL, delimiter=',', extrasaction='ignore')
            writer.writeheader()
            for dataRow in settings.decodedData:
                # work on a shallow copy: the original deleted keys from the
                # shared records, corrupting them for a later writeSqliteOutput()
                row = dict(dataRow)
                modifier = row.pop('modifier', None)
                if modifier != None:
                    row['modifier_type'] = modifier['type']
                    row['modifier_sequence'] = modifier['sequence']
                else:
                    row['modifier_type'] = None
                    row['modifier_sequence'] = None
                precipitation = row.pop('precipitation', None)
                row['precipitation_amount'] = None
                row['precipitation_duration'] = None
                if precipitation != None:
                    # take the first precipitation entry that is present
                    for precip in precipitation:
                        if precip != None:
                            row['precipitation_amount'] = precip['amount']
                            row['precipitation_duration'] = precip['duration']
                            break
                # extrasaction='ignore' drops keys without a CSV column
                # (e.g. 'daily_precipitation'), which previously raised ValueError
                writer.writerow(row)
    except IOError:
        sys.exit('Could not open output file. Exiting.')
def _upsertSynopDaily(cursor, connection, stationId, date, column, value):
    """Insert or update one column ('precipitation' or 'sun_duration') of the
    synop_daily row identified by (stationId, date)."""
    dateString = date.strftime("%Y-%m-%d")
    cursor.execute('SELECT * FROM synop_daily WHERE wmo = ? AND date = ?', (stationId, dateString))
    if cursor.fetchone() != None:
        # 'column' is an internal constant, never user input, so building
        # the statement by concatenation is safe here
        cursor.execute('UPDATE synop_daily SET ' + column + ' = ? WHERE wmo = ? AND date = ?',
                       (value, stationId, dateString))
    else:
        values = {'precipitation': None, 'sun_duration': None}
        values[column] = value
        cursor.execute('INSERT INTO synop_daily VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
                       (stationId, dateString, None, None,
                        values['precipitation'], values['sun_duration'], '', ''))
    connection.commit()


def _applySynopModifier(cursor, connection, record, isCorrection):
    """Apply one corrected ('CC', isCorrection=True) or amended ('AA') SYNOP
    record to the synop table.

    synop row index 14 holds the correction sequence, index 15 the amendment
    sequence. The record replaces the stored row only if the stored sequence
    for its modifier type is missing or older than this bulletin's; the
    sequence of the *other* modifier type is carried over from the old row.
    """
    seqIndex = 14 if isCorrection else 15
    otherIndex = 15 if isCorrection else 14
    idTuple = (record['station_id'], record['timestamp'])
    cursor.execute('SELECT * FROM synop WHERE wmo = ? AND timestamp = ?', idTuple)
    result = cursor.fetchone()
    # replace only if the row does not exist, was never modified this way,
    # or carries an older sequence than this bulletin
    if result == None or result[seqIndex] == None or result[seqIndex] < record['modifier']['sequence']:
        preservedSeq = result[otherIndex] if result != None else ''
        if isCorrection:
            correctionSeq = record['modifier']['sequence']
            amendmentSeq = preservedSeq
        else:
            correctionSeq = preservedSeq
            amendmentSeq = record['modifier']['sequence']
        cursor.execute('DELETE FROM synop WHERE wmo = ? AND timestamp = ?', idTuple)
        cursor.execute('INSERT INTO synop VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
                       (record['station_id'], record['timestamp'], record['temperature'], record['dew_point_temperature'],
                        record['rel_humidity'], record['wind_direction'], record['wind_speed'],
                        record['gust_speed'], record['station_pressure'], record['pressure'],
                        record['cloud_cover'], record['sun_duration'], record['current_weather'], record['snow_depth'],
                        correctionSeq, amendmentSeq))
        connection.commit()


def writeSqliteOutput():
    """Write stations, hourly SYNOP records and daily aggregates from
    settings.decodedData into the Sqlite database at settings.output.

    Regular reports are bulk-inserted first; amendment ('AA') and correction
    ('CC') bulletins are applied afterwards so that newer sequences replace
    older data while preserving the other modifier type's sequence.
    """
    print()
    print('Writing to Sqlite output container ' + settings.output + '...')
    connection = sqlite3.connect(settings.output)
    cursor = connection.cursor()
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS station (
            wmo INTEGER PRIMARY KEY,
            icao TEXT,
            lat REAL,
            lon REAL,
            ele REAL,
            name TEXT,
            int_name TEXT)
        ''')
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS synop_daily (
            wmo INTEGER,
            date TEXT,
            min_temperature REAL,
            max_temperature REAL,
            precipitation REAL,
            sun_duration REAL,
            correction_sequence TEXT,
            amendment_sequence TEXT,
            PRIMARY KEY(wmo, date))
        ''')
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS synop (
            wmo INTEGER,
            timestamp TEXT,
            temperature REAL,
            dew_point_temperature REAL,
            rel_humidity REAL,
            wind_direction INTEGER,
            wind_speed REAL,
            gust_speed REAL,
            station_pressure REAL,
            pressure REAL,
            cloud_cover INTEGER,
            sun_duration REAL,
            current_weather INTEGER,
            snow_depth REAL,
            correction_sequence TEXT,
            amendment_sequence TEXT,
            PRIMARY KEY(wmo, timestamp))
        ''')
    # todo precipitation
    connection.commit()
    stations = []
    synop = []
    for dataRow in settings.decodedData:
        station = settings.stationInventory[dataRow['station_id']]
        # make sure each station ends up in the list only once
        alreadyListed = [entry for entry in stations if entry[0] == dataRow['station_id']]
        if len(alreadyListed) == 0:
            # NOTE(review): unicode() exists in Python 2 only -- this module
            # is Python 2 code; porting to Python 3 needs bytes.decode here
            stations.append((station['wmo'], unicode(station['icao'], 'utf-8'),
                             station['lat'], station['lon'], station['ele'],
                             unicode(station['name'], 'utf-8'), unicode(station['int_name'], 'utf-8')))
        # amendments ('AA') and corrections ('CC') are applied separately below
        if dataRow['modifier'] == None or (dataRow['modifier']['type'] != 'AA' and dataRow['modifier']['type'] != 'CC'):
            synop.append((dataRow['station_id'], dataRow['timestamp'], dataRow['temperature'], dataRow['dew_point_temperature'],
                          dataRow['rel_humidity'], dataRow['wind_direction'], dataRow['wind_speed'],
                          dataRow['gust_speed'], dataRow['station_pressure'], dataRow['pressure'],
                          dataRow['cloud_cover'], dataRow['sun_duration'], dataRow['current_weather'], dataRow['snow_depth'],
                          '', ''))
        if dataRow['daily_precipitation'] != None:
            # precipitation reported between 00 and 11 UTC belongs to the previous day
            if dataRow['timestamp'].hour >= 0 and dataRow['timestamp'].hour < 12:
                date = dataRow['timestamp'] - datetime.timedelta(days=1)
            else:
                date = dataRow['timestamp']
            _upsertSynopDaily(cursor, connection, dataRow['station_id'], date,
                              'precipitation', dataRow['daily_precipitation'])
        if dataRow['daily_sun_duration'] != None:
            # sunshine duration always refers to the previous day
            _upsertSynopDaily(cursor, connection, dataRow['station_id'],
                              dataRow['timestamp'] - datetime.timedelta(days=1),
                              'sun_duration', dataRow['daily_sun_duration'])
    # IGNORE means that it does not fail if the key already exists
    cursor.executemany('INSERT OR IGNORE INTO station VALUES (?, ?, ?, ?, ?, ?, ?)', stations)
    cursor.executemany('INSERT OR IGNORE INTO synop VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', synop)
    connection.commit()
    # BUG FIX: the original amendment loop read from the undefined name
    # 'correction' (NameError) and inserted the sequences into swapped
    # columns; both loops now share _applySynopModifier.
    amendments = [record for record in settings.decodedData
                  if record['modifier'] != None and record['modifier']['type'] == 'AA']
    for amendment in amendments:
        _applySynopModifier(cursor, connection, amendment, isCorrection=False)
    corrections = [record for record in settings.decodedData
                   if record['modifier'] != None and record['modifier']['type'] == 'CC']
    for correction in corrections:
        _applySynopModifier(cursor, connection, correction, isCorrection=True)
    connection.close()
| import csv
import datetime
import settings
import sqlite3
# CSV output provides a simple dumping of decoded values
# advanced functions like correcting data according to bulletin modifiers will not be done
# as the CSV file is newly created every time
# also station information is not saved in the CSV file
def writeCsvOutput():
try:
print()
print('Writing to CSV output file ' + settings.output + '...')
outputFile = open(settings.output, 'w')
writer = csv.DictWriter(outputFile, fieldnames=['bulletin_id', 'bulletin_issuer', 'station_id',
'timestamp', 'modifier_type', 'modifier_sequence', 'temperature', 'dew_point_temperature',
'rel_humidity', 'wind_direction', 'wind_speed', 'gust_speed', 'station_pressure', 'pressure',
'cloud_cover', 'sun_duration', 'precipitation_amount', 'precipitation_duration', 'current_weather', 'snow_depth'],
quoting=csv.QUOTE_ALL, delimiter=',')
writer.writeheader()
for dataRow in settings.decodedData:
if dataRow['modifier'] != None:
dataRow['modifier_type'] = dataRow['modifier']['type']
dataRow['modifier_sequence'] = dataRow['modifier']['sequence']
else:
dataRow['modifier_type'] = None
dataRow['modifier_sequence'] = None
del dataRow['modifier']
if dataRow['precipitation'] != None:
# not possible to write more than one precipitation entry to CSV
for precip in dataRow['precipitation']:
if precip != None:
dataRow['precipitation_amount'] = precip['amount']
dataRow['precipitation_duration'] = precip['duration']
break
else:
dataRow['precipitation_amount'] = None
dataRow['precipitation_duration'] = None
del dataRow['precipitation']
writer.writerow(dataRow)
except IOError:
sys.exit('Could not open output file. Exiting.')
def writeSqliteOutput():
print()
print('Writing to Sqlite output container ' + settings.output + '...')
connection = sqlite3.connect(settings.output)
cursor = connection.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS station (
wmo INTEGER PRIMARY KEY,
icao TEXT,
lat REAL,
lon REAL,
ele REAL,
name TEXT,
int_name TEXT)
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS synop_daily (
wmo INTEGER,
date TEXT,
min_temperature REAL,
max_temperature REAL,
precipitation REAL,
sun_duration REAL,
correction_sequence TEXT,
amendment_sequence TEXT,
PRIMARY KEY(wmo, date))
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS synop (
wmo INTEGER,
timestamp TEXT,
temperature REAL,
dew_point_temperature REAL,
rel_humidity REAL,
wind_direction INTEGER,
wind_speed REAL,
gust_speed REAL,
station_pressure REAL,
pressure REAL,
cloud_cover INTEGER,
sun_duration REAL,
current_weather INTEGER,
snow_depth REAL,
correction_sequence TEXT,
amendment_sequence TEXT,
PRIMARY KEY(wmo, timestamp))
''')
# todo precipitation
connection.commit()
stations = []
synop = []
for dataRow in settings.decodedData:
station = settings.stationInventory[dataRow['station_id']]
# make sure station ends up in list only once
duplicates = filter(lambda data: data[0] == dataRow['station_id'], stations)
if len(duplicates) == 0:
stations.append((station['wmo'], unicode(station['icao'], 'utf-8'),
station['lat'], station['lon'], station['ele'],
unicode(station['name'], 'utf-8'), unicode(station['int_name'], 'utf-8')))
# deal with amendments and corrections later
if dataRow['modifier'] == None or (dataRow['modifier']['type'] != 'AA' and dataRow['modifier']['type'] != 'CC'):
synop.append((dataRow['station_id'], dataRow['timestamp'], dataRow['temperature'], dataRow['dew_point_temperature'],
dataRow['rel_humidity'], dataRow['wind_direction'], dataRow['wind_speed'],
dataRow['gust_speed'], dataRow['station_pressure'], dataRow['pressure'],
dataRow['cloud_cover'], dataRow['sun_duration'], dataRow['current_weather'], dataRow['snow_depth'],
'', ''))
if dataRow['daily_precipitation'] != None:
if dataRow['timestamp'].hour >= 0 and dataRow['timestamp'].hour < 12:
date = dataRow['timestamp'] - datetime.timedelta(days=1)
else:
date = dataRow['timestamp']
station = dataRow['station_id']
cursor.execute('SELECT * FROM synop_daily WHERE wmo = ? AND date = ?', (station, date.strftime("%Y-%m-%d")))
if cursor.fetchone() != None:
cursor.execute('UPDATE synop_daily SET precipitation = ? WHERE wmo = ? AND date = ?',
(dataRow['daily_precipitation'], station, date.strftime("%Y-%m-%d")))
else:
cursor.execute('INSERT INTO synop_daily VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
(station, date.strftime("%Y-%m-%d"), None, None, dataRow['daily_precipitation'], None, '', ''))
connection.commit()
if dataRow['daily_sun_duration'] != None:
date = dataRow['timestamp'] - datetime.timedelta(days=1)
station = dataRow['station_id']
cursor.execute('SELECT * FROM synop_daily WHERE wmo = ? AND date = ?', (station, date.strftime("%Y-%m-%d")))
if cursor.fetchone() != None:
cursor.execute('UPDATE synop_daily SET sun_duration = ? WHERE wmo = ? AND date = ?',
(dataRow['daily_sun_duration'], station, date.strftime("%Y-%m-%d")))
else:
cursor.execute('INSERT INTO synop_daily VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
(station, date.strftime("%Y-%m-%d"), None, None, None, dataRow['daily_sun_duration'], '', ''))
connection.commit()
# IGNORE means that it does not fail if the key already exists
cursor.executemany('INSERT OR IGNORE INTO station VALUES (?, ?, ?, ?, ?, ?, ?)', stations)
cursor.executemany('INSERT OR IGNORE INTO synop VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', synop)
connection.commit()
amendments = filter(lambda data: data['modifier'] != None and data['modifier']['type'] == 'AA', settings.decodedData)
for amendment in amendments:
idTuple = (correction['station_id'], correction['timestamp'])
cursor.execute('SELECT * FROM synop WHERE wmo = ? AND timestamp = ?', idTuple)
result = cursor.fetchone()
# insert only if data is either not in the DB
# or the amendment sequence is not present (i.e. has not been amended so far)
# or the amendment sequence is lower (i.e. our amendment is newer)
if result == None or result[15] == None or result[15] < correction['modifier']['sequence']:
if result != None:
correctionSeq = result[14]
else:
correctionSeq = ''
cursor.execute('DELETE FROM synop WHERE wmo = ? AND timestamp = ?', idTuple)
cursor.execute('INSERT INTO synop VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
(amendment['station_id'], amendment['timestamp'], amendment['temperature'], amendment['dew_point_temperature'],
amendment['rel_humidity'], amendment['wind_direction'], amendment['wind_speed'],
amendment['gust_speed'], amendment['station_pressure'], amendment['pressure'],
amendment['cloud_cover'], amendment['sun_duration'], amendment['current_weather'], amendment['snow_depth'],
amendmentSeq, amendment['modifier']['sequence']))
connection.commit()
corrections = filter(lambda data: data['modifier'] != None and data['modifier']['type'] == 'CC', settings.decodedData)
for correction in corrections:
idTuple = (correction['station_id'], correction['timestamp'])
cursor.execute('SELECT * FROM synop WHERE wmo = ? AND timestamp = ?', idTuple)
result = cursor.fetchone()
# insert only if data is either not in the DB
# or the correction sequence is not present (i.e. has not been corrected so far)
# or the correction sequence is lower (i.e. our correction is newer)
if result == None or result[14] == None or result[14] < correction['modifier']['sequence']:
if result != None:
amendmentSeq = result[15]
else:
amendmentSeq = ''
cursor.execute('DELETE FROM synop WHERE wmo = ? AND timestamp = ?', idTuple)
cursor.execute('INSERT INTO synop VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
(correction['station_id'], correction['timestamp'], correction['temperature'], correction['dew_point_temperature'],
correction['rel_humidity'], correction['wind_direction'], correction['wind_speed'],
correction['gust_speed'], correction['station_pressure'], correction['pressure'],
correction['cloud_cover'], correction['sun_duration'], correction['current_weather'], correction['snow_depth'],
correction['modifier']['sequence'], amendmentSeq))
connection.commit()
connection.close() | en | 0.841966 | # CSV output provides a simple dumping of decoded values # advanced functions like correcting data according to bulletin modifiers will not be done # as the CSV file is newly created every time # also station information is not saved in the CSV file # not possible to write more than one precipitation entry to CSV CREATE TABLE IF NOT EXISTS station ( wmo INTEGER PRIMARY KEY, icao TEXT, lat REAL, lon REAL, ele REAL, name TEXT, int_name TEXT) CREATE TABLE IF NOT EXISTS synop_daily ( wmo INTEGER, date TEXT, min_temperature REAL, max_temperature REAL, precipitation REAL, sun_duration REAL, correction_sequence TEXT, amendment_sequence TEXT, PRIMARY KEY(wmo, date)) CREATE TABLE IF NOT EXISTS synop ( wmo INTEGER, timestamp TEXT, temperature REAL, dew_point_temperature REAL, rel_humidity REAL, wind_direction INTEGER, wind_speed REAL, gust_speed REAL, station_pressure REAL, pressure REAL, cloud_cover INTEGER, sun_duration REAL, current_weather INTEGER, snow_depth REAL, correction_sequence TEXT, amendment_sequence TEXT, PRIMARY KEY(wmo, timestamp)) # todo precipitation # make sure station ends up in list only once # deal with amendments and corrections later # IGNORE means that it does not fail if the key already exists # insert only if data is either not in the DB # or the amendment sequence is not present (i.e. has not been amended so far) # or the amendment sequence is lower (i.e. our amendment is newer) # insert only if data is either not in the DB # or the correction sequence is not present (i.e. has not been corrected so far) # or the correction sequence is lower (i.e. our correction is newer) | 3.454291 | 3 |
xcube/util/geom.py | tiagoams/xcube | 0 | 6622820 | # The MIT License (MIT)
# Copyright (c) 2019 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
from typing import Optional, Union, Dict, Tuple, Sequence, Any
import affine
import numpy as np
import rasterio.features
import shapely.geometry
import shapely.geometry
import shapely.wkt
import xarray as xr
from .geojson import GeoJSON
from .update import update_dataset_spatial_attrs
GeometryLike = Union[shapely.geometry.base.BaseGeometry, Dict[str, Any], str, Sequence[Union[float, int]]]
Bounds = Tuple[float, float, float, float]
SplitBounds = Tuple[Bounds, Optional[Bounds]]
_INVALID_GEOMETRY_MSG = ('Geometry must be either a shapely geometry object, '
'a GeoJSON-serializable dictionary, a geometry WKT string, '
'box coordinates (x1, y1, x2, y2), '
'or point coordinates (x, y)')
_INVALID_BOX_COORDS_MSG = 'Invalid box coordinates'
def mask_dataset_by_geometry(dataset: xr.Dataset,
geometry: GeometryLike,
excluded_vars: Sequence[str] = None,
no_clip: bool = False,
save_geometry_mask: Union[str, bool] = False,
save_geometry_wkt: Union[str, bool] = False) -> Optional[xr.Dataset]:
"""
Mask a dataset according to the given geometry. The cells of variables of the
returned dataset will have NaN-values where their spatial coordinates are not intersecting
the given geometry.
:param dataset: The dataset
:param geometry: A geometry-like object, see py:function:`convert_geometry`.
:param excluded_vars: Optional sequence of names of data variables that should not be masked
(but still may be clipped).
:param no_clip: If True, the function will not clip the dataset before masking, this is, the
returned dataset will have the same dimension size as the given *dataset*.
:param save_geometry_mask: If the value is a string, the effective geometry mask array is stored as
a 2D data variable named by *save_geometry_mask*.
If the value is True, the name "geometry_mask" is used.
:param save_geometry_wkt: If the value is a string, the effective intersection geometry is stored as
a Geometry WKT string in the global attribute named by *save_geometry*.
If the value is True, the name "geometry_wkt" is used.
:return: The dataset spatial subset, or None if the bounding box of the dataset has a no or a zero area
intersection with the bounding box of the geometry.
"""
geometry = convert_geometry(geometry)
intersection_geometry = intersect_geometries(get_dataset_bounds(dataset), geometry)
if intersection_geometry is None:
return None
if not no_clip:
dataset = _clip_dataset_by_geometry(dataset, intersection_geometry)
ds_x_min, ds_y_min, ds_x_max, ds_y_max = get_dataset_bounds(dataset)
width = dataset.dims['lon']
height = dataset.dims['lat']
spatial_res = (ds_x_max - ds_x_min) / width
mask_data = get_geometry_mask(width, height, intersection_geometry, ds_x_min, ds_y_min, spatial_res)
mask = xr.DataArray(mask_data,
coords=dict(lat=dataset.lat, lon=dataset.lon),
dims=('lat', 'lon'))
dataset_vars = {}
for var_name, var in dataset.data_vars.items():
if not excluded_vars or var_name not in excluded_vars:
dataset_vars[var_name] = var.where(mask)
else:
dataset_vars[var_name] = var
masked_dataset = xr.Dataset(dataset_vars, coords=dataset.coords, attrs=dataset.attrs)
_save_geometry_mask(masked_dataset, mask, save_geometry_mask)
_save_geometry_wkt(masked_dataset, intersection_geometry, save_geometry_wkt)
return masked_dataset
def clip_dataset_by_geometry(dataset: xr.Dataset,
geometry: GeometryLike,
save_geometry_wkt: Union[str, bool] = False) -> Optional[xr.Dataset]:
"""
Spatially clip a dataset according to the bounding box of a given geometry.
:param dataset: The dataset
:param geometry: A geometry-like object, see py:function:`convert_geometry`.
:param save_geometry_wkt: If the value is a string, the effective intersection geometry is stored as
a Geometry WKT string in the global attribute named by *save_geometry*.
If the value is True, the name "geometry_wkt" is used.
:return: The dataset spatial subset, or None if the bounding box of the dataset has a no or a zero area
intersection with the bounding box of the geometry.
"""
intersection_geometry = intersect_geometries(get_dataset_bounds(dataset), geometry)
if intersection_geometry is None:
return None
return _clip_dataset_by_geometry(dataset, intersection_geometry, save_geometry_wkt=save_geometry_wkt)
def _clip_dataset_by_geometry(dataset: xr.Dataset,
intersection_geometry: shapely.geometry.base.BaseGeometry,
save_geometry_wkt: bool = False) -> Optional[xr.Dataset]:
# TODO (forman): the following code is wrong, if the dataset bounds cross the anti-meridian!
ds_x_min, ds_y_min, ds_x_max, ds_y_max = get_dataset_bounds(dataset)
width = dataset.lon.size
height = dataset.lat.size
res = (ds_y_max - ds_y_min) / height
g_lon_min, g_lat_min, g_lon_max, g_lat_max = intersection_geometry.bounds
x1 = _clamp(int(math.floor((g_lon_min - ds_x_min) / res)), 0, width - 1)
x2 = _clamp(int(math.ceil((g_lon_max - ds_x_min) / res)), 0, width - 1)
y1 = _clamp(int(math.floor((g_lat_min - ds_y_min) / res)), 0, height - 1)
y2 = _clamp(int(math.ceil((g_lat_max - ds_y_min) / res)), 0, height - 1)
if not is_dataset_y_axis_inverted(dataset):
_y1, _y2 = y1, y2
y1 = height - _y2 - 1
y2 = height - _y1 - 1
dataset_subset = dataset.isel(lon=slice(x1, x2), lat=slice(y1, y2))
update_dataset_spatial_attrs(dataset_subset, update_existing=True, in_place=True)
_save_geometry_wkt(dataset_subset, intersection_geometry, save_geometry_wkt)
return dataset_subset
def _save_geometry_mask(dataset, mask, save_mask):
if save_mask:
var_name = save_mask if isinstance(save_mask, str) else 'geometry_mask'
dataset[var_name] = mask
def _save_geometry_wkt(dataset, intersection_geometry, save_geometry):
if save_geometry:
attr_name = save_geometry if isinstance(save_geometry, str) else 'geometry_wkt'
dataset.attrs.update({attr_name: intersection_geometry.wkt})
def get_geometry_mask(width: int, height: int,
geometry: GeometryLike,
lon_min: float, lat_min: float, res: float) -> np.ndarray:
geometry = convert_geometry(geometry)
# noinspection PyTypeChecker
transform = affine.Affine(res, 0.0, lon_min,
0.0, -res, lat_min + res * height)
return rasterio.features.geometry_mask([geometry],
out_shape=(height, width),
transform=transform,
all_touched=True,
invert=True)
def intersect_geometries(geometry1: GeometryLike, geometry2: GeometryLike) \
-> Optional[shapely.geometry.base.BaseGeometry]:
geometry1 = convert_geometry(geometry1)
if geometry1 is None:
return None
geometry2 = convert_geometry(geometry2)
if geometry2 is None:
return geometry1
intersection_geometry = geometry1.intersection(geometry2)
if not intersection_geometry.is_valid or intersection_geometry.is_empty:
return None
return intersection_geometry
def convert_geometry(geometry: Optional[GeometryLike]) -> Optional[shapely.geometry.base.BaseGeometry]:
"""
Convert a geometry-like object into a shapely geometry object (``shapely.geometry.BaseGeometry``).
A geometry-like object is may be any shapely geometry object,
* a dictionary that can be serialized to valid GeoJSON,
* a WKT string,
* a box given by a string of the form "<x1>,<y1>,<x2>,<y2>"
or by a sequence of four numbers x1, y1, x2, y2,
* a point by a string of the form "<x>,<y>"
or by a sequence of two numbers x, y.
Handling of geometries crossing the antimeridian:
* If box coordinates are given, it is allowed to pass x1, x2 where x1 > x2,
which is interpreted as a box crossing the antimeridian. In this case the function
splits the box along the antimeridian and returns a multi-polygon.
* In all other cases, 2D geometries are assumed to _not cross the antimeridian at all_.
:param geometry: A geometry-like object
:return: Shapely geometry object or None.
"""
if isinstance(geometry, shapely.geometry.base.BaseGeometry):
return geometry
if isinstance(geometry, dict):
if GeoJSON.is_geometry(geometry):
return shapely.geometry.shape(geometry)
elif GeoJSON.is_feature(geometry):
geometry = GeoJSON.get_feature_geometry(geometry)
if geometry is not None:
return shapely.geometry.shape(geometry)
elif GeoJSON.is_feature_collection(geometry):
features = GeoJSON.get_feature_collection_features(geometry)
if features is not None:
geometries = [f2 for f2 in [GeoJSON.get_feature_geometry(f1) for f1 in features] if f2 is not None]
if geometries:
geometry = dict(type='GeometryCollection', geometries=geometries)
return shapely.geometry.shape(geometry)
raise ValueError(_INVALID_GEOMETRY_MSG)
if isinstance(geometry, str):
return shapely.wkt.loads(geometry)
if geometry is None:
return None
invalid_box_coords = False
# noinspection PyBroadException
try:
x1, y1, x2, y2 = geometry
is_point = x1 == x2 and y1 == y2
if is_point:
return shapely.geometry.Point(x1, y1)
invalid_box_coords = x1 == x2 or y1 >= y2
if not invalid_box_coords:
return get_box_split_bounds_geometry(x1, y1, x2, y2)
except Exception:
# noinspection PyBroadException
try:
x, y = geometry
return shapely.geometry.Point(x, y)
except Exception:
pass
if invalid_box_coords:
raise ValueError(_INVALID_BOX_COORDS_MSG)
raise ValueError(_INVALID_GEOMETRY_MSG)
def is_dataset_y_axis_inverted(dataset: Union[xr.Dataset, xr.DataArray]) -> bool:
if 'lat' in dataset.coords:
y = dataset.lat
elif 'y' in dataset.coords:
y = dataset.y
else:
raise ValueError("Neither 'lat' nor 'y' coordinate variable found.")
return float(y[0]) < float(y[-1])
def get_dataset_geometry(dataset: Union[xr.Dataset, xr.DataArray]) -> shapely.geometry.base.BaseGeometry:
return get_box_split_bounds_geometry(*get_dataset_bounds(dataset))
def get_dataset_bounds(dataset: Union[xr.Dataset, xr.DataArray]) -> Bounds:
lon_var = dataset.coords.get("lon")
lat_var = dataset.coords.get("lat")
if lon_var is None:
raise ValueError('Missing coordinate variable "lon"')
if lat_var is None:
raise ValueError('Missing coordinate variable "lat"')
lon_bnds_name = lon_var.attrs["bounds"] if "bounds" in lon_var.attrs else "lon_bnds"
if lon_bnds_name in dataset.coords:
lon_bnds_var = dataset.coords[lon_bnds_name]
lon_min = lon_bnds_var[0][0]
lon_max = lon_bnds_var[-1][1]
else:
lon_min = lon_var[0]
lon_max = lon_var[-1]
delta = min(abs(np.diff(lon_var)))
lon_min -= 0.5 * delta
lon_max += 0.5 * delta
lat_bnds_name = lat_var.attrs["bounds"] if "bounds" in lat_var.attrs else "lat_bnds"
if lat_bnds_name in dataset.coords:
lat_bnds_var = dataset.coords[lat_bnds_name]
lat1 = lat_bnds_var[0][0]
lat2 = lat_bnds_var[-1][1]
lat_min = min(lat1, lat2)
lat_max = max(lat1, lat2)
else:
lat1 = lat_var[0]
lat2 = lat_var[-1]
delta = min(abs(np.diff(lat_var)))
lat_min = min(lat1, lat2) - 0.5 * delta
lat_max = max(lat1, lat2) + 0.5 * delta
return float(lon_min), float(lat_min), float(lon_max), float(lat_max)
def get_box_split_bounds(lon_min: float, lat_min: float,
lon_max: float, lat_max: float) -> SplitBounds:
if lon_max >= lon_min:
return (lon_min, lat_min, lon_max, lat_max), None
else:
return (lon_min, lat_min, 180.0, lat_max), (-180.0, lat_min, lon_max, lat_max)
def get_box_split_bounds_geometry(lon_min: float, lat_min: float,
lon_max: float, lat_max: float) -> shapely.geometry.base.BaseGeometry:
box_1, box_2 = get_box_split_bounds(lon_min, lat_min, lon_max, lat_max)
if box_2 is not None:
return shapely.geometry.MultiPolygon(polygons=[shapely.geometry.box(*box_1), shapely.geometry.box(*box_2)])
else:
return shapely.geometry.box(*box_1)
def _clamp(x, x1, x2):
if x < x1:
return x1
if x > x2:
return x2
return x
| # The MIT License (MIT)
# Copyright (c) 2019 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
from typing import Optional, Union, Dict, Tuple, Sequence, Any
import affine
import numpy as np
import rasterio.features
import shapely.geometry
import shapely.geometry
import shapely.wkt
import xarray as xr
from .geojson import GeoJSON
from .update import update_dataset_spatial_attrs
GeometryLike = Union[shapely.geometry.base.BaseGeometry, Dict[str, Any], str, Sequence[Union[float, int]]]
Bounds = Tuple[float, float, float, float]
SplitBounds = Tuple[Bounds, Optional[Bounds]]
_INVALID_GEOMETRY_MSG = ('Geometry must be either a shapely geometry object, '
'a GeoJSON-serializable dictionary, a geometry WKT string, '
'box coordinates (x1, y1, x2, y2), '
'or point coordinates (x, y)')
_INVALID_BOX_COORDS_MSG = 'Invalid box coordinates'
def mask_dataset_by_geometry(dataset: xr.Dataset,
geometry: GeometryLike,
excluded_vars: Sequence[str] = None,
no_clip: bool = False,
save_geometry_mask: Union[str, bool] = False,
save_geometry_wkt: Union[str, bool] = False) -> Optional[xr.Dataset]:
"""
Mask a dataset according to the given geometry. The cells of variables of the
returned dataset will have NaN-values where their spatial coordinates are not intersecting
the given geometry.
:param dataset: The dataset
:param geometry: A geometry-like object, see py:function:`convert_geometry`.
:param excluded_vars: Optional sequence of names of data variables that should not be masked
(but still may be clipped).
:param no_clip: If True, the function will not clip the dataset before masking, this is, the
returned dataset will have the same dimension size as the given *dataset*.
:param save_geometry_mask: If the value is a string, the effective geometry mask array is stored as
a 2D data variable named by *save_geometry_mask*.
If the value is True, the name "geometry_mask" is used.
:param save_geometry_wkt: If the value is a string, the effective intersection geometry is stored as
a Geometry WKT string in the global attribute named by *save_geometry*.
If the value is True, the name "geometry_wkt" is used.
:return: The dataset spatial subset, or None if the bounding box of the dataset has a no or a zero area
intersection with the bounding box of the geometry.
"""
geometry = convert_geometry(geometry)
intersection_geometry = intersect_geometries(get_dataset_bounds(dataset), geometry)
if intersection_geometry is None:
return None
if not no_clip:
dataset = _clip_dataset_by_geometry(dataset, intersection_geometry)
ds_x_min, ds_y_min, ds_x_max, ds_y_max = get_dataset_bounds(dataset)
width = dataset.dims['lon']
height = dataset.dims['lat']
spatial_res = (ds_x_max - ds_x_min) / width
mask_data = get_geometry_mask(width, height, intersection_geometry, ds_x_min, ds_y_min, spatial_res)
mask = xr.DataArray(mask_data,
coords=dict(lat=dataset.lat, lon=dataset.lon),
dims=('lat', 'lon'))
dataset_vars = {}
for var_name, var in dataset.data_vars.items():
if not excluded_vars or var_name not in excluded_vars:
dataset_vars[var_name] = var.where(mask)
else:
dataset_vars[var_name] = var
masked_dataset = xr.Dataset(dataset_vars, coords=dataset.coords, attrs=dataset.attrs)
_save_geometry_mask(masked_dataset, mask, save_geometry_mask)
_save_geometry_wkt(masked_dataset, intersection_geometry, save_geometry_wkt)
return masked_dataset
def clip_dataset_by_geometry(dataset: xr.Dataset,
geometry: GeometryLike,
save_geometry_wkt: Union[str, bool] = False) -> Optional[xr.Dataset]:
"""
Spatially clip a dataset according to the bounding box of a given geometry.
:param dataset: The dataset
:param geometry: A geometry-like object, see py:function:`convert_geometry`.
:param save_geometry_wkt: If the value is a string, the effective intersection geometry is stored as
a Geometry WKT string in the global attribute named by *save_geometry*.
If the value is True, the name "geometry_wkt" is used.
:return: The dataset spatial subset, or None if the bounding box of the dataset has a no or a zero area
intersection with the bounding box of the geometry.
"""
intersection_geometry = intersect_geometries(get_dataset_bounds(dataset), geometry)
if intersection_geometry is None:
return None
return _clip_dataset_by_geometry(dataset, intersection_geometry, save_geometry_wkt=save_geometry_wkt)
def _clip_dataset_by_geometry(dataset: xr.Dataset,
intersection_geometry: shapely.geometry.base.BaseGeometry,
save_geometry_wkt: bool = False) -> Optional[xr.Dataset]:
# TODO (forman): the following code is wrong, if the dataset bounds cross the anti-meridian!
ds_x_min, ds_y_min, ds_x_max, ds_y_max = get_dataset_bounds(dataset)
width = dataset.lon.size
height = dataset.lat.size
res = (ds_y_max - ds_y_min) / height
g_lon_min, g_lat_min, g_lon_max, g_lat_max = intersection_geometry.bounds
x1 = _clamp(int(math.floor((g_lon_min - ds_x_min) / res)), 0, width - 1)
x2 = _clamp(int(math.ceil((g_lon_max - ds_x_min) / res)), 0, width - 1)
y1 = _clamp(int(math.floor((g_lat_min - ds_y_min) / res)), 0, height - 1)
y2 = _clamp(int(math.ceil((g_lat_max - ds_y_min) / res)), 0, height - 1)
if not is_dataset_y_axis_inverted(dataset):
_y1, _y2 = y1, y2
y1 = height - _y2 - 1
y2 = height - _y1 - 1
dataset_subset = dataset.isel(lon=slice(x1, x2), lat=slice(y1, y2))
update_dataset_spatial_attrs(dataset_subset, update_existing=True, in_place=True)
_save_geometry_wkt(dataset_subset, intersection_geometry, save_geometry_wkt)
return dataset_subset
def _save_geometry_mask(dataset, mask, save_mask):
if save_mask:
var_name = save_mask if isinstance(save_mask, str) else 'geometry_mask'
dataset[var_name] = mask
def _save_geometry_wkt(dataset, intersection_geometry, save_geometry):
if save_geometry:
attr_name = save_geometry if isinstance(save_geometry, str) else 'geometry_wkt'
dataset.attrs.update({attr_name: intersection_geometry.wkt})
def get_geometry_mask(width: int, height: int,
geometry: GeometryLike,
lon_min: float, lat_min: float, res: float) -> np.ndarray:
geometry = convert_geometry(geometry)
# noinspection PyTypeChecker
transform = affine.Affine(res, 0.0, lon_min,
0.0, -res, lat_min + res * height)
return rasterio.features.geometry_mask([geometry],
out_shape=(height, width),
transform=transform,
all_touched=True,
invert=True)
def intersect_geometries(geometry1: GeometryLike, geometry2: GeometryLike) \
-> Optional[shapely.geometry.base.BaseGeometry]:
geometry1 = convert_geometry(geometry1)
if geometry1 is None:
return None
geometry2 = convert_geometry(geometry2)
if geometry2 is None:
return geometry1
intersection_geometry = geometry1.intersection(geometry2)
if not intersection_geometry.is_valid or intersection_geometry.is_empty:
return None
return intersection_geometry
def convert_geometry(geometry: Optional[GeometryLike]) -> Optional[shapely.geometry.base.BaseGeometry]:
"""
Convert a geometry-like object into a shapely geometry object (``shapely.geometry.BaseGeometry``).
A geometry-like object is may be any shapely geometry object,
* a dictionary that can be serialized to valid GeoJSON,
* a WKT string,
* a box given by a string of the form "<x1>,<y1>,<x2>,<y2>"
or by a sequence of four numbers x1, y1, x2, y2,
* a point by a string of the form "<x>,<y>"
or by a sequence of two numbers x, y.
Handling of geometries crossing the antimeridian:
* If box coordinates are given, it is allowed to pass x1, x2 where x1 > x2,
which is interpreted as a box crossing the antimeridian. In this case the function
splits the box along the antimeridian and returns a multi-polygon.
* In all other cases, 2D geometries are assumed to _not cross the antimeridian at all_.
:param geometry: A geometry-like object
:return: Shapely geometry object or None.
"""
if isinstance(geometry, shapely.geometry.base.BaseGeometry):
return geometry
if isinstance(geometry, dict):
if GeoJSON.is_geometry(geometry):
return shapely.geometry.shape(geometry)
elif GeoJSON.is_feature(geometry):
geometry = GeoJSON.get_feature_geometry(geometry)
if geometry is not None:
return shapely.geometry.shape(geometry)
elif GeoJSON.is_feature_collection(geometry):
features = GeoJSON.get_feature_collection_features(geometry)
if features is not None:
geometries = [f2 for f2 in [GeoJSON.get_feature_geometry(f1) for f1 in features] if f2 is not None]
if geometries:
geometry = dict(type='GeometryCollection', geometries=geometries)
return shapely.geometry.shape(geometry)
raise ValueError(_INVALID_GEOMETRY_MSG)
if isinstance(geometry, str):
return shapely.wkt.loads(geometry)
if geometry is None:
return None
invalid_box_coords = False
# noinspection PyBroadException
try:
x1, y1, x2, y2 = geometry
is_point = x1 == x2 and y1 == y2
if is_point:
return shapely.geometry.Point(x1, y1)
invalid_box_coords = x1 == x2 or y1 >= y2
if not invalid_box_coords:
return get_box_split_bounds_geometry(x1, y1, x2, y2)
except Exception:
# noinspection PyBroadException
try:
x, y = geometry
return shapely.geometry.Point(x, y)
except Exception:
pass
if invalid_box_coords:
raise ValueError(_INVALID_BOX_COORDS_MSG)
raise ValueError(_INVALID_GEOMETRY_MSG)
def is_dataset_y_axis_inverted(dataset: Union[xr.Dataset, xr.DataArray]) -> bool:
if 'lat' in dataset.coords:
y = dataset.lat
elif 'y' in dataset.coords:
y = dataset.y
else:
raise ValueError("Neither 'lat' nor 'y' coordinate variable found.")
return float(y[0]) < float(y[-1])
def get_dataset_geometry(dataset: Union[xr.Dataset, xr.DataArray]) -> shapely.geometry.base.BaseGeometry:
return get_box_split_bounds_geometry(*get_dataset_bounds(dataset))
def get_dataset_bounds(dataset: Union[xr.Dataset, xr.DataArray]) -> Bounds:
lon_var = dataset.coords.get("lon")
lat_var = dataset.coords.get("lat")
if lon_var is None:
raise ValueError('Missing coordinate variable "lon"')
if lat_var is None:
raise ValueError('Missing coordinate variable "lat"')
lon_bnds_name = lon_var.attrs["bounds"] if "bounds" in lon_var.attrs else "lon_bnds"
if lon_bnds_name in dataset.coords:
lon_bnds_var = dataset.coords[lon_bnds_name]
lon_min = lon_bnds_var[0][0]
lon_max = lon_bnds_var[-1][1]
else:
lon_min = lon_var[0]
lon_max = lon_var[-1]
delta = min(abs(np.diff(lon_var)))
lon_min -= 0.5 * delta
lon_max += 0.5 * delta
lat_bnds_name = lat_var.attrs["bounds"] if "bounds" in lat_var.attrs else "lat_bnds"
if lat_bnds_name in dataset.coords:
lat_bnds_var = dataset.coords[lat_bnds_name]
lat1 = lat_bnds_var[0][0]
lat2 = lat_bnds_var[-1][1]
lat_min = min(lat1, lat2)
lat_max = max(lat1, lat2)
else:
lat1 = lat_var[0]
lat2 = lat_var[-1]
delta = min(abs(np.diff(lat_var)))
lat_min = min(lat1, lat2) - 0.5 * delta
lat_max = max(lat1, lat2) + 0.5 * delta
return float(lon_min), float(lat_min), float(lon_max), float(lat_max)
def get_box_split_bounds(lon_min: float, lat_min: float,
lon_max: float, lat_max: float) -> SplitBounds:
if lon_max >= lon_min:
return (lon_min, lat_min, lon_max, lat_max), None
else:
return (lon_min, lat_min, 180.0, lat_max), (-180.0, lat_min, lon_max, lat_max)
def get_box_split_bounds_geometry(lon_min: float, lat_min: float,
lon_max: float, lat_max: float) -> shapely.geometry.base.BaseGeometry:
box_1, box_2 = get_box_split_bounds(lon_min, lat_min, lon_max, lat_max)
if box_2 is not None:
return shapely.geometry.MultiPolygon(polygons=[shapely.geometry.box(*box_1), shapely.geometry.box(*box_2)])
else:
return shapely.geometry.box(*box_1)
def _clamp(x, x1, x2):
if x < x1:
return x1
if x > x2:
return x2
return x
| en | 0.775834 | # The MIT License (MIT) # Copyright (c) 2019 by the xcube development team and contributors # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is furnished to do # so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. Mask a dataset according to the given geometry. The cells of variables of the returned dataset will have NaN-values where their spatial coordinates are not intersecting the given geometry. :param dataset: The dataset :param geometry: A geometry-like object, see py:function:`convert_geometry`. :param excluded_vars: Optional sequence of names of data variables that should not be masked (but still may be clipped). :param no_clip: If True, the function will not clip the dataset before masking, this is, the returned dataset will have the same dimension size as the given *dataset*. :param save_geometry_mask: If the value is a string, the effective geometry mask array is stored as a 2D data variable named by *save_geometry_mask*. If the value is True, the name "geometry_mask" is used. 
:param save_geometry_wkt: If the value is a string, the effective intersection geometry is stored as a Geometry WKT string in the global attribute named by *save_geometry*. If the value is True, the name "geometry_wkt" is used. :return: The dataset spatial subset, or None if the bounding box of the dataset has a no or a zero area intersection with the bounding box of the geometry. Spatially clip a dataset according to the bounding box of a given geometry. :param dataset: The dataset :param geometry: A geometry-like object, see py:function:`convert_geometry`. :param save_geometry_wkt: If the value is a string, the effective intersection geometry is stored as a Geometry WKT string in the global attribute named by *save_geometry*. If the value is True, the name "geometry_wkt" is used. :return: The dataset spatial subset, or None if the bounding box of the dataset has a no or a zero area intersection with the bounding box of the geometry. # TODO (forman): the following code is wrong, if the dataset bounds cross the anti-meridian! # noinspection PyTypeChecker Convert a geometry-like object into a shapely geometry object (``shapely.geometry.BaseGeometry``). A geometry-like object is may be any shapely geometry object, * a dictionary that can be serialized to valid GeoJSON, * a WKT string, * a box given by a string of the form "<x1>,<y1>,<x2>,<y2>" or by a sequence of four numbers x1, y1, x2, y2, * a point by a string of the form "<x>,<y>" or by a sequence of two numbers x, y. Handling of geometries crossing the antimeridian: * If box coordinates are given, it is allowed to pass x1, x2 where x1 > x2, which is interpreted as a box crossing the antimeridian. In this case the function splits the box along the antimeridian and returns a multi-polygon. * In all other cases, 2D geometries are assumed to _not cross the antimeridian at all_. :param geometry: A geometry-like object :return: Shapely geometry object or None. 
# noinspection PyBroadException # noinspection PyBroadException | 1.602768 | 2 |
devops/__init__.py | pcah/python-clean-architecture | 278 | 6622821 | <reponame>pcah/python-clean-architecture<gh_stars>100-1000
# flake8: noqa
from . import commands
PROJECT_PACKAGE = None
| # flake8: noqa
from . import commands
PROJECT_PACKAGE = None | it | 0.238973 | # flake8: noqa | 1.00178 | 1 |
youths/admin.py | frwickst/open-city-profile | 0 | 6622822 | <gh_stars>0
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from youths.models import YouthProfile
class YouthProfileAdminInline(admin.StackedInline):
model = YouthProfile
fk_name = "profile"
extra = 0
readonly_fields = ("approved_time", "approval_notification_timestamp")
fieldsets = (
(
_("Youth profile basic information"),
{
"fields": (
"profile",
"ssn",
"school_name",
"school_class",
"expiration",
"preferred_language",
"volunteer_info",
"gender",
"notes",
)
},
),
(
_("Youth profile illnesses"),
{
"fields": (
"diabetes",
"epilepsy",
"heart_disease",
"serious_allergies",
"allergies",
"extra_illnesses_info",
)
},
),
(
_("Youth profile permissions"),
{
"fields": (
"approver_email",
"approval_notification_timestamp",
"approved_time",
"photo_usage_approved",
)
},
),
)
| from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from youths.models import YouthProfile
class YouthProfileAdminInline(admin.StackedInline):
model = YouthProfile
fk_name = "profile"
extra = 0
readonly_fields = ("approved_time", "approval_notification_timestamp")
fieldsets = (
(
_("Youth profile basic information"),
{
"fields": (
"profile",
"ssn",
"school_name",
"school_class",
"expiration",
"preferred_language",
"volunteer_info",
"gender",
"notes",
)
},
),
(
_("Youth profile illnesses"),
{
"fields": (
"diabetes",
"epilepsy",
"heart_disease",
"serious_allergies",
"allergies",
"extra_illnesses_info",
)
},
),
(
_("Youth profile permissions"),
{
"fields": (
"approver_email",
"approval_notification_timestamp",
"approved_time",
"photo_usage_approved",
)
},
),
) | none | 1 | 1.862009 | 2 | |
data_processing/data_creation.py | KartikaySrivadtava/dl-for-har-ea1e9babb2b178cc338dbc72db974325c193c781 | 1 | 6622823 | <filename>data_processing/data_creation.py<gh_stars>1-10
from PyAV.av.io import read
import re
import numpy as np
import pandas as pd
from glob import glob
import os
from io import BytesIO
import zipfile
def milliseconds_to_hertz(start, end, rate):
"""
Function which converts milliseconds to hertz timestamps
:param start: start time in milliseconds
:param end: end time in milliseconds
:param rate: employed sampling rate during recording
:return: start and end time in hertz
"""
adjusted_rate = rate / 1000
return int(np.floor(float(start) * adjusted_rate)), int(np.floor(float(end) * adjusted_rate))
def create_wetlab_data_from_mkvs(feature_tracks, label_tracks, directory, sample_rate):
"""
Funtion which creates the a csv file using the WetLab mkv dataset files as input.
:param feature_tracks: tracks which contain the features that are to be used from the wetlab mkvs
:param label_tracks: tracks which contain the labels that are to be used from the wetlab mkvs
:param directory: directory where the resulting csv is to be saved to
:param sample_rate: sampling rate
:return: pandas dataframe containing wetlab features
"""
filenames = sorted(glob(os.path.join(directory, '*.mkv')))
# obtain unique labels
unique_labels = []
for filename in filenames:
unique_labels = np.unique(
np.concatenate((unique_labels, np.vstack(read(label_tracks, file=filename)[0])[:, 2])))
output = pd.DataFrame()
for i, filename in enumerate(filenames):
features = np.vstack(read(feature_tracks, file=filename)[0])
features = features / 2 ** 16 * 8 * 9.81
labels = np.vstack(read(label_tracks, file=filename)[0])
idx = np.full(len(features), i)
feat_output = pd.DataFrame(np.concatenate((np.array(idx)[:, None], features), axis=1))
action_label_output = pd.DataFrame(np.full(len(features), 0))
tasks_label_output = pd.DataFrame(np.full(len(features), 0))
for label_triplet in labels:
start, end = milliseconds_to_hertz(label_triplet[0], label_triplet[1], sample_rate)
if any(char.isdigit() for char in label_triplet[2]):
tasks_label_output[start:end] = label_triplet[2]
else:
action_label_output[start:end] = label_triplet[2]
temp_output = pd.concat((feat_output, action_label_output, tasks_label_output), axis=1)
if i == 0:
output = temp_output
else:
output = pd.concat((output, temp_output), axis=0)
print("Processed file: {0}".format(filename))
print("Value counts (Actions): ")
print(output.iloc[:, -2].value_counts())
print("Value counts (Tasks): ")
print(output.iloc[:, -1].value_counts())
return output
def create_sbhar_dataset(folder):
    """
    Build the SBHAR dataset as a single dataframe.

    Reads ``labels.txt`` and the per-experiment accelerometer files from
    ``folder`` and returns one row per sample: subject id (zero-based),
    three acceleration channels, and the activity label (0 = unlabeled).

    :param folder: directory containing ``labels.txt`` and ``acc_exp*_user*.txt``
    :return: pandas dataframe (subject, acc x/y/z, activity label)
    """
    # labels.txt rows: (experiment, subject, activity, start_sample, end_sample)
    labels = np.loadtxt(os.path.join(folder, 'labels.txt'), delimiter=' ')
    acc_data = [f for f in os.listdir(folder) if 'acc' in f]
    # gyro_data = [f for f in os.listdir(folder) if 'gyro' in f]
    output_data = None
    for sbj in range(30):
        if sbj < 9:
            acc_sbj_files = [f for f in acc_data if 'user0' + str(sbj + 1) in f]
            # gyro_sbj_files = [f for f in gyro_data if 'user0' + str(sbj + 1) in f]
        else:
            acc_sbj_files = [f for f in acc_data if 'user' + str(sbj + 1) in f]
            # gyro_sbj_files = [f for f in gyro_data if 'user' + str(sbj + 1) in f]
        sbj_data = None
        # acc + gyro
        for acc_sbj_file in acc_sbj_files:
            acc_tmp_data = np.loadtxt(os.path.join(folder, acc_sbj_file), delimiter=' ')
            # BUGFIX(maintainability): the original rebound the outer loop
            # variable ``sbj`` to a string here; use a dedicated name instead.
            sbj_id = re.sub('[^0-9]', '', acc_sbj_file.split('_')[2])
            exp = re.sub('[^0-9]', '', acc_sbj_file.split('_')[1])
            # gyro_tmp_data = np.loadtxt(os.path.join(folder, 'gyro_exp' + exp + '_user' + sbj_id + '.txt'), delimiter=' ')
            sbj_labels = labels[(labels[:, 0] == int(exp)) & (labels[:, 1] == int(sbj_id))]
            # tmp_data = np.concatenate((acc_tmp_data, gyro_tmp_data), axis=1)
            # append an initially-zero (= unlabeled) label column
            tmp_data = np.concatenate((acc_tmp_data, np.zeros(acc_tmp_data.shape[0])[:, None]), axis=1)
            for label_triplet in sbj_labels:
                # fill the label column over the annotated [start, end] sample range (end inclusive)
                tmp_data[int(label_triplet[3]):int(label_triplet[4] + 1), -1] = label_triplet[2]
            # prepend the zero-based subject id column
            tmp_data = np.concatenate((np.full(tmp_data.shape[0], int(sbj_id) - 1)[:, None], tmp_data), axis=1)
            if sbj_data is None:
                sbj_data = tmp_data
            else:
                sbj_data = np.concatenate((sbj_data, tmp_data), axis=0)
        if output_data is None:
            output_data = sbj_data
        else:
            output_data = np.concatenate((output_data, sbj_data), axis=0)
    return pd.DataFrame(output_data, index=None)
def create_hhar_dataset(folder):
    """
    Load the HHAR watch accelerometer recordings as a dataframe.

    Maps the letter-coded users 'a'..'i' to numeric ids 0.0..8.0, keeps only
    the user id, the three acceleration channels and the ground-truth label,
    and replaces missing values with 0.

    :param folder: directory containing ``Watch_accelerometer.csv``
    :return: pandas dataframe with columns User, x, y, z, gt
    """
    frame = pd.read_csv(os.path.join(folder, 'Watch_accelerometer.csv'))
    # letter-coded subjects -> numeric ids 0.0 .. 8.0
    subject_ids = {letter: float(position) for position, letter in enumerate('abcdefghi')}
    frame = frame.replace({"User": subject_ids})
    frame = frame[['User', 'x', 'y', 'z', 'gt']]
    return frame.fillna(0)
def create_rwhar_dataset(folder):
    """
    Author : <NAME>, <EMAIL>

    Build the RWHAR dataset from the per-subject zip archives and return a
    dataframe with columns: subject, acc_x, acc_y, acc_z, activity
    (forearm accelerometer data only).

    :param folder: parent folder containing the ``proband*`` subject directories
    :return: pandas dataframe
    """
    RWHAR_ACTIVITY_NUM = {
        "climbingdown": 1,
        "climbingup": 2,
        "jumping": 3,
        "lying": 4,
        "running": 5,
        "sitting": 6,
        "standing": 7,
        "walking": 8,
    }
    RWHAR_BAND_LOCATION = {
        "chest": 1,
        "forearm": 2,
        "head": 3,
        "shin": 4,
        "thigh": 5,
        "upperarm": 6,
        "waist": 7,
    }

    def check_rwhar_zip(path):
        # verify that the path is to the zip containing csv and not another zip of csv
        if any(".zip" in filename for filename in zipfile.ZipFile(path, "r").namelist()):
            # There are multiple zips in some cases
            with zipfile.ZipFile(path, "r") as temp:
                path = BytesIO(temp.read(
                    max(temp.namelist())))  # max chosen so the exact same acc and gyr files are selected each time (repeatability)
        return path

    def rwhar_load_csv(path):
        # Loads up the csv at given path, returns a dictionary of data at each location
        path = check_rwhar_zip(path)
        tables_dict = {}
        with zipfile.ZipFile(path, "r") as Zip:
            zip_files = Zip.namelist()
            for csv in zip_files:
                if "csv" in csv:
                    location = RWHAR_BAND_LOCATION[
                        csv[csv.rfind("_") + 1:csv.rfind(".")]]  # location is between last _ and .csv extension
                    sensor = csv[:3]
                    prefix = sensor.lower() + "_"
                    table = pd.read_csv(Zip.open(csv))
                    table.rename(columns={"attr_x": prefix + "x",
                                          "attr_y": prefix + "y",
                                          "attr_z": prefix + "z",
                                          "attr_time": "timestamp",
                                          }, inplace=True)
                    table.drop(columns="id", inplace=True)
                    tables_dict[location] = table
        return tables_dict

    def rwhar_load_table_activity(path_acc):
        # Logic for loading each activity zip file for acc and then merging the tables at each location
        acc_tables = rwhar_load_csv(path_acc)
        data = pd.DataFrame()
        for loc in acc_tables.keys():
            acc_tab = acc_tables[loc]
            acc_tab = pd.DataFrame(acc_tab)
            acc_tab["location"] = loc
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # this module targets pandas < 2 (kept for compatibility with the rest of the file).
            data = data.append(acc_tab)
        return data

    def clean_rwhar(filepath, sel_location=None):
        # the function reads the files in RWHAR dataset and each subject and each activity labelled in a panda table
        # filepath is the parent folder containing all the RWHAR dataset.
        # Note: all entries are loaded but their timestamps are not syncronised. So a single location must be selected and
        # all entries with NA must be dropped.
        subject_dir = os.listdir(filepath)
        dataset = pd.DataFrame()
        for sub in subject_dir:
            if "proband" not in sub:
                continue
            # files = os.listdir(filepath+sub)
            # files = [file for file in files if (("acc" in file or "gyr" in file) and "csv" in file)]
            subject_num = int(sub[7:]) - 1  # proband is 7 letters long so subject num is number following that
            sub_pd = pd.DataFrame()
            for activity in RWHAR_ACTIVITY_NUM.keys():  # pair the acc and gyr zips of the same activity
                activity_name = "_" + activity + "_csv.zip"
                path_acc = filepath + '/' + sub + "/acc" + activity_name  # concat the path to acc file for given activity and subject
                table = rwhar_load_table_activity(path_acc)
                table["activity"] = RWHAR_ACTIVITY_NUM[activity]  # add a activity column and fill it with activity num
                sub_pd = sub_pd.append(table)
            sub_pd["subject"] = subject_num  # add subject id to all entries
            dataset = dataset.append(sub_pd)
        dataset = dataset.dropna()
        # BUGFIX: the location filter used to run unconditionally here as well,
        # which raised KeyError when sel_location is None and was redundant
        # otherwise; it is now only applied inside the guard below.
        if sel_location is not None:
            print("Selecting location : ", sel_location)
            dataset = dataset[dataset.location == RWHAR_BAND_LOCATION[sel_location]]
        dataset = dataset.drop(columns="location")
        dataset = dataset.sort_values(by=['subject', 'timestamp'])
        dataset = dataset.drop(columns="timestamp")
        dataset = dataset.dropna()
        print(dataset['activity'].value_counts())
        return dataset

    data = clean_rwhar(folder, sel_location='forearm')
    data = data[['subject', 'acc_x', 'acc_y', 'acc_z', 'activity']]
    return data
def create_opportunity_dataset(folder, output_folder):
    """
    Convert the raw OPPORTUNITY challenge zip archive into csv files.

    Reads every .dat file listed in OPPORTUNITY_DATA_FILES from the zip in
    ``folder``, selects the 113 challenge sensor channels, interpolates and
    normalizes them, prepends the subject id, and writes the full dataset plus
    the Ordonez split prefix to csv.

    :param folder: directory containing ``OpportunityUCIDataset.zip``
    :param output_folder: path prefix for the generated ``*_data.csv`` files
    """
    # Hardcoded number of sensor channels employed in the OPPORTUNITY challenge
    NB_SENSOR_CHANNELS = 113
    # Hardcoded names of the files defining the OPPORTUNITY challenge data. As named in the original data.
    # Each tuple is (zero-based subject id, path inside the zip archive).
    OPPORTUNITY_DATA_FILES = [  # Ordonez training
        (0, 'OpportunityUCIDataset/dataset/S1-Drill.dat'),
        (0, 'OpportunityUCIDataset/dataset/S1-ADL1.dat'),
        (0, 'OpportunityUCIDataset/dataset/S1-ADL2.dat'),
        (0, 'OpportunityUCIDataset/dataset/S1-ADL3.dat'),
        (0, 'OpportunityUCIDataset/dataset/S1-ADL4.dat'),
        (0, 'OpportunityUCIDataset/dataset/S1-ADL5.dat'),
        (1, 'OpportunityUCIDataset/dataset/S2-Drill.dat'),
        (1, 'OpportunityUCIDataset/dataset/S2-ADL1.dat'),
        (1, 'OpportunityUCIDataset/dataset/S2-ADL2.dat'),
        (2, 'OpportunityUCIDataset/dataset/S3-Drill.dat'),
        (2, 'OpportunityUCIDataset/dataset/S3-ADL1.dat'),
        (2, 'OpportunityUCIDataset/dataset/S3-ADL2.dat'),
        # Ordonez validation
        (1, 'OpportunityUCIDataset/dataset/S2-ADL3.dat'),
        (2, 'OpportunityUCIDataset/dataset/S3-ADL3.dat'),
        # Ordonez testing
        (1, 'OpportunityUCIDataset/dataset/S2-ADL4.dat'),
        (1, 'OpportunityUCIDataset/dataset/S2-ADL5.dat'),
        (2, 'OpportunityUCIDataset/dataset/S3-ADL4.dat'),
        (2, 'OpportunityUCIDataset/dataset/S3-ADL5.dat'),
        # additional data
        (3, 'OpportunityUCIDataset/dataset/S4-ADL1.dat'),
        (3, 'OpportunityUCIDataset/dataset/S4-ADL2.dat'),
        (3, 'OpportunityUCIDataset/dataset/S4-ADL3.dat'),
        (3, 'OpportunityUCIDataset/dataset/S4-ADL4.dat'),
        (3, 'OpportunityUCIDataset/dataset/S4-ADL5.dat'),
        (3, 'OpportunityUCIDataset/dataset/S4-Drill.dat')
    ]
    # Hardcoded thresholds to define global maximums and minimums for every one of the 113 sensor channels employed in the
    # OPPORTUNITY challenge
    NORM_MAX_THRESHOLDS = [3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000,
                           3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000,
                           3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000,
                           3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000,
                           3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
                           3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
                           3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
                           3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
                           3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
                           250, 25, 200, 5000, 5000, 5000, 5000, 5000, 5000,
                           10000, 10000, 10000, 10000, 10000, 10000, 250, 250, 25,
                           200, 5000, 5000, 5000, 5000, 5000, 5000, 10000, 10000,
                           10000, 10000, 10000, 10000, 250, ]
    NORM_MIN_THRESHOLDS = [-3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000,
                           -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000,
                           -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000,
                           -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000,
                           -3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
                           -3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
                           -3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
                           -3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
                           -3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
                           -250, -100, -200, -5000, -5000, -5000, -5000, -5000, -5000,
                           -10000, -10000, -10000, -10000, -10000, -10000, -250, -250, -100,
                           -200, -5000, -5000, -5000, -5000, -5000, -5000, -10000, -10000,
                           -10000, -10000, -10000, -10000, -250, ]

    def select_columns_opp(data):
        """Selection of the 113 columns employed in the OPPORTUNITY challenge
        :param data: numpy integer matrix
            Sensor data (all features)
        :return: numpy integer matrix
            Selection of features
        """
        # included-excluded
        features_delete = np.arange(46, 50)
        features_delete = np.concatenate([features_delete, np.arange(59, 63)])
        features_delete = np.concatenate([features_delete, np.arange(72, 76)])
        features_delete = np.concatenate([features_delete, np.arange(85, 89)])
        features_delete = np.concatenate([features_delete, np.arange(98, 102)])
        features_delete = np.concatenate([features_delete, np.arange(134, 243)])
        features_delete = np.concatenate([features_delete, np.arange(244, 249)])
        return np.delete(data, features_delete, 1)

    def normalize(data, max_list, min_list):
        """Normalizes all sensor channels to [0, 1)
        :param data: numpy integer matrix
            Sensor data
        :param max_list: numpy integer array
            Array containing maximums values for every one of the 113 sensor channels
        :param min_list: numpy integer array
            Array containing minimum values for every one of the 113 sensor channels
        :return:
            Normalized sensor data (modified in place and returned)
        """
        max_list, min_list = np.array(max_list), np.array(min_list)
        diffs = max_list - min_list
        for i in np.arange(data.shape[1]):
            data[:, i] = (data[:, i] - min_list[i]) / diffs[i]
        # Checking the boundaries: clamp out-of-range values into [0, 0.99]
        data[data > 1] = 0.99
        data[data < 0] = 0.00
        return data

    def adjust_idx_labels(data_y, label):
        """Transforms original labels into the range [0, nb_labels-1]
        (NOTE(review): for 'gestures' the null class stays 0 and gestures map to 1..17,
        so the effective range is [0, nb_labels]; kept as in the original Ordonez code)
        :param data_y: numpy integer array
            Sensor labels
        :param label: string, ['gestures' (default), 'locomotion']
            Type of activities to be recognized
        :return: numpy integer array
            Modified sensor labels
        """
        if label == 'locomotion':  # Labels for locomotion are adjusted
            data_y[data_y == 4] = 3
            data_y[data_y == 5] = 4
        elif label == 'gestures':  # Labels for gestures are adjusted
            data_y[data_y == 406516] = 1
            data_y[data_y == 406517] = 2
            data_y[data_y == 404516] = 3
            data_y[data_y == 404517] = 4
            data_y[data_y == 406520] = 5
            data_y[data_y == 404520] = 6
            data_y[data_y == 406505] = 7
            data_y[data_y == 404505] = 8
            data_y[data_y == 406519] = 9
            data_y[data_y == 404519] = 10
            data_y[data_y == 406511] = 11
            data_y[data_y == 404511] = 12
            data_y[data_y == 406508] = 13
            data_y[data_y == 404508] = 14
            data_y[data_y == 408512] = 15
            data_y[data_y == 407521] = 16
            data_y[data_y == 405506] = 17
        return data_y

    def process_dataset_file(data):
        """Function defined as a pipeline to process individual OPPORTUNITY files
        :param data: numpy integer matrix
            Matrix containing data samples (rows) for every sensor channel (column)
        :return: numpy integer matrix, numy integer array
            Processed sensor data, segmented into features (x) and labels (y)
        """
        # Select correct columns
        data = select_columns_opp(data)
        # drop the first retained column (presumably the timestamp — TODO confirm
        # against the OPPORTUNITY column documentation)
        data = data[:, 1:]
        # remap raw label ids to compact class indices
        # (columns 113/114 hold the locomotion / gesture labels after slicing)
        data[:, 113] = adjust_idx_labels(data[:, 113], 'locomotion').astype(int)
        data[:, 114] = adjust_idx_labels(data[:, 114], 'gestures').astype(int)
        # Perform linear interpolation (per channel, across missing samples)
        data[:, :113] = np.array([pd.Series(i).interpolate() for i in data[:, :113].T]).T
        # Remaining missing data are converted to zero
        data[:, :113][np.isnan(data[:, :113])] = 0
        # All sensor channels are normalized
        data[:, :113] = normalize(data[:, :113], NORM_MAX_THRESHOLDS, NORM_MIN_THRESHOLDS)
        return data

    def generate_data(dataset, target_filename):
        """Function to read the OPPORTUNITY challenge raw data and process all sensor channels
        :param dataset: string
            Path with original OPPORTUNITY zip file
        :param target_filename: string
            Processed file
        """
        # columns: subject id + 113 channels + 2 label columns
        full = np.empty((0, NB_SENSOR_CHANNELS+3))
        zf = zipfile.ZipFile(dataset)
        print('Processing dataset files ...')
        for sbj, filename in OPPORTUNITY_DATA_FILES:
            try:
                data = np.loadtxt(BytesIO(zf.read(filename)))
                print('... file {0}'.format(filename))
                _out = process_dataset_file(data)
                _sbj = np.full((len(_out), 1), sbj)
                _out = np.concatenate((_sbj, _out), axis=1)
                full = np.vstack((full, _out))
                print(filename, full.shape)
            except KeyError:
                # a listed .dat file is missing from the archive; skip it
                print('ERROR: Did not find {0} in zip file'.format(filename))
        # Dataset is segmented into train and test
        # (hardcoded sample count of the Ordonez split prefix)
        nb_test_samples = 676713
        print("Final dataset with size: {0} ".format(full.shape))
        # write full dataset
        pd.DataFrame(full, index=None).to_csv(os.path.join(target_filename + '_data.csv'), index=False, header=False)
        # write Ordonez split
        pd.DataFrame(full[:nb_test_samples, :], index=None).to_csv(target_filename + '_ordonez_data.csv', index=False, header=False)

    generate_data(os.path.join(folder, 'OpportunityUCIDataset.zip'), output_folder)
if __name__ == '__main__':
    # Build every processed dataset csv from the raw downloads under ../data/raw.
    # opportunity
    create_opportunity_dataset('../data/raw/opportunity', '../data/opportunity')
    # wetlab
    # stream selectors for the mkv reader: audio streams carry the sensor
    # features, subtitle streams carry the annotation labels
    feat = lambda streams: [s for s in streams if s.type == "audio"]
    label = lambda streams: [s for s in streams if s.type == "subtitle"]
    create_wetlab_data_from_mkvs(feat, label, '../data/raw/wetlab', 50).to_csv(
        '../data/wetlab_data.csv', index=False, header=False)
    # sbhar
    create_sbhar_dataset('../data/raw/sbhar').to_csv(
        '../data/sbhar_data.csv', index=False, header=False)
    # hhar
    create_hhar_dataset('../data/raw/hhar').to_csv(
        '../data/hhar_data.csv', index=False, header=False)
    # rwhar
    create_rwhar_dataset('../data/raw/rwhar').to_csv(
        '../data/rwhar_data.csv', index=False, header=False)
| <filename>data_processing/data_creation.py<gh_stars>1-10
from PyAV.av.io import read
import re
import numpy as np
import pandas as pd
from glob import glob
import os
from io import BytesIO
import zipfile
def milliseconds_to_hertz(start, end, rate):
    """
    Convert a millisecond interval into sample indices at the given rate.

    :param start: start time in milliseconds
    :param end: end time in milliseconds
    :param rate: sampling rate (Hz) employed during recording
    :return: (start, end) expressed as integer sample indices
    """
    samples_per_ms = rate / 1000

    def to_sample_index(timestamp_ms):
        # floor so a partially covered sample is never included
        return int(np.floor(float(timestamp_ms) * samples_per_ms))

    return to_sample_index(start), to_sample_index(end)
def create_wetlab_data_from_mkvs(feature_tracks, label_tracks, directory, sample_rate):
"""
Funtion which creates the a csv file using the WetLab mkv dataset files as input.
:param feature_tracks: tracks which contain the features that are to be used from the wetlab mkvs
:param label_tracks: tracks which contain the labels that are to be used from the wetlab mkvs
:param directory: directory where the resulting csv is to be saved to
:param sample_rate: sampling rate
:return: pandas dataframe containing wetlab features
"""
filenames = sorted(glob(os.path.join(directory, '*.mkv')))
# obtain unique labels
unique_labels = []
for filename in filenames:
unique_labels = np.unique(
np.concatenate((unique_labels, np.vstack(read(label_tracks, file=filename)[0])[:, 2])))
output = pd.DataFrame()
for i, filename in enumerate(filenames):
features = np.vstack(read(feature_tracks, file=filename)[0])
features = features / 2 ** 16 * 8 * 9.81
labels = np.vstack(read(label_tracks, file=filename)[0])
idx = np.full(len(features), i)
feat_output = pd.DataFrame(np.concatenate((np.array(idx)[:, None], features), axis=1))
action_label_output = pd.DataFrame(np.full(len(features), 0))
tasks_label_output = pd.DataFrame(np.full(len(features), 0))
for label_triplet in labels:
start, end = milliseconds_to_hertz(label_triplet[0], label_triplet[1], sample_rate)
if any(char.isdigit() for char in label_triplet[2]):
tasks_label_output[start:end] = label_triplet[2]
else:
action_label_output[start:end] = label_triplet[2]
temp_output = pd.concat((feat_output, action_label_output, tasks_label_output), axis=1)
if i == 0:
output = temp_output
else:
output = pd.concat((output, temp_output), axis=0)
print("Processed file: {0}".format(filename))
print("Value counts (Actions): ")
print(output.iloc[:, -2].value_counts())
print("Value counts (Tasks): ")
print(output.iloc[:, -1].value_counts())
return output
def create_sbhar_dataset(folder):
labels = np.loadtxt(os.path.join(folder, 'labels.txt'), delimiter=' ')
acc_data = [f for f in os.listdir(folder) if 'acc' in f]
# gyro_data = [f for f in os.listdir(folder) if 'gyro' in f]
output_data = None
for sbj in range(30):
if sbj < 9:
acc_sbj_files = [f for f in acc_data if 'user0' + str(sbj + 1) in f]
# gyro_sbj_files = [f for f in gyro_data if 'user0' + str(sbj + 1) in f]
else:
acc_sbj_files = [f for f in acc_data if 'user' + str(sbj + 1) in f]
# gyro_sbj_files = [f for f in gyro_data if 'user' + str(sbj + 1) in f]
sbj_data = None
# acc + gyro
for acc_sbj_file in acc_sbj_files:
acc_tmp_data = np.loadtxt(os.path.join(folder, acc_sbj_file), delimiter=' ')
sbj = re.sub('[^0-9]', '', acc_sbj_file.split('_')[2])
exp = re.sub('[^0-9]', '', acc_sbj_file.split('_')[1])
# gyro_tmp_data = np.loadtxt(os.path.join(folder, 'gyro_exp' + exp + '_user' + sbj + '.txt'), delimiter=' ')
sbj_labels = labels[(labels[:, 0] == int(exp)) & (labels[:, 1] == int(sbj))]
# tmp_data = np.concatenate((acc_tmp_data, gyro_tmp_data), axis=1)
tmp_data = np.concatenate((acc_tmp_data, np.zeros(acc_tmp_data.shape[0])[:, None]), axis=1)
for label_triplet in sbj_labels:
tmp_data[int(label_triplet[3]):int(label_triplet[4] + 1), -1] = label_triplet[2]
tmp_data = np.concatenate((np.full(tmp_data.shape[0], int(sbj) - 1)[:, None], tmp_data), axis=1)
if sbj_data is None:
sbj_data = tmp_data
else:
sbj_data = np.concatenate((sbj_data, tmp_data), axis=0)
if output_data is None:
output_data = sbj_data
else:
output_data = np.concatenate((output_data, sbj_data), axis=0)
return pd.DataFrame(output_data, index=None)
def create_hhar_dataset(folder):
data = pd.read_csv(os.path.join(folder, 'Watch_accelerometer.csv'))
user_dict = {
'a': 0.0,
'b': 1.0,
'c': 2.0,
'd': 3.0,
'e': 4.0,
'f': 5.0,
'g': 6.0,
'h': 7.0,
'i': 8.0,
}
data = data.replace({"User": user_dict})
data = data[['User', 'x', 'y', 'z', 'gt']]
data = data.fillna(0)
return data
def create_rwhar_dataset(folder):
"""
Author : <NAME>, <EMAIL>
:return:
"""
RWHAR_ACTIVITY_NUM = {
"climbingdown": 1,
"climbingup": 2,
"jumping": 3,
"lying": 4,
"running": 5,
"sitting": 6,
"standing": 7,
"walking": 8,
}
RWHAR_BAND_LOCATION = {
"chest": 1,
"forearm": 2,
"head": 3,
"shin": 4,
"thigh": 5,
"upperarm": 6,
"waist": 7,
}
def check_rwhar_zip(path):
# verify that the path is to the zip containing csv and not another zip of csv
if any(".zip" in filename for filename in zipfile.ZipFile(path, "r").namelist()):
# There are multiple zips in some cases
with zipfile.ZipFile(path, "r") as temp:
path = BytesIO(temp.read(
max(temp.namelist()))) # max chosen so the exact same acc and gyr files are selected each time (repeatability)
return path
def rwhar_load_csv(path):
# Loads up the csv at given path, returns a dictionary of data at each location
path = check_rwhar_zip(path)
tables_dict = {}
with zipfile.ZipFile(path, "r") as Zip:
zip_files = Zip.namelist()
for csv in zip_files:
if "csv" in csv:
location = RWHAR_BAND_LOCATION[
csv[csv.rfind("_") + 1:csv.rfind(".")]] # location is between last _ and .csv extension
sensor = csv[:3]
prefix = sensor.lower() + "_"
table = pd.read_csv(Zip.open(csv))
table.rename(columns={"attr_x": prefix + "x",
"attr_y": prefix + "y",
"attr_z": prefix + "z",
"attr_time": "timestamp",
}, inplace=True)
table.drop(columns="id", inplace=True)
tables_dict[location] = table
return tables_dict
def rwhar_load_table_activity(path_acc):
# Logic for loading each activity zip file for acc and gyr and then merging the tables at each location
acc_tables = rwhar_load_csv(path_acc)
data = pd.DataFrame()
for loc in acc_tables.keys():
acc_tab = acc_tables[loc]
acc_tab = pd.DataFrame(acc_tab)
acc_tab["location"] = loc
data = data.append(acc_tab)
return data
def clean_rwhar(filepath, sel_location=None):
# the function reads the files in RWHAR dataset and each subject and each activity labelled in a panda table
# filepath is the parent folder containing all the RWHAR dataset.
# Note: all entries are loaded but their timestamps are not syncronised. So a single location must be selected and
# all entries with NA must be dropped.
subject_dir = os.listdir(filepath)
dataset = pd.DataFrame()
for sub in subject_dir:
if "proband" not in sub:
continue
# files = os.listdir(filepath+sub)
# files = [file for file in files if (("acc" in file or "gyr" in file) and "csv" in file)]
subject_num = int(sub[7:]) - 1 # proband is 7 letters long so subject num is number following that
sub_pd = pd.DataFrame()
for activity in RWHAR_ACTIVITY_NUM.keys(): # pair the acc and gyr zips of the same activity
activity_name = "_" + activity + "_csv.zip"
path_acc = filepath + '/' + sub + "/acc" + activity_name # concat the path to acc file for given activity and subject
table = rwhar_load_table_activity(path_acc)
table["activity"] = RWHAR_ACTIVITY_NUM[activity] # add a activity column and fill it with activity num
sub_pd = sub_pd.append(table)
sub_pd["subject"] = subject_num # add subject id to all entries
dataset = dataset.append(sub_pd)
dataset = dataset.dropna()
dataset = dataset[dataset.location == RWHAR_BAND_LOCATION[sel_location]]
if sel_location is not None:
print("Selecting location : ", sel_location)
dataset = dataset[dataset.location == RWHAR_BAND_LOCATION[sel_location]]
dataset = dataset.drop(columns="location")
dataset = dataset.sort_values(by=['subject', 'timestamp'])
dataset = dataset.drop(columns="timestamp")
dataset = dataset.dropna()
print(dataset['activity'].value_counts())
return dataset
data = clean_rwhar(folder, sel_location='forearm')
data = data[['subject', 'acc_x', 'acc_y', 'acc_z', 'activity']]
return data
def create_opportunity_dataset(folder, output_folder):
# Hardcoded number of sensor channels employed in the OPPORTUNITY challenge
NB_SENSOR_CHANNELS = 113
# Hardcoded names of the files defining the OPPORTUNITY challenge data. As named in the original data.
OPPORTUNITY_DATA_FILES = [# Ordonez training
(0, 'OpportunityUCIDataset/dataset/S1-Drill.dat'),
(0, 'OpportunityUCIDataset/dataset/S1-ADL1.dat'),
(0, 'OpportunityUCIDataset/dataset/S1-ADL2.dat'),
(0, 'OpportunityUCIDataset/dataset/S1-ADL3.dat'),
(0, 'OpportunityUCIDataset/dataset/S1-ADL4.dat'),
(0, 'OpportunityUCIDataset/dataset/S1-ADL5.dat'),
(1, 'OpportunityUCIDataset/dataset/S2-Drill.dat'),
(1, 'OpportunityUCIDataset/dataset/S2-ADL1.dat'),
(1, 'OpportunityUCIDataset/dataset/S2-ADL2.dat'),
(2, 'OpportunityUCIDataset/dataset/S3-Drill.dat'),
(2, 'OpportunityUCIDataset/dataset/S3-ADL1.dat'),
(2, 'OpportunityUCIDataset/dataset/S3-ADL2.dat'),
# Ordonez validation
(1, 'OpportunityUCIDataset/dataset/S2-ADL3.dat'),
(2, 'OpportunityUCIDataset/dataset/S3-ADL3.dat'),
# Ordonez testing
(1, 'OpportunityUCIDataset/dataset/S2-ADL4.dat'),
(1, 'OpportunityUCIDataset/dataset/S2-ADL5.dat'),
(2, 'OpportunityUCIDataset/dataset/S3-ADL4.dat'),
(2, 'OpportunityUCIDataset/dataset/S3-ADL5.dat'),
# additional data
(3, 'OpportunityUCIDataset/dataset/S4-ADL1.dat'),
(3, 'OpportunityUCIDataset/dataset/S4-ADL2.dat'),
(3, 'OpportunityUCIDataset/dataset/S4-ADL3.dat'),
(3, 'OpportunityUCIDataset/dataset/S4-ADL4.dat'),
(3, 'OpportunityUCIDataset/dataset/S4-ADL5.dat'),
(3, 'OpportunityUCIDataset/dataset/S4-Drill.dat')
]
# Hardcoded thresholds to define global maximums and minimums for every one of the 113 sensor channels employed in the
# OPPORTUNITY challenge
NORM_MAX_THRESHOLDS = [3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000,
3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000,
3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000,
3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
3000, 3000, 3000, 10000, 10000, 10000, 1500, 1500, 1500,
250, 25, 200, 5000, 5000, 5000, 5000, 5000, 5000,
10000, 10000, 10000, 10000, 10000, 10000, 250, 250, 25,
200, 5000, 5000, 5000, 5000, 5000, 5000, 10000, 10000,
10000, 10000, 10000, 10000, 250, ]
NORM_MIN_THRESHOLDS = [-3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000,
-3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000,
-3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000,
-3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-3000, -3000, -3000, -10000, -10000, -10000, -1000, -1000, -1000,
-250, -100, -200, -5000, -5000, -5000, -5000, -5000, -5000,
-10000, -10000, -10000, -10000, -10000, -10000, -250, -250, -100,
-200, -5000, -5000, -5000, -5000, -5000, -5000, -10000, -10000,
-10000, -10000, -10000, -10000, -250, ]
def select_columns_opp(data):
"""Selection of the 113 columns employed in the OPPORTUNITY challenge
:param data: numpy integer matrix
Sensor data (all features)
:return: numpy integer matrix
Selection of features
"""
# included-excluded
features_delete = np.arange(46, 50)
features_delete = np.concatenate([features_delete, np.arange(59, 63)])
features_delete = np.concatenate([features_delete, np.arange(72, 76)])
features_delete = np.concatenate([features_delete, np.arange(85, 89)])
features_delete = np.concatenate([features_delete, np.arange(98, 102)])
features_delete = np.concatenate([features_delete, np.arange(134, 243)])
features_delete = np.concatenate([features_delete, np.arange(244, 249)])
return np.delete(data, features_delete, 1)
def normalize(data, max_list, min_list):
"""Normalizes all sensor channels
:param data: numpy integer matrix
Sensor data
:param max_list: numpy integer array
Array containing maximums values for every one of the 113 sensor channels
:param min_list: numpy integer array
Array containing minimum values for every one of the 113 sensor channels
:return:
Normalized sensor data
"""
max_list, min_list = np.array(max_list), np.array(min_list)
diffs = max_list - min_list
for i in np.arange(data.shape[1]):
data[:, i] = (data[:, i] - min_list[i]) / diffs[i]
# Checking the boundaries
data[data > 1] = 0.99
data[data < 0] = 0.00
return data
def adjust_idx_labels(data_y, label):
"""Transforms original labels into the range [0, nb_labels-1]
:param data_y: numpy integer array
Sensor labels
:param label: string, ['gestures' (default), 'locomotion']
Type of activities to be recognized
:return: numpy integer array
Modified sensor labels
"""
if label == 'locomotion': # Labels for locomotion are adjusted
data_y[data_y == 4] = 3
data_y[data_y == 5] = 4
elif label == 'gestures': # Labels for gestures are adjusted
data_y[data_y == 406516] = 1
data_y[data_y == 406517] = 2
data_y[data_y == 404516] = 3
data_y[data_y == 404517] = 4
data_y[data_y == 406520] = 5
data_y[data_y == 404520] = 6
data_y[data_y == 406505] = 7
data_y[data_y == 404505] = 8
data_y[data_y == 406519] = 9
data_y[data_y == 404519] = 10
data_y[data_y == 406511] = 11
data_y[data_y == 404511] = 12
data_y[data_y == 406508] = 13
data_y[data_y == 404508] = 14
data_y[data_y == 408512] = 15
data_y[data_y == 407521] = 16
data_y[data_y == 405506] = 17
return data_y
def process_dataset_file(data):
"""Function defined as a pipeline to process individual OPPORTUNITY files
:param data: numpy integer matrix
Matrix containing data samples (rows) for every sensor channel (column)
:return: numpy integer matrix, numy integer array
Processed sensor data, segmented into features (x) and labels (y)
"""
# Select correct columns
data = select_columns_opp(data)
data = data[:, 1:]
# adjust labels to be counting from 1
data[:, 113] = adjust_idx_labels(data[:, 113], 'locomotion').astype(int)
data[:, 114] = adjust_idx_labels(data[:, 114], 'gestures').astype(int)
# Perform linear interpolation
data[:, :113] = np.array([pd.Series(i).interpolate() for i in data[:, :113].T]).T
# Remaining missing data are converted to zero
data[:, :113][np.isnan(data[:, :113])] = 0
# All sensor channels are normalized
data[:, :113] = normalize(data[:, :113], NORM_MAX_THRESHOLDS, NORM_MIN_THRESHOLDS)
return data
def generate_data(dataset, target_filename):
"""Function to read the OPPORTUNITY challenge raw data and process all sensor channels
:param dataset: string
Path with original OPPORTUNITY zip file
:param target_filename: string
Processed file
"""
full = np.empty((0, NB_SENSOR_CHANNELS+3))
zf = zipfile.ZipFile(dataset)
print('Processing dataset files ...')
for sbj, filename in OPPORTUNITY_DATA_FILES:
try:
data = np.loadtxt(BytesIO(zf.read(filename)))
print('... file {0}'.format(filename))
_out = process_dataset_file(data)
_sbj = np.full((len(_out), 1), sbj)
_out = np.concatenate((_sbj, _out), axis=1)
full = np.vstack((full, _out))
print(filename, full.shape)
except KeyError:
print('ERROR: Did not find {0} in zip file'.format(filename))
# Dataset is segmented into train and test
nb_test_samples = 676713
print("Final dataset with size: {0} ".format(full.shape))
# write full dataset
pd.DataFrame(full, index=None).to_csv(os.path.join(target_filename + '_data.csv'), index=False, header=False)
# write Ordonez split
pd.DataFrame(full[:nb_test_samples, :], index=None).to_csv(target_filename + '_ordonez_data.csv', index=False, header=False)
generate_data(os.path.join(folder, 'OpportunityUCIDataset.zip'), output_folder)
if __name__ == '__main__':
# opportunity
create_opportunity_dataset('../data/raw/opportunity', '../data/opportunity')
# wetlab
feat = lambda streams: [s for s in streams if s.type == "audio"]
label = lambda streams: [s for s in streams if s.type == "subtitle"]
create_wetlab_data_from_mkvs(feat, label, '../data/raw/wetlab', 50).to_csv(
'../data/wetlab_data.csv', index=False, header=False)
# sbhar
create_sbhar_dataset('../data/raw/sbhar').to_csv(
'../data/sbhar_data.csv', index=False, header=False)
# hhar
create_hhar_dataset('../data/raw/hhar').to_csv(
'../data/hhar_data.csv', index=False, header=False)
# rwhar
create_rwhar_dataset('../data/raw/rwhar').to_csv(
'../data/rwhar_data.csv', index=False, header=False)
| en | 0.776996 | Function which converts milliseconds to hertz timestamps :param start: start time in milliseconds :param end: end time in milliseconds :param rate: employed sampling rate during recording :return: start and end time in hertz Funtion which creates the a csv file using the WetLab mkv dataset files as input. :param feature_tracks: tracks which contain the features that are to be used from the wetlab mkvs :param label_tracks: tracks which contain the labels that are to be used from the wetlab mkvs :param directory: directory where the resulting csv is to be saved to :param sample_rate: sampling rate :return: pandas dataframe containing wetlab features # obtain unique labels # gyro_data = [f for f in os.listdir(folder) if 'gyro' in f] # gyro_sbj_files = [f for f in gyro_data if 'user0' + str(sbj + 1) in f] # gyro_sbj_files = [f for f in gyro_data if 'user' + str(sbj + 1) in f] # acc + gyro # gyro_tmp_data = np.loadtxt(os.path.join(folder, 'gyro_exp' + exp + '_user' + sbj + '.txt'), delimiter=' ') # tmp_data = np.concatenate((acc_tmp_data, gyro_tmp_data), axis=1) Author : <NAME>, <EMAIL> :return: # verify that the path is to the zip containing csv and not another zip of csv # There are multiple zips in some cases # max chosen so the exact same acc and gyr files are selected each time (repeatability) # Loads up the csv at given path, returns a dictionary of data at each location # location is between last _ and .csv extension # Logic for loading each activity zip file for acc and gyr and then merging the tables at each location # the function reads the files in RWHAR dataset and each subject and each activity labelled in a panda table # filepath is the parent folder containing all the RWHAR dataset. # Note: all entries are loaded but their timestamps are not syncronised. So a single location must be selected and # all entries with NA must be dropped. 
# files = os.listdir(filepath+sub) # files = [file for file in files if (("acc" in file or "gyr" in file) and "csv" in file)] # proband is 7 letters long so subject num is number following that # pair the acc and gyr zips of the same activity # concat the path to acc file for given activity and subject # add a activity column and fill it with activity num # add subject id to all entries # Hardcoded number of sensor channels employed in the OPPORTUNITY challenge # Hardcoded names of the files defining the OPPORTUNITY challenge data. As named in the original data. # Ordonez training # Ordonez validation # Ordonez testing # additional data # Hardcoded thresholds to define global maximums and minimums for every one of the 113 sensor channels employed in the # OPPORTUNITY challenge Selection of the 113 columns employed in the OPPORTUNITY challenge :param data: numpy integer matrix Sensor data (all features) :return: numpy integer matrix Selection of features # included-excluded Normalizes all sensor channels :param data: numpy integer matrix Sensor data :param max_list: numpy integer array Array containing maximums values for every one of the 113 sensor channels :param min_list: numpy integer array Array containing minimum values for every one of the 113 sensor channels :return: Normalized sensor data # Checking the boundaries Transforms original labels into the range [0, nb_labels-1] :param data_y: numpy integer array Sensor labels :param label: string, ['gestures' (default), 'locomotion'] Type of activities to be recognized :return: numpy integer array Modified sensor labels # Labels for locomotion are adjusted # Labels for gestures are adjusted Function defined as a pipeline to process individual OPPORTUNITY files :param data: numpy integer matrix Matrix containing data samples (rows) for every sensor channel (column) :return: numpy integer matrix, numy integer array Processed sensor data, segmented into features (x) and labels (y) # Select correct columns # adjust 
labels to be counting from 1 # Perform linear interpolation # Remaining missing data are converted to zero # All sensor channels are normalized Function to read the OPPORTUNITY challenge raw data and process all sensor channels :param dataset: string Path with original OPPORTUNITY zip file :param target_filename: string Processed file # Dataset is segmented into train and test # write full dataset # write Ordonez split # opportunity # wetlab # sbhar # hhar # rwhar | 3.027001 | 3 |
bql/execution/runBenchmarks.py | sensorbee/sensorbee | 228 | 6622824 | #!/usr/bin/python
import os
import sys
import numpy
results = {}
for i in range(10):
output = os.popen("go test -run=XYZ -bench=.").read()
lines = output.strip().split("\n")
header = lines[0]
if not header == "PASS":
print "failed to run tests"
sys.exit(1)
for benchmark in lines[1:-1]:
name = benchmark.split()[0]
duration = benchmark.split()[-2]
if name in results:
results[name].append(int(duration))
else:
results[name] = [int(duration)]
for name in sorted(results.keys()):
values = results[name]
print name + "\t" + str(numpy.mean(values)) + "\t" + str(numpy.std(values))
| #!/usr/bin/python
import os
import sys
import numpy
results = {}
for i in range(10):
output = os.popen("go test -run=XYZ -bench=.").read()
lines = output.strip().split("\n")
header = lines[0]
if not header == "PASS":
print "failed to run tests"
sys.exit(1)
for benchmark in lines[1:-1]:
name = benchmark.split()[0]
duration = benchmark.split()[-2]
if name in results:
results[name].append(int(duration))
else:
results[name] = [int(duration)]
for name in sorted(results.keys()):
values = results[name]
print name + "\t" + str(numpy.mean(values)) + "\t" + str(numpy.std(values))
| ru | 0.258958 | #!/usr/bin/python | 2.518391 | 3 |
kraken/performance_scenarios/sql_histogram.py | FeixiDev/VersaTST | 2 | 6622825 | import sqlite3
import numpy as np
import matplotlib as mpl
from prettytable.prettytable import from_db_cursor
import yaml
mpl.use ('Agg')
# mpl.use ('TKAgg')
import matplotlib.pyplot as plt
import kraken.performance_scenarios.utils as utils
import kraken.performance_scenarios.log as log
import kraken.performance_scenarios.sql_chart as gcdb
class graph_histogram_manual (object):
def __init__(self):
a_yaml_file = open('./scenarios/P_sql_config.yml')
self.a = yaml.load(a_yaml_file, Loader = yaml.FullLoader)
def graph_histogram_output(self):
con = sqlite3.connect ('sqldatabase_test.db') # create connection object and database file
cur = con.cursor() # create a cursor for connection object
text_table = []
drbd = []
values = []
for i in range(len(self.a['Table_hist'])):
sql_sentence = 'SELECT Text_Table_Name, DRBD_Type,' + ' ' + self.a['Select_Data_hist'] + ' ' + 'FROM Index_table,' + self.a['Table_hist'][i] \
+ ' ' + 'WHERE Readwrite_type = ' + self.a['Readwrite_hist']\
+ ' ' + 'AND Number_of_Job = ' + self.a['Number_of_Job_hist']\
+ ' ' + 'AND IOdepth = ' + self.a['IOdepth_hist']\
+ ' ' + 'AND blocksize = ' + self.a['Blocksize_hist']\
+ ' ' + 'AND Index_table.Key_ID =' + ' ' + self.a['Table_hist'][i] + '.Key_ID' \
sql_result = cur.execute(sql_sentence)
for row in sql_result:
text_table.append(row[0])
drbd.append(row[1])
values.append(row[2])
# print(row)
plt.figure(figsize=(20,20), dpi = 100)
bar_width = 0.3
for i in range(len(drbd)):
x_data = drbd[i]
y_data = values[i]
plt.bar(x_data, y_data, label = text_table[i], width = bar_width)
plt.xlabel ('DRBD Type')
plt.ylabel (self.a['Select_Data_hist'])
plt.xticks (rotation = 30)
for x,y in zip(drbd,values):
plt.text(x, y+0.05, '%.2f' % y, ha = 'center', va = 'bottom', fontsize = 11)
plt.title(self.a['Select_Data_hist'] + ' ' + 'under Different DRBD Type (Readwrite type = ' + self.a['Readwrite_hist'] + ', Blockszie =' + self.a['Blocksize_hist'] + ')')
plt.legend()
plt.grid()
save_file_name = str(self.a['Table_hist'][0]) + '-' + 'histogram' + '-' + str(self.a['Select_Data_hist']) + '-' + self.a['Readwrite_hist'] + '-' + str(self.a['Blocksize_hist']) + '.png'
plt.legend()
plt.grid()
plt.savefig(save_file_name)
plt.draw()
plt.close(1)
# plt.show()
cur.close()
con.commit()
con.close()
class sql_graph_histogram (object):
def __init__(self,Table_Names):
self.table_n = Table_Names
get_info = gcdb.Get_info_db(self.table_n)
self.rwType_info = get_info.get_rwType_db()
self.nj_info = get_info.get_nj_db()
self.IOPS_4K_list = ['IOPS','4k']
self.MBPS_1M_list = ['MBPS','1M']
def get_data_histogram(self,IO_MB_list):
con = sqlite3.connect ('sqldatabase_test.db')
cur = con.cursor()
self.IO_MB_list = IO_MB_list
file_name_list = []
self.drbd = []
values = []
for i in range(len(self.rwType_info)):
nj_info ='"{}"'.format(self.nj_info[0])
bs = '"{}"'.format(self.IO_MB_list[1])
rw = '"{}"'.format(self.rwType_info[i])
sql_sentence = 'SELECT DRBD_Type,' + ' ' + self.IO_MB_list[0] + ' ' + 'FROM Index_table,' + self.table_n \
+ ' ' + 'WHERE Readwrite_type = ' + rw\
+ ' ' + 'AND Number_of_Job = ' + nj_info \
+ ' ' + 'AND IOdepth = "8"'\
+ ' ' + 'AND blocksize =' + bs \
+ ' ' + 'AND Index_table.Key_ID =' + ' ' + self.table_n + '.Key_ID' \
sql_result = cur.execute(sql_sentence)
file_name = 'histogram' + '-' + self.IO_MB_list[0] + '-' + self.rwType_info[i] + '-' + self.IO_MB_list[1]
file_name_list.append(file_name)
for row in sql_result:
self.drbd.append(row[0])
values.append(row[1])
cur.close()
con.commit()
con.close()
return file_name_list,values
def draw_graph_histogram(self,fn_list,values):
drbd_t = list(set(self.drbd))
if len(drbd_t) == 0:
utils.prt_log('', f"Table has not bs=4k or bs=1M data", 1)
drbd_t.sort(key=self.drbd.index)
utils.prt_log('', drbd_t, 0)
num_of_device = len(drbd_t)
plt.figure(figsize=(20,20), dpi = 100)
bar_width = 0.3
flag = 0
for b in [values[i:i + num_of_device] for i in range(0, len(values), num_of_device)]:
# print('bbbbb',b)
for i in range(len(drbd_t)):
x_data = self.drbd[i]
y_data = b[i]
plt.bar(x_data, y_data, label=drbd_t[i],width = bar_width)
plt.title(fn_list[flag])
plt.xlabel ('DRBD Type')
if 'IOPS' in str(fn_list[0].split('-')):
plt.ylabel (self.IOPS_4K_list[0])
else:
plt.ylabel (self.MBPS_1M_list[0])
plt.xticks (rotation = 30)
plt.legend()
plt.grid()
png_name = self.table_n +'-' + fn_list[flag]
plt.savefig(f"./kraken/performance_scenarios/performance_data/{png_name}")
str1 = 'Save ' + png_name + '.png to performance_data directory , Done'
utils.write_log('', str1, 0)
flag +=1
if flag == len(self.rwType_info):
flag = 0
plt.draw()
plt.close()
# plt.show()
if __name__ == '__main__':
pass
# sql_graph_output()
| import sqlite3
import numpy as np
import matplotlib as mpl
from prettytable.prettytable import from_db_cursor
import yaml
mpl.use ('Agg')
# mpl.use ('TKAgg')
import matplotlib.pyplot as plt
import kraken.performance_scenarios.utils as utils
import kraken.performance_scenarios.log as log
import kraken.performance_scenarios.sql_chart as gcdb
class graph_histogram_manual (object):
def __init__(self):
a_yaml_file = open('./scenarios/P_sql_config.yml')
self.a = yaml.load(a_yaml_file, Loader = yaml.FullLoader)
def graph_histogram_output(self):
con = sqlite3.connect ('sqldatabase_test.db') # create connection object and database file
cur = con.cursor() # create a cursor for connection object
text_table = []
drbd = []
values = []
for i in range(len(self.a['Table_hist'])):
sql_sentence = 'SELECT Text_Table_Name, DRBD_Type,' + ' ' + self.a['Select_Data_hist'] + ' ' + 'FROM Index_table,' + self.a['Table_hist'][i] \
+ ' ' + 'WHERE Readwrite_type = ' + self.a['Readwrite_hist']\
+ ' ' + 'AND Number_of_Job = ' + self.a['Number_of_Job_hist']\
+ ' ' + 'AND IOdepth = ' + self.a['IOdepth_hist']\
+ ' ' + 'AND blocksize = ' + self.a['Blocksize_hist']\
+ ' ' + 'AND Index_table.Key_ID =' + ' ' + self.a['Table_hist'][i] + '.Key_ID' \
sql_result = cur.execute(sql_sentence)
for row in sql_result:
text_table.append(row[0])
drbd.append(row[1])
values.append(row[2])
# print(row)
plt.figure(figsize=(20,20), dpi = 100)
bar_width = 0.3
for i in range(len(drbd)):
x_data = drbd[i]
y_data = values[i]
plt.bar(x_data, y_data, label = text_table[i], width = bar_width)
plt.xlabel ('DRBD Type')
plt.ylabel (self.a['Select_Data_hist'])
plt.xticks (rotation = 30)
for x,y in zip(drbd,values):
plt.text(x, y+0.05, '%.2f' % y, ha = 'center', va = 'bottom', fontsize = 11)
plt.title(self.a['Select_Data_hist'] + ' ' + 'under Different DRBD Type (Readwrite type = ' + self.a['Readwrite_hist'] + ', Blockszie =' + self.a['Blocksize_hist'] + ')')
plt.legend()
plt.grid()
save_file_name = str(self.a['Table_hist'][0]) + '-' + 'histogram' + '-' + str(self.a['Select_Data_hist']) + '-' + self.a['Readwrite_hist'] + '-' + str(self.a['Blocksize_hist']) + '.png'
plt.legend()
plt.grid()
plt.savefig(save_file_name)
plt.draw()
plt.close(1)
# plt.show()
cur.close()
con.commit()
con.close()
class sql_graph_histogram (object):
def __init__(self,Table_Names):
self.table_n = Table_Names
get_info = gcdb.Get_info_db(self.table_n)
self.rwType_info = get_info.get_rwType_db()
self.nj_info = get_info.get_nj_db()
self.IOPS_4K_list = ['IOPS','4k']
self.MBPS_1M_list = ['MBPS','1M']
def get_data_histogram(self,IO_MB_list):
con = sqlite3.connect ('sqldatabase_test.db')
cur = con.cursor()
self.IO_MB_list = IO_MB_list
file_name_list = []
self.drbd = []
values = []
for i in range(len(self.rwType_info)):
nj_info ='"{}"'.format(self.nj_info[0])
bs = '"{}"'.format(self.IO_MB_list[1])
rw = '"{}"'.format(self.rwType_info[i])
sql_sentence = 'SELECT DRBD_Type,' + ' ' + self.IO_MB_list[0] + ' ' + 'FROM Index_table,' + self.table_n \
+ ' ' + 'WHERE Readwrite_type = ' + rw\
+ ' ' + 'AND Number_of_Job = ' + nj_info \
+ ' ' + 'AND IOdepth = "8"'\
+ ' ' + 'AND blocksize =' + bs \
+ ' ' + 'AND Index_table.Key_ID =' + ' ' + self.table_n + '.Key_ID' \
sql_result = cur.execute(sql_sentence)
file_name = 'histogram' + '-' + self.IO_MB_list[0] + '-' + self.rwType_info[i] + '-' + self.IO_MB_list[1]
file_name_list.append(file_name)
for row in sql_result:
self.drbd.append(row[0])
values.append(row[1])
cur.close()
con.commit()
con.close()
return file_name_list,values
def draw_graph_histogram(self,fn_list,values):
drbd_t = list(set(self.drbd))
if len(drbd_t) == 0:
utils.prt_log('', f"Table has not bs=4k or bs=1M data", 1)
drbd_t.sort(key=self.drbd.index)
utils.prt_log('', drbd_t, 0)
num_of_device = len(drbd_t)
plt.figure(figsize=(20,20), dpi = 100)
bar_width = 0.3
flag = 0
for b in [values[i:i + num_of_device] for i in range(0, len(values), num_of_device)]:
# print('bbbbb',b)
for i in range(len(drbd_t)):
x_data = self.drbd[i]
y_data = b[i]
plt.bar(x_data, y_data, label=drbd_t[i],width = bar_width)
plt.title(fn_list[flag])
plt.xlabel ('DRBD Type')
if 'IOPS' in str(fn_list[0].split('-')):
plt.ylabel (self.IOPS_4K_list[0])
else:
plt.ylabel (self.MBPS_1M_list[0])
plt.xticks (rotation = 30)
plt.legend()
plt.grid()
png_name = self.table_n +'-' + fn_list[flag]
plt.savefig(f"./kraken/performance_scenarios/performance_data/{png_name}")
str1 = 'Save ' + png_name + '.png to performance_data directory , Done'
utils.write_log('', str1, 0)
flag +=1
if flag == len(self.rwType_info):
flag = 0
plt.draw()
plt.close()
# plt.show()
if __name__ == '__main__':
pass
# sql_graph_output()
| en | 0.250364 | # mpl.use ('TKAgg') # create connection object and database file # create a cursor for connection object # print(row) # plt.show() # print('bbbbb',b) # plt.show() # sql_graph_output() | 2.392496 | 2 |
OOP/Shop/project/product.py | petel3/Softuni_education | 2 | 6622826 | <gh_stars>1-10
class Product:
def __init__(self,name,quantity:int):
self.name = name
self.quantity = quantity
def decrease(self,quantity:int):
if self.quantity>=quantity:
self.quantity-=quantity
def increase(self,quantity:int):
self.quantity+=quantity
def __repr__(self):
return self.name
| class Product:
def __init__(self,name,quantity:int):
self.name = name
self.quantity = quantity
def decrease(self,quantity:int):
if self.quantity>=quantity:
self.quantity-=quantity
def increase(self,quantity:int):
self.quantity+=quantity
def __repr__(self):
return self.name | none | 1 | 3.54 | 4 | |
pyqt5/yaziresimekle.py | onselaydin/pytry | 0 | 6622827 | <gh_stars>0
import sys
from PyQt5 import QtWidgets,QtGui
import os
def Pencere():
app = QtWidgets.QApplication(sys.argv)
pencere = QtWidgets.QWidget()
pencere.setWindowTitle("Test deneme kontrol başlık")
etiket1 = QtWidgets.QLabel(pencere)
etiket1.setText("AD SOYAD:")
etiket1.move(150,40)
resim1 = QtWidgets.QLabel(pencere)
resim1.setPixmap(QtGui.QPixmap("/home/onsel/Documents/pytry/pyqt5/python.png"))
resim1.move(90,100)
pencere.setGeometry(100,100,500,500) #pencere boyutu tespiti
pencere.show()
app.exec_()
Pencere() | import sys
from PyQt5 import QtWidgets,QtGui
import os
def Pencere():
app = QtWidgets.QApplication(sys.argv)
pencere = QtWidgets.QWidget()
pencere.setWindowTitle("Test deneme kontrol başlık")
etiket1 = QtWidgets.QLabel(pencere)
etiket1.setText("AD SOYAD:")
etiket1.move(150,40)
resim1 = QtWidgets.QLabel(pencere)
resim1.setPixmap(QtGui.QPixmap("/home/onsel/Documents/pytry/pyqt5/python.png"))
resim1.move(90,100)
pencere.setGeometry(100,100,500,500) #pencere boyutu tespiti
pencere.show()
app.exec_()
Pencere() | tr | 0.429385 | #pencere boyutu tespiti | 2.691059 | 3 |
hooks/webkitpy/common/checkout/changelog_unittest.py | nizovn/luna-sysmgr | 3 | 6622828 | <filename>hooks/webkitpy/common/checkout/changelog_unittest.py
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import with_statement
import codecs
import os
import tempfile
import unittest
from StringIO import StringIO
from webkitpy.common.checkout.changelog import *
class ChangeLogTest(unittest.TestCase):
_example_entry = u'''2009-08-17 <NAME> <<EMAIL>>
Reviewed by <NAME>b\xf8.
https://bugs.webkit.org/show_bug.cgi?id=27323
Only add Cygwin to the path when it isn't already there. This avoids
causing problems for people who purposefully have non-Cygwin versions of
executables like svn in front of the Cygwin ones in their paths.
* DumpRenderTree/win/DumpRenderTree.vcproj:
* DumpRenderTree/win/ImageDiff.vcproj:
* DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj:
'''
_rolled_over_footer = '== Rolled over to ChangeLog-2009-06-16 =='
# More example text than we need. Eventually we need to support parsing this all and write tests for the parsing.
_example_changelog = u"""2009-08-17 <NAME>\xf8 <<EMAIL>>
<http://webkit.org/b/28393> check-webkit-style: add check for use of std::max()/std::min() instead of MAX()/MIN()
Reviewed by <NAME>.
* Scripts/modules/cpp_style.py:
(_ERROR_CATEGORIES): Added 'runtime/max_min_macros'.
(check_max_min_macros): Added. Returns level 4 error when MAX()
and MIN() macros are used in header files and C++ source files.
(check_style): Added call to check_max_min_macros().
* Scripts/modules/cpp_style_unittest.py: Added unit tests.
(test_max_macro): Added.
(test_min_macro): Added.
2009-08-16 <NAME> <<EMAIL>>
Backed out r47343 which was mistakenly committed
* Scripts/bugzilla-tool:
* Scripts/modules/scm.py:
2009-06-18 <NAME> <<EMAIL>>
Rubber stamped by <NAME>.
* DumpRenderTree/mac/DumpRenderTreeWindow.mm:
(-[DumpRenderTreeWindow close]): Resolved crashes seen during regression
tests. The close method can be called on a window that's already closed
so we can't assert here.
== Rolled over to ChangeLog-2009-06-16 ==
"""
def test_parse_bug_id_from_changelog(self):
commit_text = '''
2011-03-23 <NAME> <<EMAIL>>
Add failing result for WebKit2. All tests that require
focus fail on WebKit2. See https://bugs.webkit.org/show_bug.cgi?id=56988.
* platform/mac-wk2/fast/css/pseudo-any-expected.txt: Added.
'''
self.assertEquals(56988, parse_bug_id_from_changelog(commit_text))
commit_text = '''
2011-03-23 <NAME> <<EMAIL>>
Add failing result for WebKit2. All tests that require
focus fail on WebKit2. See https://bugs.webkit.org/show_bug.cgi?id=56988.
https://bugs.webkit.org/show_bug.cgi?id=12345
* platform/mac-wk2/fast/css/pseudo-any-expected.txt: Added.
'''
self.assertEquals(12345, parse_bug_id_from_changelog(commit_text))
commit_text = '''
2011-03-31 <NAME> <<EMAIL>>
Quote the executable path we pass to ::CreateProcessW
This will ensure that spaces in the path will be interpreted correctly.
Fixes <http://webkit.org/b/57569> Web process sometimes fails to launch when there are
spaces in its path
Reviewed by <NAME>.
* UIProcess/Launcher/win/ProcessLauncherWin.cpp:
(WebKit::ProcessLauncher::launchProcess): Surround the executable path in quotes.
'''
self.assertEquals(57569, parse_bug_id_from_changelog(commit_text))
commit_text = '''
2011-03-29 <NAME> <<EMAIL>>
Update WebCore Localizable.strings to contain WebCore, WebKit/mac and WebKit2 strings.
https://webkit.org/b/57354
Reviewed by <NAME>.
* English.lproj/Localizable.strings: Updated.
* StringsNotToBeLocalized.txt: Removed. To hard to maintain in WebCore.
* platform/network/cf/LoaderRunLoopCF.h: Remove a single quote in an #error so
extract-localizable-strings does not complain about unbalanced single quotes.
'''
self.assertEquals(57354, parse_bug_id_from_changelog(commit_text))
def test_parse_log_entries_from_changelog(self):
changelog_file = StringIO(self._example_changelog)
parsed_entries = list(ChangeLog.parse_entries_from_file(changelog_file))
self.assertEquals(len(parsed_entries), 3)
self.assertEquals(parsed_entries[0].reviewer_text(), "<NAME>")
self.assertEquals(parsed_entries[1].author_email(), "<EMAIL>")
self.assertEquals(parsed_entries[2].touched_files(), ["DumpRenderTree/mac/DumpRenderTreeWindow.mm"])
def test_latest_entry_parse(self):
changelog_contents = u"%s\n%s" % (self._example_entry, self._example_changelog)
changelog_file = StringIO(changelog_contents)
latest_entry = ChangeLog.parse_latest_entry_from_file(changelog_file)
self.assertEquals(latest_entry.contents(), self._example_entry)
self.assertEquals(latest_entry.author_name(), "<NAME>")
self.assertEquals(latest_entry.author_email(), "<EMAIL>")
self.assertEquals(latest_entry.reviewer_text(), u"<NAME>\xf8")
self.assertEquals(latest_entry.touched_files(), ["DumpRenderTree/win/DumpRenderTree.vcproj", "DumpRenderTree/win/ImageDiff.vcproj", "DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj"])
self.assertTrue(latest_entry.reviewer()) # Make sure that our UTF8-based lookup of Tor works.
def test_latest_entry_parse_single_entry(self):
changelog_contents = u"%s\n%s" % (self._example_entry, self._rolled_over_footer)
changelog_file = StringIO(changelog_contents)
latest_entry = ChangeLog.parse_latest_entry_from_file(changelog_file)
self.assertEquals(latest_entry.contents(), self._example_entry)
self.assertEquals(latest_entry.author_name(), "<NAME>")
@staticmethod
def _write_tmp_file_with_contents(byte_array):
assert(isinstance(byte_array, str))
(file_descriptor, file_path) = tempfile.mkstemp() # NamedTemporaryFile always deletes the file on close in python < 2.6
with os.fdopen(file_descriptor, "w") as file:
file.write(byte_array)
return file_path
@staticmethod
def _read_file_contents(file_path, encoding):
with codecs.open(file_path, "r", encoding) as file:
return file.read()
# FIXME: We really should be getting this from prepare-ChangeLog itself.
_new_entry_boilerplate = '''2009-08-19 <NAME> <<EMAIL>>
Need a short description and bug URL (OOPS!)
Reviewed by NOBODY (OOPS!).
* Scripts/bugzilla-tool:
'''
def test_set_reviewer(self):
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
reviewer_name = 'Test Reviewer'
ChangeLog(changelog_path).set_reviewer(reviewer_name)
actual_contents = self._read_file_contents(changelog_path, "utf-8")
expected_contents = changelog_contents.replace('NOBODY (OOPS!)', reviewer_name)
os.remove(changelog_path)
self.assertEquals(actual_contents.splitlines(), expected_contents.splitlines())
def test_set_short_description_and_bug_url(self):
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
short_description = "A short description"
bug_url = "http://example.com/b/2344"
ChangeLog(changelog_path).set_short_description_and_bug_url(short_description, bug_url)
actual_contents = self._read_file_contents(changelog_path, "utf-8")
expected_message = "%s\n %s" % (short_description, bug_url)
expected_contents = changelog_contents.replace("Need a short description and bug URL (OOPS!)", expected_message)
os.remove(changelog_path)
self.assertEquals(actual_contents.splitlines(), expected_contents.splitlines())
| <filename>hooks/webkitpy/common/checkout/changelog_unittest.py
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import with_statement
import codecs
import os
import tempfile
import unittest
from StringIO import StringIO
from webkitpy.common.checkout.changelog import *
class ChangeLogTest(unittest.TestCase):
_example_entry = u'''2009-08-17 <NAME> <<EMAIL>>
Reviewed by <NAME>b\xf8.
https://bugs.webkit.org/show_bug.cgi?id=27323
Only add Cygwin to the path when it isn't already there. This avoids
causing problems for people who purposefully have non-Cygwin versions of
executables like svn in front of the Cygwin ones in their paths.
* DumpRenderTree/win/DumpRenderTree.vcproj:
* DumpRenderTree/win/ImageDiff.vcproj:
* DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj:
'''
_rolled_over_footer = '== Rolled over to ChangeLog-2009-06-16 =='
# More example text than we need. Eventually we need to support parsing this all and write tests for the parsing.
_example_changelog = u"""2009-08-17 <NAME>\xf8 <<EMAIL>>
<http://webkit.org/b/28393> check-webkit-style: add check for use of std::max()/std::min() instead of MAX()/MIN()
Reviewed by <NAME>.
* Scripts/modules/cpp_style.py:
(_ERROR_CATEGORIES): Added 'runtime/max_min_macros'.
(check_max_min_macros): Added. Returns level 4 error when MAX()
and MIN() macros are used in header files and C++ source files.
(check_style): Added call to check_max_min_macros().
* Scripts/modules/cpp_style_unittest.py: Added unit tests.
(test_max_macro): Added.
(test_min_macro): Added.
2009-08-16 <NAME> <<EMAIL>>
Backed out r47343 which was mistakenly committed
* Scripts/bugzilla-tool:
* Scripts/modules/scm.py:
2009-06-18 <NAME> <<EMAIL>>
Rubber stamped by <NAME>.
* DumpRenderTree/mac/DumpRenderTreeWindow.mm:
(-[DumpRenderTreeWindow close]): Resolved crashes seen during regression
tests. The close method can be called on a window that's already closed
so we can't assert here.
== Rolled over to ChangeLog-2009-06-16 ==
"""
def test_parse_bug_id_from_changelog(self):
commit_text = '''
2011-03-23 <NAME> <<EMAIL>>
Add failing result for WebKit2. All tests that require
focus fail on WebKit2. See https://bugs.webkit.org/show_bug.cgi?id=56988.
* platform/mac-wk2/fast/css/pseudo-any-expected.txt: Added.
'''
self.assertEquals(56988, parse_bug_id_from_changelog(commit_text))
commit_text = '''
2011-03-23 <NAME> <<EMAIL>>
Add failing result for WebKit2. All tests that require
focus fail on WebKit2. See https://bugs.webkit.org/show_bug.cgi?id=56988.
https://bugs.webkit.org/show_bug.cgi?id=12345
* platform/mac-wk2/fast/css/pseudo-any-expected.txt: Added.
'''
self.assertEquals(12345, parse_bug_id_from_changelog(commit_text))
commit_text = '''
2011-03-31 <NAME> <<EMAIL>>
Quote the executable path we pass to ::CreateProcessW
This will ensure that spaces in the path will be interpreted correctly.
Fixes <http://webkit.org/b/57569> Web process sometimes fails to launch when there are
spaces in its path
Reviewed by <NAME>.
* UIProcess/Launcher/win/ProcessLauncherWin.cpp:
(WebKit::ProcessLauncher::launchProcess): Surround the executable path in quotes.
'''
self.assertEquals(57569, parse_bug_id_from_changelog(commit_text))
commit_text = '''
2011-03-29 <NAME> <<EMAIL>>
Update WebCore Localizable.strings to contain WebCore, WebKit/mac and WebKit2 strings.
https://webkit.org/b/57354
Reviewed by <NAME>.
* English.lproj/Localizable.strings: Updated.
* StringsNotToBeLocalized.txt: Removed. To hard to maintain in WebCore.
* platform/network/cf/LoaderRunLoopCF.h: Remove a single quote in an #error so
extract-localizable-strings does not complain about unbalanced single quotes.
'''
self.assertEquals(57354, parse_bug_id_from_changelog(commit_text))
def test_parse_log_entries_from_changelog(self):
changelog_file = StringIO(self._example_changelog)
parsed_entries = list(ChangeLog.parse_entries_from_file(changelog_file))
self.assertEquals(len(parsed_entries), 3)
self.assertEquals(parsed_entries[0].reviewer_text(), "<NAME>")
self.assertEquals(parsed_entries[1].author_email(), "<EMAIL>")
self.assertEquals(parsed_entries[2].touched_files(), ["DumpRenderTree/mac/DumpRenderTreeWindow.mm"])
def test_latest_entry_parse(self):
changelog_contents = u"%s\n%s" % (self._example_entry, self._example_changelog)
changelog_file = StringIO(changelog_contents)
latest_entry = ChangeLog.parse_latest_entry_from_file(changelog_file)
self.assertEquals(latest_entry.contents(), self._example_entry)
self.assertEquals(latest_entry.author_name(), "<NAME>")
self.assertEquals(latest_entry.author_email(), "<EMAIL>")
self.assertEquals(latest_entry.reviewer_text(), u"<NAME>\xf8")
self.assertEquals(latest_entry.touched_files(), ["DumpRenderTree/win/DumpRenderTree.vcproj", "DumpRenderTree/win/ImageDiff.vcproj", "DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj"])
self.assertTrue(latest_entry.reviewer()) # Make sure that our UTF8-based lookup of Tor works.
def test_latest_entry_parse_single_entry(self):
changelog_contents = u"%s\n%s" % (self._example_entry, self._rolled_over_footer)
changelog_file = StringIO(changelog_contents)
latest_entry = ChangeLog.parse_latest_entry_from_file(changelog_file)
self.assertEquals(latest_entry.contents(), self._example_entry)
self.assertEquals(latest_entry.author_name(), "<NAME>")
@staticmethod
def _write_tmp_file_with_contents(byte_array):
assert(isinstance(byte_array, str))
(file_descriptor, file_path) = tempfile.mkstemp() # NamedTemporaryFile always deletes the file on close in python < 2.6
with os.fdopen(file_descriptor, "w") as file:
file.write(byte_array)
return file_path
@staticmethod
def _read_file_contents(file_path, encoding):
with codecs.open(file_path, "r", encoding) as file:
return file.read()
# FIXME: We really should be getting this from prepare-ChangeLog itself.
_new_entry_boilerplate = '''2009-08-19 <NAME> <<EMAIL>>
Need a short description and bug URL (OOPS!)
Reviewed by NOBODY (OOPS!).
* Scripts/bugzilla-tool:
'''
def test_set_reviewer(self):
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
reviewer_name = 'Test Reviewer'
ChangeLog(changelog_path).set_reviewer(reviewer_name)
actual_contents = self._read_file_contents(changelog_path, "utf-8")
expected_contents = changelog_contents.replace('NOBODY (OOPS!)', reviewer_name)
os.remove(changelog_path)
self.assertEquals(actual_contents.splitlines(), expected_contents.splitlines())
def test_set_short_description_and_bug_url(self):
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
short_description = "A short description"
bug_url = "http://example.com/b/2344"
ChangeLog(changelog_path).set_short_description_and_bug_url(short_description, bug_url)
actual_contents = self._read_file_contents(changelog_path, "utf-8")
expected_message = "%s\n %s" % (short_description, bug_url)
expected_contents = changelog_contents.replace("Need a short description and bug URL (OOPS!)", expected_message)
os.remove(changelog_path)
self.assertEquals(actual_contents.splitlines(), expected_contents.splitlines())
| en | 0.720879 | # Copyright (C) 2009 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 2009-08-17 <NAME> <<EMAIL>> Reviewed by <NAME>b\xf8. https://bugs.webkit.org/show_bug.cgi?id=27323 Only add Cygwin to the path when it isn't already there. This avoids causing problems for people who purposefully have non-Cygwin versions of executables like svn in front of the Cygwin ones in their paths. 
* DumpRenderTree/win/DumpRenderTree.vcproj: * DumpRenderTree/win/ImageDiff.vcproj: * DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj: # More example text than we need. Eventually we need to support parsing this all and write tests for the parsing. 2009-08-17 <NAME>\xf8 <<EMAIL>> <http://webkit.org/b/28393> check-webkit-style: add check for use of std::max()/std::min() instead of MAX()/MIN() Reviewed by <NAME>. * Scripts/modules/cpp_style.py: (_ERROR_CATEGORIES): Added 'runtime/max_min_macros'. (check_max_min_macros): Added. Returns level 4 error when MAX() and MIN() macros are used in header files and C++ source files. (check_style): Added call to check_max_min_macros(). * Scripts/modules/cpp_style_unittest.py: Added unit tests. (test_max_macro): Added. (test_min_macro): Added. 2009-08-16 <NAME> <<EMAIL>> Backed out r47343 which was mistakenly committed * Scripts/bugzilla-tool: * Scripts/modules/scm.py: 2009-06-18 <NAME> <<EMAIL>> Rubber stamped by <NAME>. * DumpRenderTree/mac/DumpRenderTreeWindow.mm: (-[DumpRenderTreeWindow close]): Resolved crashes seen during regression tests. The close method can be called on a window that's already closed so we can't assert here. == Rolled over to ChangeLog-2009-06-16 == 2011-03-23 <NAME> <<EMAIL>> Add failing result for WebKit2. All tests that require focus fail on WebKit2. See https://bugs.webkit.org/show_bug.cgi?id=56988. * platform/mac-wk2/fast/css/pseudo-any-expected.txt: Added. 2011-03-23 <NAME> <<EMAIL>> Add failing result for WebKit2. All tests that require focus fail on WebKit2. See https://bugs.webkit.org/show_bug.cgi?id=56988. https://bugs.webkit.org/show_bug.cgi?id=12345 * platform/mac-wk2/fast/css/pseudo-any-expected.txt: Added. 2011-03-31 <NAME> <<EMAIL>> Quote the executable path we pass to ::CreateProcessW This will ensure that spaces in the path will be interpreted correctly. 
Fixes <http://webkit.org/b/57569> Web process sometimes fails to launch when there are spaces in its path Reviewed by <NAME>. * UIProcess/Launcher/win/ProcessLauncherWin.cpp: (WebKit::ProcessLauncher::launchProcess): Surround the executable path in quotes. 2011-03-29 <NAME> <<EMAIL>> Update WebCore Localizable.strings to contain WebCore, WebKit/mac and WebKit2 strings. https://webkit.org/b/57354 Reviewed by <NAME>. * English.lproj/Localizable.strings: Updated. * StringsNotToBeLocalized.txt: Removed. To hard to maintain in WebCore. * platform/network/cf/LoaderRunLoopCF.h: Remove a single quote in an #error so extract-localizable-strings does not complain about unbalanced single quotes. # Make sure that our UTF8-based lookup of Tor works. # NamedTemporaryFile always deletes the file on close in python < 2.6 # FIXME: We really should be getting this from prepare-ChangeLog itself. 2009-08-19 <NAME> <<EMAIL>> Need a short description and bug URL (OOPS!) Reviewed by NOBODY (OOPS!). * Scripts/bugzilla-tool: | 1.519807 | 2 |
dataset_converters/TDG2FRCNNConverter.py | igiloh/Dataset-Converters | 48 | 6622829 | from dataset_converters.ConverterBase import ConverterBase
import os
import cv2
class TDG2FRCNNConverter(ConverterBase):
    """Convert TDG annotations (``bboxes.txt``) to the Faster R-CNN
    ``loc.trainval`` list format.

    Input: ``<input_folder>/bboxes.txt`` where each non-empty line is an
    image filename token plus one or more ``<class> <x> <y> <w> <h>``
    groups, all space separated.
    Output: images copied to ``<output_folder>/Images/<n>.jpg`` and a
    ``loc.trainval`` index file describing the ROIs of each image.
    """

    formats = ['TDG2FRCNN']

    def __init__(self, copy_fn):
        ConverterBase.__init__(self, copy_fn)

    def _run(self, input_folder, output_folder, FORMAT):
        """Read ``bboxes.txt``, copy the referenced images and emit the index.

        :param input_folder: Folder containing ``bboxes.txt`` and the images.
        :param output_folder: Destination folder (cleared before writing).
        :param FORMAT: Unused; kept for the converter interface.
        """
        # Context manager closes the annotation file even on parse errors
        # (the original leaked the loc.trainval handle entirely).
        with open(os.path.join(input_folder, 'bboxes.txt'), 'r') as bbox_file:
            lines = bbox_file.read().split('\n')
        assert len(lines) > 0

        img_dir = os.path.join(output_folder, 'Images')
        self._ensure_folder_exists_and_is_clear(output_folder)
        self._ensure_folder_exists_and_is_clear(img_dir)

        save_count = 1  # 1-based index used to name the copied images
        with open(os.path.join(output_folder, 'loc.trainval'), 'w') as index_file:
            for line in lines:
                # Tolerate blank lines (e.g. a trailing newline) instead of
                # tripping the token-count assertion below.
                if not line.strip():
                    continue
                tokens = line.split(' ')
                # One image token plus N complete 5-value annotation groups.
                assert len(tokens) > 1 and ((len(tokens) - 1) % 5 == 0)

                dst_image = os.path.join(img_dir, str(save_count) + '.jpg')
                rois = []
                group = []  # pending <class x y w h> values being collected
                for token in tokens:
                    if '.bmp' in token or '.jpg' in token or '.png' in token:
                        # The token naming the source image: copy it over.
                        self.copy(os.path.join(input_folder, token), dst_image)
                    else:
                        group.append(int(token))
                        if len(group) == 5:
                            label, x, y, w, h = group
                            # Sanity-check the annotation values.
                            assert label >= 1
                            assert x >= 0 and y >= 0
                            assert w >= 2 and h >= 2
                            # Convert width/height to an inclusive corner box.
                            rois.append([label, x, y, x + w - 1, y + h - 1])
                            group = []

                # Emit the loc.trainval record for this image.
                index_file.write('# ' + str(save_count - 1) + '\n')
                index_file.write('Images/' + str(save_count) + '.jpg\n')
                index_file.write(str(len(rois)) + '\n')
                for roi in rois:
                    index_file.write(str(roi[0]))
                    for coord in roi[1:]:
                        index_file.write(' ' + str(coord))
                    # Constant trailing 0 column expected by the consumer of
                    # loc.trainval -- NOTE(review): its meaning is not
                    # evident from this file; confirm against the reader.
                    index_file.write(' 0\n')
                index_file.write('\n')
                save_count += 1
| from dataset_converters.ConverterBase import ConverterBase
import os
import cv2
class TDG2FRCNNConverter(ConverterBase):
    """Converts TDG ``bboxes.txt`` annotations into the Faster R-CNN
    ``loc.trainval`` list format, copying images into ``Images/``."""

    # Identifier(s) under which this converter is registered.
    formats = ['TDG2FRCNN']

    def __init__(self, copy_fn):
        ConverterBase.__init__(self, copy_fn)

    def _run(self, input_folder, output_folder, FORMAT):
        # Read the whole annotation file: one image per line followed by
        # groups of five numbers (<class> <x> <y> <w> <h>).
        f = open(os.path.join(input_folder, 'bboxes.txt'), 'r')
        text = f.read()
        f.close()
        saveDir = output_folder
        imgDir = os.path.join(saveDir, 'Images')
        self._ensure_folder_exists_and_is_clear(saveDir)
        self._ensure_folder_exists_and_is_clear(imgDir)
        lines = text.split('\n')
        assert len(lines) > 0
        saveCount = 1  # 1-based index used to name the copied images
        i = 0  # position inside the current 5-value annotation group
        lastName = None  # NOTE(review): written below but never read
        lastClass = None
        # NOTE(review): this handle is never explicitly closed.
        f = open(os.path.join(saveDir, 'loc.trainval'), 'w')
        for line in lines:
            rois = []
            tokens = line.split(' ')
            # Every line must hold an image token plus N*5 numeric tokens
            # (a blank trailing line would fail this assertion).
            assert len(tokens) > 1 and ((len(tokens) - 1) % 5 == 0)
            name = os.path.join(imgDir, str(saveCount) + '.jpg')
            for token in tokens:
                if '.bmp' in token or '.jpg' in token or '.png' in token:
                    # Image filename token: copy the source image over.
                    lastName = token
                    self.copy(os.path.join(input_folder, token), name)
                else:
                    # Numeric token: slot it into the group by position i.
                    if i == 0:
                        lastClass = int(token)
                        assert(lastClass >= 1)
                    elif i == 1:
                        x = int(token)
                        assert x >= 0
                    elif i == 2:
                        y = int(token)
                        assert y >= 0
                    elif i == 3:
                        w = int(token)
                        assert w >= 2
                    elif i == 4:
                        h = int(token)
                        assert h >= 2
                    i += 1
                    if i % 5 == 0:
                        # Group complete: store as an inclusive corner box.
                        i = 0
                        rois.append([lastClass, x, y, x + w - 1, y + h - 1])
            # Emit the loc.trainval record for this image.
            f.write('# ' + str(saveCount - 1) + '\n')
            f.write('Images/' + str(saveCount) + '.jpg\n')
            f.write(str(len(rois)) + '\n')
            for roi in rois:
                f.write(str(roi[0]))
                for coord in roi[1:]:
                    f.write(' ' + str(coord))
                # Constant trailing 0 column of the output format.
                f.write(' 0\n')
            f.write('\n')
            saveCount += 1
| none | 1 | 2.492778 | 2 | |
backend/accounts/views.py | mmohajer9/merchant | 4 | 6622830 | from rest_framework import viewsets
from rest_framework import permissions
# from rest_framework.response import Response
# from django.shortcuts import get_object_or_404
from .pagination import CustomLimitOffsetPagination
from .models import Country, Province, City, Seller, Address
from .serializers import (
CountrySerializer,
ProvinceSerializer,
CitySerializer,
SellerSerializer,
SellerDetailSerializer,
AddressSerializer,
)
from .generics import EnhancedModelViewSet
from .permissions import IsOwner, IsNotSeller, Forbidden
from .filters import SellerFilter
# Create your views here.
class CountryViewSet(viewsets.ReadOnlyModelViewSet):
    """Public read-only endpoints (list/retrieve) for countries."""

    permission_classes = [permissions.AllowAny]
    serializer_class = CountrySerializer
    queryset = Country.objects.all()
class ProvinceViewSet(viewsets.ReadOnlyModelViewSet):
    """Public read-only endpoints (list/retrieve) for provinces."""

    permission_classes = [permissions.AllowAny]
    serializer_class = ProvinceSerializer
    queryset = Province.objects.all()
class CityViewSet(viewsets.ReadOnlyModelViewSet):
    """Public read-only endpoints (list/retrieve) for cities."""

    permission_classes = [permissions.AllowAny]
    serializer_class = CitySerializer
    queryset = City.objects.all()
class SellerViewSet(EnhancedModelViewSet):
    """CRUD endpoints for sellers, addressed by the owner's username."""

    queryset = Seller.objects.all()
    lookup_field = "user__username"
    pagination_class = CustomLimitOffsetPagination

    # Defaults; individual actions may override these below.
    serializer_class = SellerSerializer
    permission_classes = [permissions.IsAuthenticated, IsOwner]

    # Filter / search / ordering backend configuration.
    filterset_class = SellerFilter
    search_fields = ["title", "business_phone", "description", "user__username"]
    ordering_fields = "__all__"
    ordering = ["id"]

    # Per-action serializer overrides; every other action uses the default.
    action_serializers = {
        "retrieve": SellerDetailSerializer,
    }

    # Per-action permission overrides.
    action_permission_classes = {
        "list": [permissions.AllowAny],
        "create": [permissions.IsAuthenticated, IsNotSeller],
        "retrieve": [permissions.AllowAny],
        "update": [permissions.IsAuthenticated, IsOwner],
        "partial_update": [permissions.IsAuthenticated, IsOwner],
        "destroy": [permissions.IsAuthenticated, IsOwner],
    }

    def perform_create(self, serializer):
        """Attach the authenticated user as the owner of the new seller."""
        serializer.save(user=self.request.user)
class AddressViewSet(EnhancedModelViewSet):
    """CRUD endpoints scoped to the authenticated user's own addresses."""

    serializer_class = AddressSerializer
    permission_classes = [permissions.IsAuthenticated, IsOwner]

    def get_queryset(self):
        # A user only ever sees addresses they own, in stable id order.
        return Address.objects.filter(user=self.request.user).order_by("id")

    def perform_create(self, serializer):
        # Force ownership to the requesting user.
        serializer.save(user=self.request.user)
| from rest_framework import viewsets
from rest_framework import permissions
# from rest_framework.response import Response
# from django.shortcuts import get_object_or_404
from .pagination import CustomLimitOffsetPagination
from .models import Country, Province, City, Seller, Address
from .serializers import (
CountrySerializer,
ProvinceSerializer,
CitySerializer,
SellerSerializer,
SellerDetailSerializer,
AddressSerializer,
)
from .generics import EnhancedModelViewSet
from .permissions import IsOwner, IsNotSeller, Forbidden
from .filters import SellerFilter
# Create your views here.
class CountryViewSet(viewsets.ReadOnlyModelViewSet):
    # Public, read-only list/retrieve endpoints for countries.
    queryset = Country.objects.all()
    serializer_class = CountrySerializer
    permission_classes = [permissions.AllowAny]
class ProvinceViewSet(viewsets.ReadOnlyModelViewSet):
    # Public, read-only list/retrieve endpoints for provinces.
    queryset = Province.objects.all()
    serializer_class = ProvinceSerializer
    permission_classes = [permissions.AllowAny]
class CityViewSet(viewsets.ReadOnlyModelViewSet):
    # Public, read-only list/retrieve endpoints for cities.
    queryset = City.objects.all()
    serializer_class = CitySerializer
    permission_classes = [permissions.AllowAny]
class SellerViewSet(EnhancedModelViewSet):
    """CRUD endpoints for sellers, addressed by the owner's username
    (see ``lookup_field`` at the bottom of the class)."""
    queryset = Seller.objects.all()
    pagination_class = CustomLimitOffsetPagination
    # default serializer and permission classes
    serializer_class = SellerSerializer
    permission_classes = [permissions.IsAuthenticated, IsOwner]
    # filter / search / ordering backend configuration
    filterset_class = SellerFilter
    search_fields = ["title", "business_phone", "description", "user__username"]
    ordering_fields = "__all__"
    ordering = ["id"]
    # override per action (actions not listed use the default serializer)
    action_serializers = {
        # "list": Serializer1,
        # "create": Serializer2,
        "retrieve": SellerDetailSerializer,
        # "update": Serializer4,
        # "partial_update": Serializer5,
        # "destroy": Serializer6,
    }
    # override per action (listing/retrieval public, mutation owner-only)
    action_permission_classes = {
        "list": [permissions.AllowAny],
        "create": [permissions.IsAuthenticated, IsNotSeller],
        "retrieve": [permissions.AllowAny],
        "update": [permissions.IsAuthenticated, IsOwner],
        "partial_update": [permissions.IsAuthenticated, IsOwner],
        "destroy": [permissions.IsAuthenticated, IsOwner],
    }
    def perform_create(self, serializer):
        # Attach the authenticated user as the owner of the new seller.
        serializer.save(user=self.request.user)
    lookup_field = "user__username"
class AddressViewSet(EnhancedModelViewSet):
    """CRUD endpoints scoped to the authenticated user's own addresses."""
    def get_queryset(self):
        # A user only ever sees addresses they own, in stable id order.
        return Address.objects.filter(user=self.request.user).order_by("id")
    # default serializer and permission classes
    serializer_class = AddressSerializer
    permission_classes = [permissions.IsAuthenticated, IsOwner]
    def perform_create(self, serializer):
        # Force ownership to the requesting user.
        serializer.save(user=self.request.user)
| en | 0.42223 | # from rest_framework.response import Response # from django.shortcuts import get_object_or_404 # Create your views here. # default serializer and permission classes # override per action # "list": Serializer1, # "create": Serializer2, # "update": Serializer4, # "partial_update": Serializer5, # "destroy": Serializer6, # override per action # default serializer and permission classes | 1.958495 | 2 |
utils.py | IamEinstein/Ash | 0 | 6622831 | <gh_stars>0
def check_if_present(username: str, todo: str):
    """Check whether *username* is already recorded in ``./users.txt``.

    :param username: Name to look for (one username per line in the file).
    :param todo: Requested action; only ``"add"`` is treated specially.
    :return: ``False`` when the user exists and *todo* is "add" (nothing to
        do), ``True`` when the user exists and another action was requested
        (the file is rewritten to contain only that username), ``None`` when
        the user is not present at all (original implicit behavior kept for
        backward compatibility).
    """
    # Bug fix: the original compared against raw readlines() output, whose
    # entries keep their trailing '\n', so membership almost never matched.
    # Strip newlines, and close the handle on every path via the context
    # manager (the original leaked it when the user was absent).
    with open('./users.txt', 'r') as user_file:
        users = [line.rstrip('\n') for line in user_file]
    if username in users:
        if todo.lower() == "add":
            return False
        # NOTE(review): this overwrites the whole file with a single
        # username; behavior preserved as-is -- confirm it is intended.
        with open('./users.txt', 'w') as user_file:
            user_file.write(username)
        return True
| def check_if_present(username: str, todo: str):
    # Look up *username* in ./users.txt and react according to *todo*.
    file = open('./users.txt', 'r')
    # NOTE(review): readlines() keeps trailing newlines, so this membership
    # test only matches a final line that lacks a terminating '\n'.
    if username in file.readlines():
        if todo.lower() == "add":
            file.close()
            return False
        else:
            # NOTE(review): truncates users.txt down to one username, and
            # the read handle opened above is never closed on this path.
            w_file = open('./users.txt', 'w')
            w_file.write(username)
            w_file.close()
            return True
    # Falls through returning None when the username is absent
    # (and the read handle is leaked on that path as well).
analyzer.py | ibrahimhillowle/ibrahimhillowle.github.io | 0 | 6622832 | <reponame>ibrahimhillowle/ibrahimhillowle.github.io
import pandas as pd
from datetime import datetime
from datetime import timedelta
import lib_bamboo as bamboo
import os
# Original Dutch TODO: "still to fill in -- how do you clear the screen?"
# ('cls' only works on Windows terminals.)
os.system('cls') #Deze regel nog invullen! Hoe maak je het scherm leeg?
print("Working...")
# Load the basketball league standings and sort chronologically.
data = pd.read_excel("Basketbal_Dutch Basketball League_tussenstand.xlsx")
data["datum"] = pd.to_datetime(data["datum"], format="%d/%m/%Y")
data = data.sort_values("datum")
# Information question 1: total number of fouls ("overtredingen").
sumovertreding =str(data['overtredingen'].sum())
file1 = open("files/sumovertreding.txt", "w", encoding="UTF-8")
file1.write(sumovertreding)
file1.close()
# Information question 2: average number of fouls per row.
averageOvertreding = str(data["overtredingen"].mean())
file2 = open("files/gemiddeld.txt", "w", encoding="UTF-8")
file2.write(averageOvertreding)
file2.close()
# Information question 3: "zwartboek" (blacklist) -- top 5 rows by fouls.
zwartBoek = data_sorted=data.sort_values("overtredingen",ascending=False)
top5=data_sorted.head(5)
file3 = open("files/zwartboek.txt", "w", encoding="UTF-8")
file3.write(bamboo.prettify(top5, type="zwartboek"))
file3.close()
# Information question 4: "eregalerij" (hall of fame) -- rows from the last
# 14 days with fewer than 2 fouls.
# NOTE(review): this handle is opened but never written/closed; the same
# path is reopened as file4 below.
eregalerijFile = open("files/eregalerij.txt", "w", encoding="UTF-8")
# NOTE(review): 'datum' is already datetime here; re-parsing it with
# format "%d%m%Y" looks redundant -- confirm the intent.
data["datum"] = pd.to_datetime(data["datum"], format="%d%m%Y")
checkdays = 14  # NOTE(review): unused; timedelta below hard-codes 14
date_check = datetime.now() - timedelta(days=14)
eregalerijSorted = data.sort_values("datum", ascending=True)
# NOTE(review): 'filter' shadows the Python builtin of the same name.
filter = ((data["overtredingen"] < 2) & (data["datum"] > date_check))
eregalerij = eregalerijSorted[filter]
file4 = open("files/eregalerij.txt", "w", encoding="UTF-8")
file4.write(bamboo.prettify(eregalerij, type="eregalerij"))
file4.close()
print("Done!")
| import pandas as pd
from datetime import datetime
from datetime import timedelta
import lib_bamboo as bamboo
import os
# Original Dutch TODO: "still to fill in -- how do you clear the screen?"
os.system('cls') #Deze regel nog invullen! Hoe maak je het scherm leeg?
print("Working...")
# Load the basketball league standings and sort chronologically.
data = pd.read_excel("Basketbal_Dutch Basketball League_tussenstand.xlsx")
data["datum"] = pd.to_datetime(data["datum"], format="%d/%m/%Y")
data = data.sort_values("datum")
# Information question 1: total number of fouls ("overtredingen").
sumovertreding =str(data['overtredingen'].sum())
file1 = open("files/sumovertreding.txt", "w", encoding="UTF-8")
file1.write(sumovertreding)
file1.close()
# Information question 2: average number of fouls per row.
averageOvertreding = str(data["overtredingen"].mean())
file2 = open("files/gemiddeld.txt", "w", encoding="UTF-8")
file2.write(averageOvertreding)
file2.close()
# Information question 3: "zwartboek" (blacklist) -- top 5 rows by fouls.
zwartBoek = data_sorted=data.sort_values("overtredingen",ascending=False)
top5=data_sorted.head(5)
file3 = open("files/zwartboek.txt", "w", encoding="UTF-8")
file3.write(bamboo.prettify(top5, type="zwartboek"))
file3.close()
# Information question 4: "eregalerij" (hall of fame) -- rows from the last
# 14 days with fewer than 2 fouls.
# NOTE(review): handle opened but never written/closed; the same path is
# reopened as file4 below.
eregalerijFile = open("files/eregalerij.txt", "w", encoding="UTF-8")
# NOTE(review): 'datum' is already datetime; re-parsing looks redundant.
data["datum"] = pd.to_datetime(data["datum"], format="%d%m%Y")
checkdays = 14  # NOTE(review): unused; timedelta below hard-codes 14
date_check = datetime.now() - timedelta(days=14)
eregalerijSorted = data.sort_values("datum", ascending=True)
# NOTE(review): 'filter' shadows the Python builtin of the same name.
filter = ((data["overtredingen"] < 2) & (data["datum"] > date_check))
eregalerij = eregalerijSorted[filter]
file4 = open("files/eregalerij.txt", "w", encoding="UTF-8")
file4.write(bamboo.prettify(eregalerij, type="eregalerij"))
file4.close()
print("Done!") | nl | 0.975913 | #Deze regel nog invullen! Hoe maak je het scherm leeg? #Informatievraag 1 #Informatievraag 2 #Informatievraag 3 #Informatievraag 4 | 2.928988 | 3 |
mmdet/models/roi_extractors/__init__.py | ktw361/Local-Mid-Propagation | 10 | 6622833 | <reponame>ktw361/Local-Mid-Propagation
from .single_level import SingleRoIExtractor
from .multi_levels import MultiRoIExtractor
from .rfcn_single_level import RfcnPSRoIExtractor
__all__ = ['SingleRoIExtractor', 'MultiRoIExtractor', 'RfcnPSRoIExtractor']
| from .single_level import SingleRoIExtractor
from .multi_levels import MultiRoIExtractor
from .rfcn_single_level import RfcnPSRoIExtractor
__all__ = ['SingleRoIExtractor', 'MultiRoIExtractor', 'RfcnPSRoIExtractor'] | none | 1 | 1.077861 | 1 | |
spectra_cluster/ui/protein_annotator.py | qinchunyuan/spectra-cluster-py | 3 | 6622834 | <reponame>qinchunyuan/spectra-cluster-py
"""protein_annotator
This tool adds a protein accession column to a text file based on the present peptides.
The column containing the peptide string must be defined. Then, all proteins that match the given peptide are
written to a specified column.
Usage:
protein_annotator.py --input=<input.tsv> --output=<extended_file.tsv> --fasta=<fasta_file.fasta>
[--peptide_column=<column_name>] [--protein_column=<column_name>]
[--protein_separator=<separator>] [--column_separator=<separator>]
[--ignore_il]
protein_annotator.py (--help | --version)
Options:
-i, --input=<input.tsv> Path to the input file.
-o, --output=<extended_file.tsv> Path to the output file that should be created.
-f, --fasta=<fasta_file.fasta> Fasta file to match the peptides to.
--peptide_column=<column_name> Column name of the peptide column [default: sequence]
--protein_column=<column_name> Column name of the newly added protein column [default: protein]
--protein_separator=<separator> Separator to separate multiple protein entries [default: ;]
--column_separator=<separator> Separator to separate columns in the file [default: TAB]
--ignore_il If set I/L are treated as synonymous.
-h, --help Print this help message.
-v, --version Print the current version.
"""
import sys
import os
import csv
from docopt import docopt
import re
# make the spectra_cluster packages available
sys.path.insert(0, os.path.abspath('..') + os.path.sep + "..")
from spectra_cluster.tools import fasta_paraser
def extract_separator(user_separator):
    """Translate the user-supplied separator name into the real character.

    The literal string ``"TAB"`` maps to a tab character; anything else is
    returned unchanged.

    :param user_separator: The separator string as given on the command line.
    :return: The character to use as separator.
    """
    return "\t" if user_separator == "TAB" else user_separator
def load_peptides(input_file, peptide_column, column_separator):
    """Collect the set of cleaned peptide sequences found in *input_file*.

    Each row's peptide string is reduced to its upper-case letters only,
    stripping modification annotations and other decorations.

    :param input_file: The delimited text file to read.
    :param peptide_column: Header name of the column holding the peptides.
    :param column_separator: Delimiter separating the columns.
    :return: A set of cleaned peptide strings.
    :raises Exception: If a row does not contain *peptide_column*.
    """
    unique_peptides = set()
    with open(input_file, "r") as handle:
        for record in csv.DictReader(handle, delimiter=column_separator):
            if peptide_column not in record:
                raise Exception("Specified peptide column '" + peptide_column + "' not found in input file.")
            # Keep only upper-case letters (the amino-acid characters).
            unique_peptides.add(re.sub("[^A-Z]", "", record[peptide_column]))
    return unique_peptides
def map_peptides_to_proteins(peptides, fasta_filename, ignore_il=False):
    """
    Maps the peptides to the proteins in the passed FASTA file.

    :param peptides: An iterable containing the peptide strings.
    :param fasta_filename: Filename of the FASTA file to parse.
    :param ignore_il: If set to True I/L are treated as the same AA.
    :return: A dict with the peptide as key and the protein accessions as list.
    """
    parser = fasta_paraser.FastaParser(fasta_filename)
    peptide_protein_map = dict()

    # Performance fix: pre-compute the comparison form of every peptide
    # once, instead of re-deriving the I -> L replacement for every single
    # FASTA entry as the original did.
    if ignore_il:
        comparison_pairs = [(sequence, sequence.replace("I", "L")) for sequence in peptides]
    else:
        comparison_pairs = [(sequence, sequence) for sequence in peptides]

    for fasta_entry in parser:
        if ignore_il:
            fasta_sequence = fasta_entry.sequence.replace("I", "L")
        else:
            fasta_sequence = fasta_entry.sequence

        # Record every peptide contained in this protein's sequence.
        for sequence, comparison_sequence in comparison_pairs:
            if comparison_sequence in fasta_sequence:
                # Use the original peptide sequence as the key.
                peptide_protein_map.setdefault(sequence, list()).append(fasta_entry.getAccession())

    return peptide_protein_map
def write_extended_file(input_filename, output_filename, peptides_to_protein, column_separator, protein_separator,
                        peptide_column, protein_column):
    """
    Creates the new file, which is a copy of the input file with the protein column appended at the end.
    :param input_filename: The input filename path.
    :param output_filename: The output filename path.
    :param peptides_to_protein: A dict containing the peptide string as key and a list of protein accessions as values.
    :param column_separator: Separator used for the columns.
    :param protein_separator: Separator used if multiple protein accessions are found.
    :param peptide_column: Name of the peptide column.
    :param protein_column: New name of the protein column.
    :return:
    """
    # Index of the peptide column, resolved from the header line below.
    peptide_index = -1
    first_line = True
    with open(output_filename, "w") as output_file:
        with open(input_filename, "r") as input_file:
            for line in input_file:
                # Remove the trailing line separator.
                # NOTE(review): only '\n' is stripped -- a CRLF file would
                # keep the '\r' in the last field; confirm input format.
                line = line.rstrip("\n")
                if first_line:
                    # Header row: append the new protein column name.
                    new_line = line + column_separator + protein_column + "\n"
                    output_file.write(new_line)
                    first_line = False
                    # Locate the peptide column within the header.
                    fields = line.split(column_separator)
                    for i in range(0, len(fields)):
                        if fields[i] == peptide_column:
                            peptide_index = i
                            break
                    if peptide_index < 0:
                        raise Exception("Failed to find peptide column '" + peptide_column + "' in input file.")
                else:
                    # Data row: extract and clean the peptide string
                    # (keep upper-case letters only, as in load_peptides).
                    fields = line.split(column_separator)
                    sequence = fields[peptide_index]
                    clean_sequence = re.sub("[^A-Z]", "", sequence)
                    # Write the original line plus the new protein column.
                    output_file.write(line + column_separator)
                    if clean_sequence in peptides_to_protein and peptides_to_protein[clean_sequence] is not None:
                        output_file.write(protein_separator.join(peptides_to_protein[clean_sequence]))
                    output_file.write("\n")
class ProteinMappings:
    """Holds all protein accessions a single peptide maps to."""

    def __init__(self, proteins):
        """Create a new mappings record.

        :param proteins: A list of protein accessions the peptide maps to.
        """
        self.proteins = proteins
        # Cached count, used to sort mappings by ambiguity.
        self.n_proteins = len(proteins)
class ProteinGroup:
    """A (possibly multi-protein) group identified by its label."""

    def __init__(self, label, accessions):
        """Create a new protein group.

        :param label: Display label of the group.
        :param accessions: Accessions of all member proteins.
        """
        self.label = label
        self.accessions = tuple(accessions)

    def __hash__(self):
        # Groups are identified purely by their label.
        return hash(self.label)
def do_protein_inference(peptides_to_proteins, protein_separator=";"):
    """
    Warning: This function currently does **NOT** work correctly!

    Creates the smallest set of protein (groups) required to explain all peptides. Ambiguous peptides
    are not mapped to any group.
    :param peptides_to_proteins: A dict with the peptide sequence as key and all mapping proteins as a list (value).
    :param protein_separator: The separator to use when creating the label for protein groups.
    :return: A dict with the peptide sequence as key and the protein / protein group accession as value (single entry
             in a list for compatibility reasons).
    """
    # Deliberately disabled: everything below is unreachable until the
    # algorithm is fixed (see the warning in the docstring).
    raise ArithmeticError()
    # create list of ProteinMappings, one per peptide
    mappings = [ProteinMappings(proteins) for proteins in peptides_to_proteins.values()]
    # extract the smallest set of protein groups necessary to explain all peptides
    # start with the peptides mapping to the smallest number of proteins (ie. partially unique ones)
    mappings.sort(key=lambda x: x.n_proteins, reverse=False)
    retained_proteins = set()
    retained_protein_groups = list()
    for mapping in mappings:
        # if one of the accessions is already retained, simply ignore the rest
        if any(True for accession in mapping.proteins if accession in retained_proteins):
            continue
        # create a new protein group
        retained_proteins |= set(mapping.proteins)
        retained_protein_groups.append(ProteinGroup(protein_separator.join(mapping.proteins), mapping.proteins))
    # create a map of proteins to protein groups
    protein_to_protein_group = dict()
    for protein_group in retained_protein_groups:
        for protein in protein_group.accessions:
            protein_to_protein_group[protein] = protein_group
    # remap the peptides
    new_peptide_mappings = dict()
    for sequence in peptides_to_proteins:
        all_proteins = peptides_to_proteins[sequence]
        # get all matching protein groups
        matching_protein_groups = set()
        for protein in all_proteins:
            # if the protein is a subset protein, it was ignored
            if protein not in protein_to_protein_group:
                continue
            matching_protein_groups.add(protein_to_protein_group[protein])
        # if the peptide matches multiple protein groups, ignore it
        if len(matching_protein_groups) > 1:
            new_peptide_mappings[sequence] = None
        elif len(matching_protein_groups) == 1:
            new_peptide_mappings[sequence] = [list(matching_protein_groups)[0].label]
        else:
            # missing mappings are also represented by None
            new_peptide_mappings[sequence] = None
    return new_peptide_mappings
def main():
    """
    Primary entry function for the CLI.
    """
    # docopt validates argv against the module docstring and exits early
    # for --help / --version.
    arguments = docopt(__doc__, version='protein_annotator 1.0 BETA')
    # make sure the input files exist
    if not os.path.isfile(arguments['--input']):
        print("Error: Cannot find input file '" + arguments["--input"] + "'")
        sys.exit(1)
    if not os.path.isfile(arguments['--fasta']):
        print("Error: Cannot find fasta file '" + arguments["--fasta"] + "'")
        sys.exit(1)
    # make sure the output file does not exist (never overwrite)
    if os.path.isfile(arguments["--output"]):
        print("Error: Output file exists '" + arguments["--output"] + "'")
        sys.exit(1)
    # resolve the separators (handles the symbolic "TAB" value)
    column_separator = extract_separator(arguments["--column_separator"])
    protein_separator = extract_separator(arguments["--protein_separator"])
    peptide_column = arguments["--peptide_column"]
    protein_column = arguments["--protein_column"]
    # load all peptides from the input file
    print("Loading peptides from file...", end="")
    peptides = load_peptides(arguments["--input"], peptide_column, column_separator)
    print("Done. (" + str(len(peptides)) + " loaded)")
    # map the peptides onto the FASTA proteins
    print("Mapping peptides to proteins...", end="")
    peptides_to_protein = map_peptides_to_proteins(peptides, arguments["--fasta"], arguments["--ignore_il"])
    print("Done.")
    # protein inference is disabled (do_protein_inference is broken):
    # if arguments["--protein_inference"]:
    #     peptides_to_protein = do_protein_inference(peptides_to_protein, protein_separator)
    # write the new file with the protein column appended
    write_extended_file(arguments["--input"], arguments["--output"], peptides_to_protein, column_separator,
                        protein_separator, peptide_column, protein_column)
if __name__ == "__main__":
    main()
| """protein_annotator
This tool adds a protein accession column to a text file based on the present peptides.
The column containing the peptide string must be defined. Then, all proteins that match the given peptide are
written to a specified column.
Usage:
protein_annotator.py --input=<input.tsv> --output=<extended_file.tsv> --fasta=<fasta_file.fasta>
[--peptide_column=<column_name>] [--protein_column=<column_name>]
[--protein_separator=<separator>] [--column_separator=<separator>]
[--ignore_il]
protein_annotator.py (--help | --version)
Options:
-i, --input=<input.tsv> Path to the input file.
-o, --output=<extended_file.tsv> Path to the output file that should be created.
-f, --fasta=<fasta_file.fasta> Fasta file to match the peptides to.
--peptide_column=<column_name> Column name of the peptide column [default: sequence]
--protein_column=<column_name> Column name of the newly added protein column [default: protein]
--protein_separator=<separator> Separator to separate multiple protein entries [default: ;]
--column_separator=<separator> Separator to separate columns in the file [default: TAB]
--ignore_il If set I/L are treated as synonymous.
-h, --help Print this help message.
-v, --version Print the current version.
"""
import sys
import os
import csv
from docopt import docopt
import re
# make the spectra_cluster packages available
sys.path.insert(0, os.path.abspath('..') + os.path.sep + "..")
from spectra_cluster.tools import fasta_paraser
def extract_separator(user_separator):
    """
    Parses the user defined separator and returns the matching character.
    :param user_separator: The user defined separator.
    :return: The parsed character
    """
    # "TAB" is the only symbolic name supported; anything else is literal.
    if user_separator == "TAB":
        return "\t"
    return user_separator
def load_peptides(input_file, peptide_column, column_separator):
    """
    Parses the input file and extracts all peptides occurring within the file. Peptide strings
    are cleaned (only valid characters retained) and returned as a set.
    :param input_file: The file to parse
    :param peptide_column: The column header to extract the peptides from.
    :param column_separator: The separator used for the columns
    :return: A set of strings representing the peptides.
    """
    with open(input_file, "r") as input_stream:
        csv_reader = csv.DictReader(input_stream, delimiter=column_separator)
        peptides = set()
        for row in csv_reader:
            if peptide_column not in row:
                raise Exception("Specified peptide column '" + peptide_column + "' not found in input file.")
            sequence = row[peptide_column]
            # Keep only upper-case letters (the amino-acid characters).
            clean_sequence = re.sub("[^A-Z]", "", sequence)
            peptides.add(clean_sequence)
    return peptides
def map_peptides_to_proteins(peptides, fasta_filename, ignore_il=False):
    """
    Maps the peptides to the proteins in the passed FASTA file.
    :param peptides: An iterable containing the peptide strings.
    :param fasta_filename: Filename of the FASTA file to parse.
    :param ignore_il: If set to True I/L are treated as the same AA.
    :return: A dict with the peptide as key and the protein accessions as list.
    """
    parser = fasta_paraser.FastaParser(fasta_filename)
    peptide_protein_map = dict()
    for fasta_entry in parser:
        if ignore_il:
            fasta_sequence = fasta_entry.sequence.replace("I", "L")
        else:
            fasta_sequence = fasta_entry.sequence
        # check whether any of the known sequences is present in this protein
        for sequence in peptides:
            # replace all I with L if defined
            # NOTE(review): this replacement is recomputed for every FASTA
            # entry; it could be hoisted out of the outer loop.
            if ignore_il:
                comparison_sequence = sequence.replace("I", "L")
            else:
                comparison_sequence = sequence
            if comparison_sequence in fasta_sequence:
                # use the original peptide sequence as a key
                if sequence not in peptide_protein_map:
                    peptide_protein_map[sequence] = list()
                peptide_protein_map[sequence].append(fasta_entry.getAccession())
    return peptide_protein_map
def write_extended_file(input_filename, output_filename, peptides_to_protein, column_separator, protein_separator,
                        peptide_column, protein_column):
    """
    Creates the new file, which is a copy of the input file with the protein column appended at the end.
    :param input_filename: The input filename path.
    :param output_filename: The output filename path.
    :param peptides_to_protein: A dict containing the peptide string as key and a list of protein accessions as values.
    :param column_separator: Separator used for the columns.
    :param protein_separator: Separator used if multiple protein accessions are found.
    :param peptide_column: Name of the peptide column.
    :param protein_column: New name of the protein column.
    :return:
    """
    # Index of the peptide column, resolved from the header line below.
    peptide_index = -1
    first_line = True
    with open(output_filename, "w") as output_file:
        with open(input_filename, "r") as input_file:
            for line in input_file:
                # remove the trailing line separator (only '\n' is handled)
                line = line.rstrip("\n")
                if first_line:
                    # Header row: append the new protein column name.
                    new_line = line + column_separator + protein_column + "\n"
                    output_file.write(new_line)
                    first_line = False
                    # Locate the peptide column within the header.
                    fields = line.split(column_separator)
                    for i in range(0, len(fields)):
                        if fields[i] == peptide_column:
                            peptide_index = i
                            break
                    if peptide_index < 0:
                        raise Exception("Failed to find peptide column '" + peptide_column + "' in input file.")
                else:
                    # Data row: extract and clean the peptide string.
                    fields = line.split(column_separator)
                    sequence = fields[peptide_index]
                    clean_sequence = re.sub("[^A-Z]", "", sequence)
                    # Write the original line plus the new protein column.
                    output_file.write(line + column_separator)
                    if clean_sequence in peptides_to_protein and peptides_to_protein[clean_sequence] is not None:
                        output_file.write(protein_separator.join(peptides_to_protein[clean_sequence]))
                    output_file.write("\n")
class ProteinMappings:
    """
    Simple class to represent all proteins a peptide maps to
    """
    def __init__(self, proteins):
        """
        Creates a new ProteinMappings object
        :param proteins: A list of proteins the peptide maps to.
        :return:
        """
        self.proteins = proteins
        # Cached count, used to sort mappings by ambiguity.
        self.n_proteins = len(self.proteins)
class ProteinGroup:
    """
    Simple representation of a ProteinGroup
    """
    def __init__(self, label, accessions):
        """
        Creates a new protein group object
        :param label: The label of the protein group.
        :param accessions: The accessions of all member proteins
        :return:
        """
        self.label = label
        self.accessions = tuple(accessions)
    def __hash__(self):
        # Groups are identified purely by their label.
        return hash(self.label)
def do_protein_inference(peptides_to_proteins, protein_separator=";"):
    """
    Warning: This function currently does **NOT** work correctly!

    Creates the smallest set of protein (groups) required to explain all peptides. Ambiguous peptides
    are not mapped to any group.
    :param peptides_to_proteins: A dict with the peptide sequence as key and all mapping proteins as a list (value).
    :param protein_separator: The separator to use when creating the label for protein groups.
    :return: A dict with the peptide sequence as key and the protein / protein group accession as value (single entry
             in a list for compatibility reasons).
    """
    # Deliberately disabled: everything below is unreachable until the
    # algorithm is fixed (see the warning in the docstring).
    raise ArithmeticError()
    # create list of ProteinMappings, one per peptide
    mappings = [ProteinMappings(proteins) for proteins in peptides_to_proteins.values()]
    # extract the smallest set of protein groups necessary to explain all peptides
    # start with the peptides mapping to the smallest number of proteins (ie. partially unique ones)
    mappings.sort(key=lambda x: x.n_proteins, reverse=False)
    retained_proteins = set()
    retained_protein_groups = list()
    for mapping in mappings:
        # if one of the accessions is already retained, simply ignore the rest
        if any(True for accession in mapping.proteins if accession in retained_proteins):
            continue
        # create a new protein group
        retained_proteins |= set(mapping.proteins)
        retained_protein_groups.append(ProteinGroup(protein_separator.join(mapping.proteins), mapping.proteins))
    # create a map of proteins to protein groups
    protein_to_protein_group = dict()
    for protein_group in retained_protein_groups:
        for protein in protein_group.accessions:
            protein_to_protein_group[protein] = protein_group
    # remap the peptides
    new_peptide_mappings = dict()
    for sequence in peptides_to_proteins:
        all_proteins = peptides_to_proteins[sequence]
        # get all matching protein groups
        matching_protein_groups = set()
        for protein in all_proteins:
            # if the protein is a subset protein, it was ignored
            if protein not in protein_to_protein_group:
                continue
            matching_protein_groups.add(protein_to_protein_group[protein])
        # if the peptide matches multiple protein groups, ignore it
        if len(matching_protein_groups) > 1:
            new_peptide_mappings[sequence] = None
        elif len(matching_protein_groups) == 1:
            new_peptide_mappings[sequence] = [list(matching_protein_groups)[0].label]
        else:
            # missing mappings are also represented by None
            new_peptide_mappings[sequence] = None
    return new_peptide_mappings
def main():
    """
    Primary entry point for the CLI.

    Parses the docopt arguments, validates the input/output paths, maps
    the peptides of the input file onto the FASTA proteins and writes the
    extended copy of the input file.
    """
    args = docopt(__doc__, version='protein_annotator 1.0 BETA')

    # abort early if a required file is missing or the output already exists
    if not os.path.isfile(args['--input']):
        print("Error: Cannot find input file '" + args["--input"] + "'")
        sys.exit(1)
    if not os.path.isfile(args['--fasta']):
        print("Error: Cannot find fasta file '" + args["--fasta"] + "'")
        sys.exit(1)
    if os.path.isfile(args["--output"]):
        print("Error: Output file exists '" + args["--output"] + "'")
        sys.exit(1)

    # resolve separators and column names once up front
    col_sep = extract_separator(args["--column_separator"])
    prot_sep = extract_separator(args["--protein_separator"])
    pep_col = args["--peptide_column"]
    prot_col = args["--protein_column"]

    # load every peptide present in the input file
    print("Loading peptides from file...", end="")
    peptides = load_peptides(args["--input"], pep_col, col_sep)
    print("Done. (" + str(len(peptides)) + " loaded)")

    # map each peptide onto the matching proteins of the FASTA file
    print("Mapping peptides to proteins...", end="")
    peptide_map = map_peptides_to_proteins(peptides, args["--fasta"], args["--ignore_il"])
    print("Done.")

    # protein inference is currently disabled (see do_protein_inference)
    # peptide_map = do_protein_inference(peptide_map, prot_sep)

    # write the extended copy of the input file
    write_extended_file(args["--input"], args["--output"], peptide_map, col_sep,
                        prot_sep, pep_col, prot_col)
# run the CLI when this module is executed as a script
if __name__ == "__main__":
    main()
:param fasta_filename: Filename of the FASTA file to parse. :param ignore_il: If set to True I/L are treated as the same AA. :return: A dict with the peptide as key and the protein accessions as list. # check whether any of the known sequences is present in this protein # replace all I with L if defined # use the original peptide sequence as a key Creates the new file which only is a copy of the current file with the protein column added to the end :param input_filename: The input filename path. :param output_filename: The output filename path. :param peptides_to_protein: A dict containing the peptide string as key and a list of protein acccessions as values. :param column_separator: Separator used for the columns. :param protein_separator: Separator used if multiple protein accessions are found. :param peptide_column: Name of the peptide column. :param protein_column: New name of the protein column. :return: # get the index of the peptide column # remove the trailing line separator # get the index of the peptide column # get the peptide string # write the original line Simple class to represent all proteins a peptide maps to Creates a new PeptideToproteinMapping object :param proteins: A list of proteins the pepitde maps to. :return: Simple representation of a ProteinGroup Creates a new protein group object :param label: The label of the protein group. :param accessions: The accessions of all member proteins :return: Warning: This function currently does **NOT** work correctly! Creates the smallest set of protein (groups) required to explain all peptides. Ambiguous peptides are not mapped to any group. :param peptides_to_proteins: A dict with the peptide sequence as key and all mapping proteins as a list (value). :param protein_separator: The separator to use when creating the label for protein groups. :return: A dict with the peptide sequence as key and the protein / protein group accession as value (single entry in a list for compatibility reasons). 
# create list of PeptideToProteinMappings # extract the smallest set of protein groups necessary to explain all peptides # start with the peptides mapping to the smallest number of proteins (ie. partially unique ones) # if one of the accessions is already retained, simply ignore the rest # create a new protein group # create a map of proteins to protein groups # remap the peptides # get all matching protein groups # if the protein is a subset protein, it was ignored # if the peptide matches multiple protein groups, ignore it # missing mappings are also represented by None Primary entry function for the CLI. # make sure the input files exist # make sure the output file does not exist # get the separator # load all peptides # map the proteins # if arguments["--protein_inference"]: # peptides_to_protein = do_protein_inference(peptides_to_protein, protein_separator) # write the new file | 3.304748 | 3 |
scripts/utils/stats.py | coastalcph/eacl2021-morpherror | 2 | 6622835 | from stability_selection import RandomizedLogisticRegression
class PatchedRLG(RandomizedLogisticRegression):
    """RandomizedLogisticRegression that runs an optional callback after each fit.

    All estimator options are forwarded unchanged to the parent class; the
    only addition is ``callback_fn``, invoked as ``callback_fn(self, X, y)``
    once fitting has finished.
    """

    def __init__(
        self,
        weakness=0.5,
        tol=1e-4,
        C=1.0,
        fit_intercept=True,
        intercept_scaling=1,
        class_weight=None,
        random_state=None,
        solver="liblinear",
        max_iter=100,
        multi_class="ovr",
        verbose=0,
        warm_start=False,
        n_jobs=1,
        callback_fn=None,
    ):
        # Forward every estimator option unchanged to the parent class.
        super().__init__(
            weakness=weakness, tol=tol, C=C,
            fit_intercept=fit_intercept, intercept_scaling=intercept_scaling,
            class_weight=class_weight, random_state=random_state,
            solver=solver, max_iter=max_iter, multi_class=multi_class,
            verbose=verbose, warm_start=warm_start, n_jobs=n_jobs,
        )
        # optional post-fit hook; None disables it
        self.callback_fn = callback_fn

    def fit(self, X, y, **kwargs):
        """Fit the parent estimator, then invoke the post-fit callback."""
        super().fit(X, y, **kwargs)
        callback = self.callback_fn
        if callback is not None:
            callback(self, X, y)
        return self
| from stability_selection import RandomizedLogisticRegression
class PatchedRLG(RandomizedLogisticRegression):
    # Extends RandomizedLogisticRegression with an optional post-fit callback.
    def __init__(
        self,
        weakness=0.5,
        tol=1e-4,
        C=1.0,
        fit_intercept=True,
        intercept_scaling=1,
        class_weight=None,
        random_state=None,
        solver="liblinear",
        max_iter=100,
        multi_class="ovr",
        verbose=0,
        warm_start=False,
        n_jobs=1,
        callback_fn=None,
    ):
        """Create the estimator; every option except callback_fn is passed through."""
        super().__init__(
            weakness=weakness,
            tol=tol,
            C=C,
            fit_intercept=fit_intercept,
            intercept_scaling=intercept_scaling,
            class_weight=class_weight,
            random_state=random_state,
            solver=solver,
            max_iter=max_iter,
            multi_class=multi_class,
            verbose=verbose,
            warm_start=warm_start,
            n_jobs=n_jobs,
        )
        # optional hook invoked as callback_fn(self, X, y) after each fit
        self.callback_fn = callback_fn

    def fit(self, X, y, **kwargs):
        """Fit the parent estimator, then run the callback (if configured)."""
        super().fit(X, y, **kwargs)
        if self.callback_fn is not None:
            self.callback_fn(self, X, y)
        return self
| none | 1 | 2.40511 | 2 | |
presearch_bot.py | jakiiii/presearch | 0 | 6622836 | <reponame>jakiiii/presearch<filename>presearch_bot.py<gh_stars>0
# NOTE(review): this script drives a live browser session; the statement
# order matters (login, search, scroll, open result), so only comments
# were added here.
from time import sleep
import random
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
# TODO: Make a Long List of keyword
all_keys = ["american legends", "income", "funny videos", "python for kids"]
# start Chrome and open the Presearch engine
driver = webdriver.Chrome()
driver.implicitly_wait(5)
driver.get("https://engine.presearch.org")
# manual login step; NOTE(review): "Menually" is a typo for "Manually" (left as-is)
print(input("Enter your Username and Password Menually then enter 1: "))
# search for a randomly chosen keyword
search_query = driver.find_element_by_name('q')
search_query.send_keys(random.choice(all_keys))
search_query.send_keys(Keys.RETURN)
sleep(5)
# scroll down and back up to mimic a human reader
driver.execute_script("window.scrollBy(0, 800);")
sleep(3)
driver.execute_script("window.scrollBy(0, -750);")
sleep(2)
# grab the first organic result link
links = driver.find_elements_by_xpath('//h3/a[@href]')
link = driver.find_elements_by_xpath('//h3/a[@href]')[0]
url = link.get_attribute('href')
# tab to the first result and open it in a new tab via ctrl+shift+enter
actions = ActionChains(driver)
tab = actions.send_keys(Keys.TAB * 20)
tab.perform()
new = tab.key_down(Keys.CONTROL).key_down(Keys.SHIFT).send_keys(Keys.ENTER)
new.perform()
# visit the new tab for a while, then return to the results page
driver.switch_to.window(driver.window_handles[1])
sleep(10)
driver.close()
driver.switch_to.window(driver.window_handles[0])
# ctrl + shift + enter => open new tab with link
'''
driver.execute_script("window.open('about:blank', 'tab2');")
driver.switch_to.window("tab2")
driver.get(url)
sleep(10)
driver.close()
driver.switch_to.window(driver.window_handles[0])
'''
'''
sel = Selector(text=driver.page_source)
link = sel.xpath('//h3/a/@href').extract()[0]
driver.execute_script("window.open()")
driver.switch_to.window(driver.window_handles[1])
driver.get(link)
sleep(10)
driver.close()
driver.switch_to.window(driver.window_handles[0])
'''
# tidy up the browser session
sleep(20)
driver.close()
driver.quit()
'''
from time import sleep
import random
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
all_keys = ["american legends", "income", "funny videos", "python for kids"]
driver = webdriver.Chrome()
driver.get("https://engine.presearch.org")
search_query = driver.find_element_by_name('q')
search_query.send_keys(random.choice(all_keys))
search_query.send_keys(Keys.RETURN)
'''
import random
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
# TODO: Make a Long List of keyword
all_keys = ["american legends", "income", "funny videos", "python for kids"]
driver = webdriver.Chrome()
driver.implicitly_wait(5)
driver.get("https://engine.presearch.org")
print(input("Enter your Username and Password Menually then enter 1: "))
search_query = driver.find_element_by_name('q')
search_query.send_keys(random.choice(all_keys))
search_query.send_keys(Keys.RETURN)
sleep(5)
driver.execute_script("window.scrollBy(0, 800);")
sleep(3)
driver.execute_script("window.scrollBy(0, -750);")
sleep(2)
links = driver.find_elements_by_xpath('//h3/a[@href]')
link = driver.find_elements_by_xpath('//h3/a[@href]')[0]
url = link.get_attribute('href')
actions = ActionChains(driver)
tab = actions.send_keys(Keys.TAB * 20)
tab.perform()
new = tab.key_down(Keys.CONTROL).key_down(Keys.SHIFT).send_keys(Keys.ENTER)
new.perform()
driver.switch_to.window(driver.window_handles[1])
sleep(10)
driver.close()
driver.switch_to.window(driver.window_handles[0])
# ctrl + shift + enter => open new tab with link
'''
driver.execute_script("window.open('about:blank', 'tab2');")
driver.switch_to.window("tab2")
driver.get(url)
sleep(10)
driver.close()
driver.switch_to.window(driver.window_handles[0])
'''
'''
sel = Selector(text=driver.page_source)
link = sel.xpath('//h3/a/@href').extract()[0]
driver.execute_script("window.open()")
driver.switch_to.window(driver.window_handles[1])
driver.get(link)
sleep(10)
driver.close()
driver.switch_to.window(driver.window_handles[0])
'''
sleep(20)
driver.close()
driver.quit()
'''
from time import sleep
import random
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
all_keys = ["american legends", "income", "funny videos", "python for kids"]
driver = webdriver.Chrome()
driver.get("https://engine.presearch.org")
search_query = driver.find_element_by_name('q')
search_query.send_keys(random.choice(all_keys))
search_query.send_keys(Keys.RETURN)
''' | en | 0.332556 | # TODO: Make a Long List of keyword # ctrl + shift + enter => open new tab with link driver.execute_script("window.open('about:blank', 'tab2');") driver.switch_to.window("tab2") driver.get(url) sleep(10) driver.close() driver.switch_to.window(driver.window_handles[0]) sel = Selector(text=driver.page_source) link = sel.xpath('//h3/a/@href').extract()[0] driver.execute_script("window.open()") driver.switch_to.window(driver.window_handles[1]) driver.get(link) sleep(10) driver.close() driver.switch_to.window(driver.window_handles[0]) from time import sleep import random from selenium import webdriver from selenium.webdriver.chrome.options import Options from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.action_chains import ActionChains all_keys = ["american legends", "income", "funny videos", "python for kids"] driver = webdriver.Chrome() driver.get("https://engine.presearch.org") search_query = driver.find_element_by_name('q') search_query.send_keys(random.choice(all_keys)) search_query.send_keys(Keys.RETURN) | 2.999301 | 3 |
molecule/resources/tests/test_software.py | mesaguy/ansible-hashicorp | 2 | 6622837 | import os
import pytest

# The set of products under test may be narrowed via the (comma separated)
# HASHICORP_SOFTWARE_NAMES environment variable; otherwise the full set runs.
if os.getenv('HASHICORP_SOFTWARE_NAMES') is None:
    SOFTWARE_NAMES = [
        'boundary',
        'consul',
        'consul-template',
        'envconsul',
        'nomad',
        'packer',
        'sentinel',
        'serf',
        'terraform',
        'vagrant',
        'vault',
        'vault-ssh-helper',
        'waypoint',
    ]
else:
    SOFTWARE_NAMES = os.getenv('HASHICORP_SOFTWARE_NAMES').split(',')
def check_binary_symlink(binary_file, dest_path):
    """Assert that *binary_file* is a root-owned symlink pointing at *dest_path*."""
    assert binary_file.exists
    assert binary_file.is_symlink
    assert binary_file.user == 'root'
    assert binary_file.group in ('staff', 'root')
    # Bug fix: the comparison below was a bare expression whose result was
    # discarded, so a wrong symlink target could never fail the check.
    assert binary_file.linked_to == dest_path
def software_base_directory(hashicorp_dir, name):
    """Return the installation directory for *name* below *hashicorp_dir*."""
    return "/".join((hashicorp_dir, name))
def software_version(hashicorp_versions, name):
    """Look up the expected version of *name*.

    Version keys use underscores where product names use dashes.
    """
    key = name.replace("-", "_")
    assert key in hashicorp_versions
    return hashicorp_versions[key]
@pytest.mark.parametrize('name', SOFTWARE_NAMES)
def test_software_base_dir(name, host, hashicorp_dir, hashicorp_versions):
    """The product base dir exists, is root-owned 0755 and holds exactly
    the 'active' link plus the current version directory."""
    dir_base = software_base_directory(hashicorp_dir, name)
    version = software_version(hashicorp_versions, name)
    directory = host.file(dir_base)
    assert directory.exists
    assert directory.is_directory
    assert directory.group == 'root'
    assert directory.user == 'root'
    assert directory.mode == 0o755
    # Contains two directories, "active" and the current version
    directory_list = directory.listdir()
    assert 'active' in directory_list
    assert version in directory_list
    assert len(directory.listdir()) == 2, f'Directories: {directory_list}'
@pytest.mark.parametrize('name', SOFTWARE_NAMES)
def test_software_version_dir(name, host, hashicorp_dir, hashicorp_versions):
    """The versioned directory is root-owned 0755 and contains only the binary."""
    dir_base = software_base_directory(hashicorp_dir, name)
    version = software_version(hashicorp_versions, name)
    dir_version = f'{dir_base}/{version}'
    directory = host.file(dir_version)
    assert directory.exists
    assert directory.is_directory
    assert directory.group == 'root'
    assert directory.user == 'root'
    assert directory.mode == 0o755
    # Contains just one file, the binary
    assert len(directory.listdir()) == 1
@pytest.mark.parametrize('name', SOFTWARE_NAMES)
def test_software_binary(name, host, hashicorp_dir, hashicorp_versions):
    """The installed binary is a root-owned executable of non-trivial size."""
    dir_base = software_base_directory(hashicorp_dir, name)
    version = software_version(hashicorp_versions, name)
    binary_file_path = f'{dir_base}/{version}/{name}'
    binary_file = host.file(binary_file_path)
    assert binary_file.exists
    assert binary_file.is_file
    assert binary_file.group == 'root'
    assert binary_file.user == 'root'
    assert binary_file.mode == 0o755
    # sanity check: a real binary should be larger than 10 kB
    assert binary_file.size > 10000
@pytest.mark.parametrize('name', SOFTWARE_NAMES)
def test_software_binary_symlink(name, host, binary_symlink_dir, hashicorp_dir,
                                 hashicorp_versions):
    """The convenience symlink in the binary dir points at the versioned binary."""
    dir_base = software_base_directory(hashicorp_dir, name)
    version = software_version(hashicorp_versions, name)
    dest_path = f'{dir_base}/{version}/{name}'
    symlink_file = host.file(f'{binary_symlink_dir}/{name}')
    assert symlink_file.exists
    assert symlink_file.is_symlink
    assert symlink_file.group in ('staff', 'root')
    assert symlink_file.user == 'root'
    # Bug fix: the target comparison was a bare expression whose result was
    # discarded, so a wrong symlink target never failed this test.
    assert symlink_file.linked_to == dest_path
@pytest.mark.parametrize('name', SOFTWARE_NAMES)
def test_software_active_dir_symlink(name, host, hashicorp_dir, hashicorp_versions):
    """The 'active' symlink points at the currently installed version directory."""
    dir_base = software_base_directory(hashicorp_dir, name)
    version = software_version(hashicorp_versions, name)
    dest_path = f'{dir_base}/{version}'
    active_symlink = host.file(f'{dir_base}/active')
    assert active_symlink.exists
    assert active_symlink.is_symlink
    assert active_symlink.user == 'root'
    assert active_symlink.group in ('staff', 'root')
    # Bug fix: the target comparison was a bare expression whose result was
    # discarded, so a wrong symlink target never failed this test.
    assert active_symlink.linked_to == dest_path
import os
import pytest

# The set of products under test may be narrowed via the (comma separated)
# HASHICORP_SOFTWARE_NAMES environment variable; otherwise the full set runs.
if os.getenv('HASHICORP_SOFTWARE_NAMES') is None:
    SOFTWARE_NAMES = [
        'boundary',
        'consul',
        'consul-template',
        'envconsul',
        'nomad',
        'packer',
        'sentinel',
        'serf',
        'terraform',
        'vagrant',
        'vault',
        'vault-ssh-helper',
        'waypoint',
    ]
else:
    SOFTWARE_NAMES = os.getenv('HASHICORP_SOFTWARE_NAMES').split(',')
def check_binary_symlink(binary_file, dest_path):
    """Assert that *binary_file* is a root-owned symlink pointing at *dest_path*."""
    assert binary_file.exists
    assert binary_file.is_symlink
    assert binary_file.user == 'root'
    assert binary_file.group in ('staff', 'root')
    # Bug fix: the comparison below was a bare expression whose result was
    # discarded, so a wrong symlink target could never fail the check.
    assert binary_file.linked_to == dest_path
def software_base_directory(hashicorp_dir, name):
    # Installation directory of *name* below the hashicorp base directory.
    return f'{hashicorp_dir}/{name}'
def software_version(hashicorp_versions, name):
    # Version keys use underscores where product names use dashes.
    safe_name = name.replace("-", "_")
    assert safe_name in hashicorp_versions
    return hashicorp_versions[safe_name]
@pytest.mark.parametrize('name', SOFTWARE_NAMES)
def test_software_base_dir(name, host, hashicorp_dir, hashicorp_versions):
    """The product base dir exists, is root-owned 0755 and holds exactly
    the 'active' link plus the current version directory."""
    dir_base = software_base_directory(hashicorp_dir, name)
    version = software_version(hashicorp_versions, name)
    directory = host.file(dir_base)
    assert directory.exists
    assert directory.is_directory
    assert directory.group == 'root'
    assert directory.user == 'root'
    assert directory.mode == 0o755
    # Contains two directories, "active" and the current version
    directory_list = directory.listdir()
    assert 'active' in directory_list
    assert version in directory_list
    assert len(directory.listdir()) == 2, f'Directories: {directory_list}'
@pytest.mark.parametrize('name', SOFTWARE_NAMES)
def test_software_version_dir(name, host, hashicorp_dir, hashicorp_versions):
    """The versioned directory is root-owned 0755 and contains only the binary."""
    dir_base = software_base_directory(hashicorp_dir, name)
    version = software_version(hashicorp_versions, name)
    dir_version = f'{dir_base}/{version}'
    directory = host.file(dir_version)
    assert directory.exists
    assert directory.is_directory
    assert directory.group == 'root'
    assert directory.user == 'root'
    assert directory.mode == 0o755
    # Contains just one file, the binary
    assert len(directory.listdir()) == 1
@pytest.mark.parametrize('name', SOFTWARE_NAMES)
def test_software_binary(name, host, hashicorp_dir, hashicorp_versions):
    """The installed binary is a root-owned executable of non-trivial size."""
    dir_base = software_base_directory(hashicorp_dir, name)
    version = software_version(hashicorp_versions, name)
    binary_file_path = f'{dir_base}/{version}/{name}'
    binary_file = host.file(binary_file_path)
    assert binary_file.exists
    assert binary_file.is_file
    assert binary_file.group == 'root'
    assert binary_file.user == 'root'
    assert binary_file.mode == 0o755
    # sanity check: a real binary should be larger than 10 kB
    assert binary_file.size > 10000
@pytest.mark.parametrize('name', SOFTWARE_NAMES)
def test_software_binary_symlink(name, host, binary_symlink_dir, hashicorp_dir,
                                 hashicorp_versions):
    """The convenience symlink in the binary dir points at the versioned binary."""
    dir_base = software_base_directory(hashicorp_dir, name)
    version = software_version(hashicorp_versions, name)
    dest_path = f'{dir_base}/{version}/{name}'
    symlink_file = host.file(f'{binary_symlink_dir}/{name}')
    assert symlink_file.exists
    assert symlink_file.is_symlink
    assert symlink_file.group in ('staff', 'root')
    assert symlink_file.user == 'root'
    # Bug fix: the target comparison was a bare expression whose result was
    # discarded, so a wrong symlink target never failed this test.
    assert symlink_file.linked_to == dest_path
@pytest.mark.parametrize('name', SOFTWARE_NAMES)
def test_software_active_dir_symlink(name, host, hashicorp_dir, hashicorp_versions):
    """The 'active' symlink points at the currently installed version directory."""
    dir_base = software_base_directory(hashicorp_dir, name)
    version = software_version(hashicorp_versions, name)
    dest_path = f'{dir_base}/{version}'
    active_symlink = host.file(f'{dir_base}/active')
    assert active_symlink.exists
    assert active_symlink.is_symlink
    assert active_symlink.user == 'root'
    assert active_symlink.group in ('staff', 'root')
    # Bug fix: the target comparison was a bare expression whose result was
    # discarded, so a wrong symlink target never failed this test.
    assert active_symlink.linked_to == dest_path
mapper/train.py | yubin1219/CLIP_PJ | 1 | 6622838 | <reponame>yubin1219/CLIP_PJ
import os
import json
import sys
import pprint
from argparse import Namespace
# make the repository root importable when this script is run directly
sys.path.append(".")
sys.path.append("..")
from mapper.options.train_options import TrainOptions
from mapper.utils import ensure_checkpoint_exists
from mapper.training.coach import Coach
def main(opts):
    """Create the experiment directory, persist the options and run training."""
    os.makedirs(opts.exp_dir, exist_ok=True)

    # echo the configuration and keep a JSON copy next to the results
    options = vars(opts)
    pprint.pprint(options)
    opt_path = os.path.join(opts.exp_dir, 'opt.json')
    with open(opt_path, 'w') as f:
        json.dump(options, f, indent=4, sort_keys=True)

    # optionally fetch the pretrained weight files before training starts
    if opts.model_download:
        ensure_checkpoint_exists(opts.stylegan_weights)
        ensure_checkpoint_exists(opts.ir_se50_weights)

    Coach(opts).train()
if __name__ == '__main__':
    # NOTE(review): the triple-quoted block below is an inert string literal
    # kept as reference documentation of every supported option; it never runs.
    """
    opt = {"exp_dir": "results/", # 저장할 파일
           "model_download": True, # True : 사용할 pretrained 파일들 download
           "data_mode": "color", # 변화시킬 style ["hair", "color", "female", "male", "multi"]
           "text_embed_mode": None, # "clip_encoder" : CLIP text encoder로 얻은 text embedding vector 사용 , None : nn.embedding으로 얻은 text embedding vector 사용
           "train_data": "train_data.pt", # "train_female" : female images만으로 구성 , "train_male" : male images만으로 구성
           "test_data" : "test_data.pt", # "test_female" , "test_male"
           "mapper_mode" : "Mapper_sum", # "Mapper_cat" : text vector와 concat , "Mapper_multi" : 하나의 모델에서 여러 style 학습
           "mapper_type" : "LevelsMapper", # "SingleMapper" : mapper를 3부분(coarse/medium/fine)으로 나누지 않음
           "no_coarse_mapper" : False, # True : coarse mapper 사용하지 않음 , False : coarse mapper 사용함
           "no_medium_mapper" : False, # True : medium mapper 사용하지 않음 , False : medium mapper 사용함
           "no_fine_mapper" : False, # True : fine mapper 사용하지 않음 , False : fine mapper 사용함
           "train_dataset_size" : 5000, # 사용할 Train data 크기
           "test_dataset_size" : 1000, # 사용할 Validation data 크기
           "batch_size" : 1, # Train set batch size
           "test_batch_size" : 1, # Validation set batch size
           "workers" : 1, # Train 과정 시 사용할 GPU 개수
           "test_workers" : 1, # Test 과정 시 사용할 GPU 개수
           "learning_rate" : 0.5, # Learning rate
           "optim_name" : "ranger", # optimaizer 선택 : "Adam" , "ranger"
           "id_lambda" : 0.1, # identity loss 가중치
           "clip_lambda" : 1, # clip loss 가중치
           "latent_l2_lambda" : 0.8, # L2 loss 가중치
           "stylegan_weights": "stylegan2-ffhq-config-f.pt", # stylegan2 pretrained model weights 파일
           "stylegan_size" : 1024, # stylegan에서 생성된 이미지 크기
           "ir_se50_weights" : "model_ir_se50.pth", # identity loss에 사용되는 pretrained model의 weights 파일
           "max_steps" : 50000, # global step 몇 회까지 돌릴 것인지
           "board_interval" : 50, # global step 몇 회 간격으로 loss 기록할 것인지
           "image_interval" : 1000, # global step 몇 회 간격으로 generative image 저장할 것인지
           "val_interval" : 1000, # global step 몇 회 간격으로 validate 수행할 것인지
           "save_interval" : 1000, # global step 몇 회 간격으로 모델 저장할 것인지
           "checkpoint_path" : None # 학습된 모델 파일 불러와서 진행할 시 입력 예) "color_sum.pt"
           }
    opts = Namespace(**opt)
    """
    # parse the CLI options and start the run
    opts = TrainOptions().parse()
    main(opts)
import os
import json
import sys
import pprint
from argparse import Namespace
# make the repository root importable when this script is run directly
sys.path.append(".")
sys.path.append("..")
from mapper.options.train_options import TrainOptions
from mapper.utils import ensure_checkpoint_exists
from mapper.training.coach import Coach
def main(opts):
    """Persist the run options under exp_dir and launch the training coach."""
    os.makedirs(opts.exp_dir, exist_ok=True)
    opts_dict = vars(opts)
    pprint.pprint(opts_dict)
    # keep a JSON copy of the configuration next to the results
    with open(os.path.join(opts.exp_dir, 'opt.json'), 'w') as f:
        json.dump(opts_dict, f, indent=4, sort_keys=True)
    # optionally fetch the pretrained StyleGAN / IR-SE50 weights first
    if opts.model_download:
        ensure_checkpoint_exists(opts.stylegan_weights)
        ensure_checkpoint_exists(opts.ir_se50_weights)
    coach = Coach(opts)
    coach.train()
if __name__ == '__main__':
    # NOTE(review): the triple-quoted block below is an inert string literal
    # kept as reference documentation of every supported option; it never runs.
    """
    opt = {"exp_dir": "results/", # 저장할 파일
           "model_download": True, # True : 사용할 pretrained 파일들 download
           "data_mode": "color", # 변화시킬 style ["hair", "color", "female", "male", "multi"]
           "text_embed_mode": None, # "clip_encoder" : CLIP text encoder로 얻은 text embedding vector 사용 , None : nn.embedding으로 얻은 text embedding vector 사용
           "train_data": "train_data.pt", # "train_female" : female images만으로 구성 , "train_male" : male images만으로 구성
           "test_data" : "test_data.pt", # "test_female" , "test_male"
           "mapper_mode" : "Mapper_sum", # "Mapper_cat" : text vector와 concat , "Mapper_multi" : 하나의 모델에서 여러 style 학습
           "mapper_type" : "LevelsMapper", # "SingleMapper" : mapper를 3부분(coarse/medium/fine)으로 나누지 않음
           "no_coarse_mapper" : False, # True : coarse mapper 사용하지 않음 , False : coarse mapper 사용함
           "no_medium_mapper" : False, # True : medium mapper 사용하지 않음 , False : medium mapper 사용함
           "no_fine_mapper" : False, # True : fine mapper 사용하지 않음 , False : fine mapper 사용함
           "train_dataset_size" : 5000, # 사용할 Train data 크기
           "test_dataset_size" : 1000, # 사용할 Validation data 크기
           "batch_size" : 1, # Train set batch size
           "test_batch_size" : 1, # Validation set batch size
           "workers" : 1, # Train 과정 시 사용할 GPU 개수
           "test_workers" : 1, # Test 과정 시 사용할 GPU 개수
           "learning_rate" : 0.5, # Learning rate
           "optim_name" : "ranger", # optimaizer 선택 : "Adam" , "ranger"
           "id_lambda" : 0.1, # identity loss 가중치
           "clip_lambda" : 1, # clip loss 가중치
           "latent_l2_lambda" : 0.8, # L2 loss 가중치
           "stylegan_weights": "stylegan2-ffhq-config-f.pt", # stylegan2 pretrained model weights 파일
           "stylegan_size" : 1024, # stylegan에서 생성된 이미지 크기
           "ir_se50_weights" : "model_ir_se50.pth", # identity loss에 사용되는 pretrained model의 weights 파일
           "max_steps" : 50000, # global step 몇 회까지 돌릴 것인지
           "board_interval" : 50, # global step 몇 회 간격으로 loss 기록할 것인지
           "image_interval" : 1000, # global step 몇 회 간격으로 generative image 저장할 것인지
           "val_interval" : 1000, # global step 몇 회 간격으로 validate 수행할 것인지
           "save_interval" : 1000, # global step 몇 회 간격으로 모델 저장할 것인지
           "checkpoint_path" : None # 학습된 모델 파일 불러와서 진행할 시 입력 예) "color_sum.pt"
           }
    opts = Namespace(**opt)
    """
    # parse the CLI options and start the run
    opts = TrainOptions().parse()
main(opts) | ko | 0.764253 | opt = {"exp_dir": "results/", # 저장할 파일 "model_download": True, # True : 사용할 pretrained 파일들 download "data_mode": "color", # 변화시킬 style ["hair", "color", "female", "male", "multi"] "text_embed_mode": None, # "clip_encoder" : CLIP text encoder로 얻은 text embedding vector 사용 , None : nn.embedding으로 얻은 text embedding vector 사용 "train_data": "train_data.pt", # "train_female" : female images만으로 구성 , "train_male" : male images만으로 구성 "test_data" : "test_data.pt", # "test_female" , "test_male" "mapper_mode" : "Mapper_sum", # "Mapper_cat" : text vector와 concat , "Mapper_multi" : 하나의 모델에서 여러 style 학습 "mapper_type" : "LevelsMapper", # "SingleMapper" : mapper를 3부분(coarse/medium/fine)으로 나누지 않음 "no_coarse_mapper" : False, # True : coarse mapper 사용하지 않음 , False : coarse mapper 사용함 "no_medium_mapper" : False, # True : medium mapper 사용하지 않음 , False : medium mapper 사용함 "no_fine_mapper" : False, # True : fine mapper 사용하지 않음 , False : fine mapper 사용함 "train_dataset_size" : 5000, # 사용할 Train data 크기 "test_dataset_size" : 1000, # 사용할 Validation data 크기 "batch_size" : 1, # Train set batch size "test_batch_size" : 1, # Validation set batch size "workers" : 1, # Train 과정 시 사용할 GPU 개수 "test_workers" : 1, # Test 과정 시 사용할 GPU 개수 "learning_rate" : 0.5, # Learning rate "optim_name" : "ranger", # optimaizer 선택 : "Adam" , "ranger" "id_lambda" : 0.1, # identity loss 가중치 "clip_lambda" : 1, # clip loss 가중치 "latent_l2_lambda" : 0.8, # L2 loss 가중치 "stylegan_weights": "stylegan2-ffhq-config-f.pt", # stylegan2 pretrained model weights 파일 "stylegan_size" : 1024, # stylegan에서 생성된 이미지 크기 "ir_se50_weights" : "model_ir_se50.pth", # identity loss에 사용되는 pretrained model의 weights 파일 "max_steps" : 50000, # global step 몇 회까지 돌릴 것인지 "board_interval" : 50, # global step 몇 회 간격으로 loss 기록할 것인지 "image_interval" : 1000, # global step 몇 회 간격으로 generative image 저장할 것인지 "val_interval" : 1000, # global step 몇 회 간격으로 validate 수행할 것인지 "save_interval" : 1000, # global step 몇 회 간격으로 모델 저장할 것인지 
"checkpoint_path" : None # 학습된 모델 파일 불러와서 진행할 시 입력 예) "color_sum.pt" } opts = Namespace(**opt) | 2.228352 | 2 |
experiments/radix/nv/radix.py | enjalot/adventures_in_opencl | 152 | 6622839 | <gh_stars>100-1000
#http://documen.tician.de/pyopencl/
import pyopencl as cl
import numpy as np
import struct
import timing
# module-level timing collector used by the @timings decorators below
timings = timing.Timing()
#ctx = cl.create_some_context()
# shorthand for the OpenCL memory-flag constants
mf = cl.mem_flags
# GPU radix sort of key/value pairs (Python 2 / PyOpenCL port of the
# NVIDIA RadixSort sample).
class Radix:
    def __init__(self, max_elements, cta_size, dtype):
        """Set up the OpenCL context, build the kernels and allocate the
        scratch buffers needed to sort up to max_elements key/value pairs.

        cta_size is the work-group size used by the kernels; dtype's
        itemsize drives every buffer allocation.
        """
        self.WARP_SIZE = 32
        self.SCAN_WG_SIZE = 256
        self.MIN_LARGE_ARRAY_SIZE = 4 * self.SCAN_WG_SIZE
        self.bit_step = 4  # radix bits handled per sorting pass
        self.cta_size = cta_size
        self.uintsz = dtype.itemsize
        # use the first device of the first available platform
        plat = cl.get_platforms()[0]
        device = plat.get_devices()[0]
        self.ctx = cl.Context(devices=[device])
        self.queue = cl.CommandQueue(self.ctx, device)
        self.loadProgram()
        # each work-group processes cta_size * 4 elements; round up
        # (NOTE: integer division relies on Python 2 semantics)
        if (max_elements % (cta_size * 4)) == 0:
            num_blocks = max_elements / (cta_size * 4)
        else:
            num_blocks = max_elements / (cta_size * 4) + 1
        #print "num_blocks: ", num_blocks
        # scratch buffers for the partially sorted keys/values plus the
        # per-block digit counters and offsets
        self.d_tempKeys = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * max_elements)
        self.d_tempValues = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * max_elements)
        self.mCounters = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * self.WARP_SIZE * num_blocks)
        self.mCountersSum = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * self.WARP_SIZE * num_blocks)
        self.mBlockOffsets = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * self.WARP_SIZE * num_blocks)
        numscan = max_elements/2/cta_size*16
        #print "numscan", numscan
        # large arrays additionally need a buffer for the block-sum scan
        if numscan >= self.MIN_LARGE_ARRAY_SIZE:
            #MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE 1024
            self.scan_buffer = cl.Buffer(self.ctx, mf.READ_WRITE, size = self.uintsz * numscan / 1024)
def loadProgram(self):
print "build scan"
f = open("Scan_b.cl", 'r')
fstr = "".join(f.readlines())
self.scan_prg = cl.Program(self.ctx, fstr).build()
print "build radix"
f = open("RadixSort.cl", 'r')
fstr = "".join(f.readlines())
self.radix_prg = cl.Program(self.ctx, fstr).build()
    @timings("Radix Sort")
    def sort(self, num, keys_np, values_np):
        """Sort num key/value pairs in place; the numpy arrays are
        uploaded, sorted on the device one radix pass at a time and
        read back. Returns the (sorted) input arrays."""
        self.keys = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=keys_np)
        self.values = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=values_np)
        key_bits = keys_np.dtype.itemsize * 8
        #print "numElements", num
        #print "key_bits", key_bits
        #print "bit_step", self.bit_step
        # one pass per bit_step bits until the full key width is covered
        i = 0
        while key_bits > i*self.bit_step:
            #print "i*bit_step", i*self.bit_step
            self.step(self.bit_step, i*self.bit_step, num);
            i += 1;
        self.queue.finish()
        # copy the sorted data back into the caller's arrays
        cl.enqueue_read_buffer(self.queue, self.keys, keys_np).wait()
        cl.enqueue_read_buffer(self.queue, self.values, values_np).wait()
        return keys_np, values_np
    @timings("Radix: step")
    def step(self, nbits, startbit, num):
        """Perform one radix pass over nbits bits starting at startbit."""
        # 1) locally sort each block by the current digit
        self.blocks(nbits, startbit, num)
        self.queue.finish()
        # 2) count digit occurrences and record per-block offsets
        self.find_offsets(startbit, num)
        self.queue.finish()
        array_length = num/2/self.cta_size*16
        #print "array length in step", array_length
        # 3) prefix-sum the counters (naive single-group kernel for small arrays)
        if array_length < self.MIN_LARGE_ARRAY_SIZE:
            self.naive_scan(num)
        else:
            self.scan(self.mCountersSum, self.mCounters, 1, array_length);
        self.queue.finish()
        #self.naive_scan(num)
        # 4) reorder the elements according to the scanned offsets
        self.reorder(startbit, num)
        self.queue.finish()
    @timings("Radix: blocks")
    def blocks(self, nbits, startbit, num):
        """Launch the radixSortBlocksKeysValues kernel: sort each block of
        4*cta_size elements locally by the current digit into the temp buffers."""
        totalBlocks = num/4/self.cta_size
        global_size = (self.cta_size*totalBlocks,)
        local_size = (self.cta_size,)
        blocks_args = ( self.keys,
                        self.values,
                        self.d_tempKeys,
                        self.d_tempValues,
                        np.uint32(nbits),
                        np.uint32(startbit),
                        np.uint32(num),
                        np.uint32(totalBlocks),
                        cl.LocalMemory(4*self.cta_size*self.uintsz),
                        cl.LocalMemory(4*self.cta_size*self.uintsz)
                        )
        self.radix_prg.radixSortBlocksKeysValues(self.queue, global_size, local_size, *(blocks_args)).wait()
        #self.radix_prg.radixSortBlocksKeysOnly(self.queue, global_size, local_size, *(blocks_args)).wait()
@timings("Radix: find offsets")
def find_offsets(self, startbit, num):
totalBlocks = num/2/self.cta_size
global_size = (self.cta_size*totalBlocks,)
local_size = (self.cta_size,)
offsets_args = ( self.d_tempKeys,
self.d_tempValues,
self.mCounters,
self.mBlockOffsets,
np.uint32(startbit),
np.uint32(num),
np.uint32(totalBlocks),
cl.LocalMemory(2*self.cta_size*self.uintsz),
)
self.radix_prg.findRadixOffsets(self.queue, global_size, local_size, *(offsets_args)).wait()
@timings("Radix: naive scan")
def naive_scan(self, num):
nhist = num/2/self.cta_size*16
global_size = (nhist,)
local_size = (nhist,)
extra_space = nhist / 16 #NUM_BANKS defined as 16 in RadixSort.cpp
shared_mem_size = self.uintsz * (nhist + extra_space)
scan_args = ( self.mCountersSum,
self.mCounters,
np.uint32(nhist),
cl.LocalMemory(2*shared_mem_size)
)
self.radix_prg.scanNaive(self.queue, global_size, local_size, *(scan_args)).wait()
@timings("Radix: scan")
def scan(self, dst, src, batch_size, array_length):
self.scan_local1( dst,
src,
batch_size * array_length / (4 * self.SCAN_WG_SIZE),
4 * self.SCAN_WG_SIZE)
self.queue.finish()
self.scan_local2( dst,
src,
batch_size,
array_length / (4 * self.SCAN_WG_SIZE))
self.queue.finish()
self.scan_update(dst, batch_size * array_length / (4 * self.SCAN_WG_SIZE))
self.queue.finish()
@timings("Scan: local1")
def scan_local1(self, dst, src, n, size):
global_size = (n * size / 4,)
local_size = (self.SCAN_WG_SIZE,)
scan_args = ( dst,
src,
cl.LocalMemory(2 * self.SCAN_WG_SIZE * self.uintsz),
np.uint32(size)
)
self.scan_prg.scanExclusiveLocal1(self.queue, global_size, local_size, *(scan_args)).wait()
@timings("Scan: local2")
def scan_local2(self, dst, src, n, size):
elements = n * size
dividend = elements
divisor = self.SCAN_WG_SIZE
if dividend % divisor == 0:
global_size = (dividend,)
else:
global_size = (dividend - dividend % divisor + divisor,)
local_size = (self.SCAN_WG_SIZE, )
scan_args = ( self.scan_buffer,
dst,
src,
cl.LocalMemory(2 * self.SCAN_WG_SIZE * self.uintsz),
np.uint32(elements),
np.uint32(size)
)
self.scan_prg.scanExclusiveLocal2(self.queue, global_size, local_size, *(scan_args)).wait()
@timings("Scan: update")
def scan_update(self, dst, n):
global_size = (n * self.SCAN_WG_SIZE,)
local_size = (self.SCAN_WG_SIZE,)
scan_args = ( dst,
self.scan_buffer
)
self.scan_prg.uniformUpdate(self.queue, global_size, local_size, *(scan_args)).wait()
@timings("Scan: reorder")
def reorder(self, startbit, num):
totalBlocks = num/2/self.cta_size
global_size = (self.cta_size*totalBlocks,)
local_size = (self.cta_size,)
reorder_args = ( self.keys,
self.values,
self.d_tempKeys,
self.d_tempValues,
self.mBlockOffsets,
self.mCountersSum,
self.mCounters,
np.uint32(startbit),
np.uint32(num),
np.uint32(totalBlocks),
cl.LocalMemory(2*self.cta_size*self.uintsz),
cl.LocalMemory(2*self.cta_size*self.uintsz)
)
self.radix_prg.reorderDataKeysValues(self.queue, global_size, local_size, *(reorder_args))
#self.radix_prg.reorderDataKeysOnly(self.queue, global_size, local_size, *(reorder_args))
if __name__ == "__main__":
n = 1048576*2
#n = 32768*2
#n = 16384
#n = 8192
hashes = np.ndarray((n,1), dtype=np.uint32)
indices = np.ndarray((n,1), dtype=np.uint32)
for i in xrange(0,n):
hashes[i] = n - i
indices[i] = i
npsorted = np.sort(hashes,0)
print "hashes before:", hashes[0:20].T
print "indices before: ", indices[0:20].T
radix = Radix(n, 128, hashes.dtype)
#num_to_sort = 32768
num_to_sort = n
hashes, indices = radix.sort(num_to_sort, hashes, indices)
print "hashes after:", hashes[0:20].T
print "indices after: ", indices[0:20].T
print np.linalg.norm(hashes - npsorted)
print timings
| #http://documen.tician.de/pyopencl/
import pyopencl as cl
import numpy as np
import struct
import timing
timings = timing.Timing()
#ctx = cl.create_some_context()
mf = cl.mem_flags
class Radix:
def __init__(self, max_elements, cta_size, dtype):
self.WARP_SIZE = 32
self.SCAN_WG_SIZE = 256
self.MIN_LARGE_ARRAY_SIZE = 4 * self.SCAN_WG_SIZE
self.bit_step = 4
self.cta_size = cta_size
self.uintsz = dtype.itemsize
plat = cl.get_platforms()[0]
device = plat.get_devices()[0]
self.ctx = cl.Context(devices=[device])
self.queue = cl.CommandQueue(self.ctx, device)
self.loadProgram()
if (max_elements % (cta_size * 4)) == 0:
num_blocks = max_elements / (cta_size * 4)
else:
num_blocks = max_elements / (cta_size * 4) + 1
#print "num_blocks: ", num_blocks
self.d_tempKeys = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * max_elements)
self.d_tempValues = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * max_elements)
self.mCounters = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * self.WARP_SIZE * num_blocks)
self.mCountersSum = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * self.WARP_SIZE * num_blocks)
self.mBlockOffsets = cl.Buffer(self.ctx, mf.READ_WRITE, size=self.uintsz * self.WARP_SIZE * num_blocks)
numscan = max_elements/2/cta_size*16
#print "numscan", numscan
if numscan >= self.MIN_LARGE_ARRAY_SIZE:
#MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE 1024
self.scan_buffer = cl.Buffer(self.ctx, mf.READ_WRITE, size = self.uintsz * numscan / 1024)
def loadProgram(self):
print "build scan"
f = open("Scan_b.cl", 'r')
fstr = "".join(f.readlines())
self.scan_prg = cl.Program(self.ctx, fstr).build()
print "build radix"
f = open("RadixSort.cl", 'r')
fstr = "".join(f.readlines())
self.radix_prg = cl.Program(self.ctx, fstr).build()
@timings("Radix Sort")
def sort(self, num, keys_np, values_np):
self.keys = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=keys_np)
self.values = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=values_np)
key_bits = keys_np.dtype.itemsize * 8
#print "numElements", num
#print "key_bits", key_bits
#print "bit_step", self.bit_step
i = 0
while key_bits > i*self.bit_step:
#print "i*bit_step", i*self.bit_step
self.step(self.bit_step, i*self.bit_step, num);
i += 1;
self.queue.finish()
cl.enqueue_read_buffer(self.queue, self.keys, keys_np).wait()
cl.enqueue_read_buffer(self.queue, self.values, values_np).wait()
return keys_np, values_np
@timings("Radix: step")
def step(self, nbits, startbit, num):
self.blocks(nbits, startbit, num)
self.queue.finish()
self.find_offsets(startbit, num)
self.queue.finish()
array_length = num/2/self.cta_size*16
#print "array length in step", array_length
if array_length < self.MIN_LARGE_ARRAY_SIZE:
self.naive_scan(num)
else:
self.scan(self.mCountersSum, self.mCounters, 1, array_length);
self.queue.finish()
#self.naive_scan(num)
self.reorder(startbit, num)
self.queue.finish()
@timings("Radix: blocks")
def blocks(self, nbits, startbit, num):
totalBlocks = num/4/self.cta_size
global_size = (self.cta_size*totalBlocks,)
local_size = (self.cta_size,)
blocks_args = ( self.keys,
self.values,
self.d_tempKeys,
self.d_tempValues,
np.uint32(nbits),
np.uint32(startbit),
np.uint32(num),
np.uint32(totalBlocks),
cl.LocalMemory(4*self.cta_size*self.uintsz),
cl.LocalMemory(4*self.cta_size*self.uintsz)
)
self.radix_prg.radixSortBlocksKeysValues(self.queue, global_size, local_size, *(blocks_args)).wait()
#self.radix_prg.radixSortBlocksKeysOnly(self.queue, global_size, local_size, *(blocks_args)).wait()
@timings("Radix: find offsets")
def find_offsets(self, startbit, num):
totalBlocks = num/2/self.cta_size
global_size = (self.cta_size*totalBlocks,)
local_size = (self.cta_size,)
offsets_args = ( self.d_tempKeys,
self.d_tempValues,
self.mCounters,
self.mBlockOffsets,
np.uint32(startbit),
np.uint32(num),
np.uint32(totalBlocks),
cl.LocalMemory(2*self.cta_size*self.uintsz),
)
self.radix_prg.findRadixOffsets(self.queue, global_size, local_size, *(offsets_args)).wait()
@timings("Radix: naive scan")
def naive_scan(self, num):
nhist = num/2/self.cta_size*16
global_size = (nhist,)
local_size = (nhist,)
extra_space = nhist / 16 #NUM_BANKS defined as 16 in RadixSort.cpp
shared_mem_size = self.uintsz * (nhist + extra_space)
scan_args = ( self.mCountersSum,
self.mCounters,
np.uint32(nhist),
cl.LocalMemory(2*shared_mem_size)
)
self.radix_prg.scanNaive(self.queue, global_size, local_size, *(scan_args)).wait()
@timings("Radix: scan")
def scan(self, dst, src, batch_size, array_length):
self.scan_local1( dst,
src,
batch_size * array_length / (4 * self.SCAN_WG_SIZE),
4 * self.SCAN_WG_SIZE)
self.queue.finish()
self.scan_local2( dst,
src,
batch_size,
array_length / (4 * self.SCAN_WG_SIZE))
self.queue.finish()
self.scan_update(dst, batch_size * array_length / (4 * self.SCAN_WG_SIZE))
self.queue.finish()
@timings("Scan: local1")
def scan_local1(self, dst, src, n, size):
global_size = (n * size / 4,)
local_size = (self.SCAN_WG_SIZE,)
scan_args = ( dst,
src,
cl.LocalMemory(2 * self.SCAN_WG_SIZE * self.uintsz),
np.uint32(size)
)
self.scan_prg.scanExclusiveLocal1(self.queue, global_size, local_size, *(scan_args)).wait()
@timings("Scan: local2")
def scan_local2(self, dst, src, n, size):
elements = n * size
dividend = elements
divisor = self.SCAN_WG_SIZE
if dividend % divisor == 0:
global_size = (dividend,)
else:
global_size = (dividend - dividend % divisor + divisor,)
local_size = (self.SCAN_WG_SIZE, )
scan_args = ( self.scan_buffer,
dst,
src,
cl.LocalMemory(2 * self.SCAN_WG_SIZE * self.uintsz),
np.uint32(elements),
np.uint32(size)
)
self.scan_prg.scanExclusiveLocal2(self.queue, global_size, local_size, *(scan_args)).wait()
@timings("Scan: update")
def scan_update(self, dst, n):
global_size = (n * self.SCAN_WG_SIZE,)
local_size = (self.SCAN_WG_SIZE,)
scan_args = ( dst,
self.scan_buffer
)
self.scan_prg.uniformUpdate(self.queue, global_size, local_size, *(scan_args)).wait()
@timings("Scan: reorder")
def reorder(self, startbit, num):
totalBlocks = num/2/self.cta_size
global_size = (self.cta_size*totalBlocks,)
local_size = (self.cta_size,)
reorder_args = ( self.keys,
self.values,
self.d_tempKeys,
self.d_tempValues,
self.mBlockOffsets,
self.mCountersSum,
self.mCounters,
np.uint32(startbit),
np.uint32(num),
np.uint32(totalBlocks),
cl.LocalMemory(2*self.cta_size*self.uintsz),
cl.LocalMemory(2*self.cta_size*self.uintsz)
)
self.radix_prg.reorderDataKeysValues(self.queue, global_size, local_size, *(reorder_args))
#self.radix_prg.reorderDataKeysOnly(self.queue, global_size, local_size, *(reorder_args))
if __name__ == "__main__":
n = 1048576*2
#n = 32768*2
#n = 16384
#n = 8192
hashes = np.ndarray((n,1), dtype=np.uint32)
indices = np.ndarray((n,1), dtype=np.uint32)
for i in xrange(0,n):
hashes[i] = n - i
indices[i] = i
npsorted = np.sort(hashes,0)
print "hashes before:", hashes[0:20].T
print "indices before: ", indices[0:20].T
radix = Radix(n, 128, hashes.dtype)
#num_to_sort = 32768
num_to_sort = n
hashes, indices = radix.sort(num_to_sort, hashes, indices)
print "hashes after:", hashes[0:20].T
print "indices after: ", indices[0:20].T
print np.linalg.norm(hashes - npsorted)
print timings | en | 0.247864 | #http://documen.tician.de/pyopencl/ #ctx = cl.create_some_context() #print "num_blocks: ", num_blocks #print "numscan", numscan #MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE 1024 #print "numElements", num #print "key_bits", key_bits #print "bit_step", self.bit_step #print "i*bit_step", i*self.bit_step #print "array length in step", array_length #self.naive_scan(num) #self.radix_prg.radixSortBlocksKeysOnly(self.queue, global_size, local_size, *(blocks_args)).wait() #NUM_BANKS defined as 16 in RadixSort.cpp #self.radix_prg.reorderDataKeysOnly(self.queue, global_size, local_size, *(reorder_args)) #n = 32768*2 #n = 16384 #n = 8192 #num_to_sort = 32768 | 2.305816 | 2 |
r2lm.py | hkaneko1985/r2lm | 1 | 6622840 | <gh_stars>1-10
# r^2 based on the latest measured y-values
import numpy as np
# Calculate r^2 based on the latest measured y-values
# measured_y and estimated_y must be vectors.
def r2lm(measured_y, estimated_y):
measured_y = np.array(measured_y).flatten()
estimated_y = np.array(estimated_y).flatten()
return float(1 - sum((measured_y - estimated_y) ** 2) / sum((measured_y[1:] - measured_y[:-1]) ** 2))
| # r^2 based on the latest measured y-values
import numpy as np
# Calculate r^2 based on the latest measured y-values
# measured_y and estimated_y must be vectors.
def r2lm(measured_y, estimated_y):
measured_y = np.array(measured_y).flatten()
estimated_y = np.array(estimated_y).flatten()
return float(1 - sum((measured_y - estimated_y) ** 2) / sum((measured_y[1:] - measured_y[:-1]) ** 2)) | en | 0.927792 | # r^2 based on the latest measured y-values # Calculate r^2 based on the latest measured y-values # measured_y and estimated_y must be vectors. | 3.102416 | 3 |
apps/patrimonio/apps/combustivel/apps/km/views.py | mequetrefe-do-subtroco/web_constel | 1 | 6622841 | <reponame>mequetrefe-do-subtroco/web_constel
import datetime
from pprint import pprint
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from constel.forms import FormFiltraQ, FormDataInicialFinalFuncionario
from constel.apps.controle_acessos.decorator import permission
from . import menu as mn, services, forms
@login_required()
def view_menu_principal(request: HttpRequest) -> HttpResponse:
context = mn.principal(request)
return render(request, "constel/v2/app.html", context)
def view_menu_registros(request: HttpRequest) -> HttpResponse:
context = mn.registros(request)
return render(request, "constel/v2/app.html", context)
def view_menu_consultas(request: HttpRequest) -> HttpResponse:
context = mn.consultas(request)
return render(request, "constel/v2/app.html", context)
def view_menu_edicoes(request: HttpRequest) -> HttpResponse:
context = mn.edicoes(request)
return render(request, "constel/v2/app.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_consulta_km_time(request: HttpRequest) -> HttpResponse:
menu = mn.consultas(request)
q = request.GET.get("q", "")
form = FormFiltraQ(
"nome ou matrícula",
initial={
"q": q,
}
)
query = Q(user__id=request.user.id)
if q != "":
query = query & Q(
Q(user_to__first_name__icontains=q) |
Q(user_to__last_name__icontains=q) |
Q(user_to__username__icontains=q)
)
itens = services.query_km_team(query)
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form": form,
"form_submit_text": "Filtrar"
}
context.update(menu)
return render(request, "km/consultar_km_time.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "patrimonio")
def view_consulta_km_hoje(request: HttpRequest) -> HttpResponse:
menu = mn.consultas(request)
q = request.GET.get("q", "")
form = FormFiltraQ(
"nome ou matrícula",
initial={
"q": q,
}
)
query = Q()
if q != "":
query = query & Q(
Q(user_to__first_name__icontains=q) |
Q(user_to__last_name__icontains=q) |
Q(user_to__username__icontains=q) |
Q(user__first_name__icontains=q) |
Q(user__last_name__icontains=q) |
Q(user__username__icontains=q)
)
itens = services.query_km(query)
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form": form,
"form_submit_text": "Filtrar"
}
context.update(menu)
return render(request, "km/consultar_km_hoje.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "patrimonio")
def view_consulta_km_pendencias_hoje(request: HttpRequest) -> HttpResponse:
menu = mn.consultas(request)
q = request.GET.get("q", "")
form = FormFiltraQ(
"nome ou matrícula",
initial={
"q": q,
}
)
query = Q()
if q != "":
query = query & Q(
Q(gestor__first_name__icontains=q) |
Q(gestor__last_name__icontains=q) |
Q(gestor__username__icontains=q)
)
itens = services.query_today_pending(query)
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form": form,
"form_submit_text": "Filtrar"
}
context.update(menu)
return render(request, "km/consultar_pendencias_hoje.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "patrimonio")
def view_consulta_km_pendencias_hoje_detalhe(request: HttpRequest, gestor_id: int) -> HttpResponse:
menu = mn.consultas(request)
itens = services.query_today_pending_detail(gestor_id)
pprint(list(itens))
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form_submit_text": "Filtrar"
}
context.update(menu)
return render(request, "km/consultar_pendencias_hoje_detalhes.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_consulta_registros(request: HttpRequest) -> HttpResponse:
menu = mn.consultas(request)
funcionario = request.GET.get("funcionario", "")
data_inicial = request.GET.get("data_inicial", "")
data_final = request.GET.get("data_final", "")
form = FormDataInicialFinalFuncionario(initial={
"funcionario": funcionario,
"data_inicial": data_inicial,
"data_final": data_final
})
itens = services.get_km(data_inicial, data_final, funcionario)
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form": form,
"form_submit_text": "Filtrar"
}
context.update(menu)
return render(request, "km/consultar_registros.html", context)
def view_menu_relatorios(request: HttpRequest) -> HttpResponse:
context = mn.relatorios(request)
return render(request, "constel/v2/app.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "gestor", "patrimonio - combustivel")
def view_relatorio_geral(request: HttpRequest) -> HttpResponse:
menu = mn.relatorios(request)
funcionario = request.GET.get("funcionario", "")
data_inicial = request.GET.get("data_inicial", datetime.date.today().replace(day=1).isoformat())
data_final = request.GET.get("data_final", "")
form = FormDataInicialFinalFuncionario(initial={
"funcionario": funcionario,
"data_inicial": data_inicial,
"data_final": data_final
})
itens = services.query_general_report(data_inicial, data_final, funcionario)
context = {
"itens": itens,
"form": form,
"form_submit_text": "Filtrar"
}
context.update(menu)
return render(request, "km/relatorio_geral.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_registrar_km_inicial(request: HttpRequest) -> HttpResponse:
menu = mn.registros(request)
q = request.GET.get("q", "")
form = FormFiltraQ(
"nome ou matrícula",
initial={
"q": q,
}
)
query = Q(gestor__id=request.user.id)
if q != "":
query = query & Q(
Q(user__first_name__icontains=q) |
Q(user__last_name__icontains=q) |
Q(user__username__icontains=q)
)
itens = services.get_user_team_initial_km(query).values(
"user__id",
"user__username",
"user__first_name",
"user__last_name",
)
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form": form,
"form_submit_text": "Filtrar",
"details": "patrimonio_combustivel_km_registros_inicial"
}
context.update(menu)
return render(request, "km/registrar_km_inicial.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_registrar_km_final(request: HttpRequest) -> HttpResponse:
menu = mn.registros(request)
q = request.GET.get("q", "")
form = FormFiltraQ(
"nome ou matrícula",
initial={
"q": q,
}
)
query = Q()
if q != "":
query = query & Q(
Q(user_to__first_name__icontains=q) |
Q(user_to__last_name__icontains=q) |
Q(user_to__username__icontains=q)
)
itens = services.get_user_team_final_km(request.user, query).values(
"id",
"date",
"user_to__id",
"user_to__username",
"user_to__first_name",
"user_to__last_name",
).order_by(
"user_to__first_name",
"user_to__last_name",
"date",
)
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form": form,
"form_submit_text": "Filtrar",
"details": "patrimonio_combustivel_km_registros_final"
}
context.update(menu)
return render(request, "km/registrar_km_final.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_registrar_km_inicial_detalhes(request: HttpRequest, user_id: int) -> HttpResponse:
menu = mn.registros(request)
form = forms.KmForm(user_id=user_id, gestor_id=request.user.id, data=request.POST or None)
if request.method == "POST":
if form.is_valid():
km = form.cleaned_data["km"]
services.set_user_team_initial_km(user_id, request.user.id, km)
return HttpResponseRedirect("/patrimonio/combustivel/km/registros/inicial")
context = {
"form": form,
"form_submit_text": "Registrar"
}
context.update(menu)
return render(request, "km/registrar_km_form.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_registrar_km_final_detalhes(request: HttpRequest, user_id: int, km_id: int) -> HttpResponse:
menu = mn.registros(request)
form = forms.KmForm(km_id=km_id, gestor_id=request.user.id, user_id=user_id, data=request.POST or None)
if request.method == "POST":
if form.is_valid():
km = form.cleaned_data["km"]
services.set_user_team_final_km(km_id, km)
return HttpResponseRedirect("/patrimonio/combustivel/km/registros/final")
context = {
"form": form,
"form_submit_text": "Registrar"
}
context.update(menu)
return render(request, "km/registrar_km_form.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "gestor", "patrimonio - combustivel")
def view_editar_registro(request: HttpRequest) -> HttpResponse:
menu = mn.edicoes(request)
form = forms.RegistroForm(data=request.POST or None)
if request.method == "POST":
if form.is_valid():
registro = services.is_km_register(form.cleaned_data.get("funcionario"), form.cleaned_data.get("data"))
return HttpResponseRedirect(f"/patrimonio/combustivel/km/edicoes/registro/{registro.id}")
context = {
"form": form,
"form_submit_text": "Avançar"
}
context.update(menu)
return render(request, "km/registrar_km_form.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "gestor", "patrimonio - combustivel")
def view_editar_registro_detalhe(request: HttpRequest, registro_id: int) -> HttpResponse:
menu = mn.edicoes(request)
registro = services.get_km_by_id(registro_id)
form = forms.EditaRegistroForm(data=request.POST or None, instance=registro)
if request.method == "POST":
if form.is_valid():
form.save(commit=True)
return HttpResponseRedirect(f"/patrimonio/combustivel/km/edicoes/registro/")
context = {
"form": form,
"registro": registro,
"form_submit_text": "Salvar"
}
context.update(menu)
return render(request, "km/editar_registro_form.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "gestor", "patrimonio - combustivel")
def view_registrar_falta(request: HttpRequest) -> HttpResponse:
menu = mn.registros(request)
form = forms.RegistraFaltaForm(data=request.POST or None)
if request.method == "POST":
if form.is_valid():
services.set_falta(request.user, form.cleaned_data.get("funcionario"), form.cleaned_data.get("data"))
return HttpResponseRedirect(f"/patrimonio/combustivel/km/registros/falta/")
context = {
"form": form,
"form_submit_text": "Registrar"
}
context.update(menu)
return render(request, "km/registrar_km_form.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "gestor", "patrimonio - combustivel")
def view_registrar_pendencia(request: HttpRequest) -> HttpResponse:
menu = mn.registros(request)
form = forms.RegistraPendenciaForm(data=request.POST or None)
if request.method == "POST":
if form.is_valid():
services.set_pendencia(
request.user,
form.cleaned_data.get("funcionario"),
form.cleaned_data.get("data"),
form.cleaned_data.get("km_initial"),
form.cleaned_data.get("km_final"),
)
return HttpResponseRedirect(f"/patrimonio/combustivel/km/registros/pendencia/")
context = {
"form": form,
"form_submit_text": "Registrar"
}
context.update(menu)
return render(request, "km/registrar_km_form.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_registrar_km_inicial_sem_equipe(request: HttpRequest) -> HttpResponse:
menu = mn.registros(request)
form = forms.KmFormFuncionario(gestor_id=request.user.id, data=request.POST or None)
if request.method == "POST":
if form.is_valid():
km = form.cleaned_data["km"]
services.set_user_team_initial_km(form.cleaned_data["funcionario"], request.user.id, km)
return HttpResponseRedirect("/patrimonio/combustivel/km/registros")
context = {
"form": form,
"form_submit_text": "Registrar"
}
context.update(menu)
return render(request, "km/registrar_km_form.html", context)
| import datetime
from pprint import pprint
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from constel.forms import FormFiltraQ, FormDataInicialFinalFuncionario
from constel.apps.controle_acessos.decorator import permission
from . import menu as mn, services, forms
@login_required()
def view_menu_principal(request: HttpRequest) -> HttpResponse:
context = mn.principal(request)
return render(request, "constel/v2/app.html", context)
def view_menu_registros(request: HttpRequest) -> HttpResponse:
context = mn.registros(request)
return render(request, "constel/v2/app.html", context)
def view_menu_consultas(request: HttpRequest) -> HttpResponse:
context = mn.consultas(request)
return render(request, "constel/v2/app.html", context)
def view_menu_edicoes(request: HttpRequest) -> HttpResponse:
context = mn.edicoes(request)
return render(request, "constel/v2/app.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_consulta_km_time(request: HttpRequest) -> HttpResponse:
menu = mn.consultas(request)
q = request.GET.get("q", "")
form = FormFiltraQ(
"nome ou matrícula",
initial={
"q": q,
}
)
query = Q(user__id=request.user.id)
if q != "":
query = query & Q(
Q(user_to__first_name__icontains=q) |
Q(user_to__last_name__icontains=q) |
Q(user_to__username__icontains=q)
)
itens = services.query_km_team(query)
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form": form,
"form_submit_text": "Filtrar"
}
context.update(menu)
return render(request, "km/consultar_km_time.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "patrimonio")
def view_consulta_km_hoje(request: HttpRequest) -> HttpResponse:
menu = mn.consultas(request)
q = request.GET.get("q", "")
form = FormFiltraQ(
"nome ou matrícula",
initial={
"q": q,
}
)
query = Q()
if q != "":
query = query & Q(
Q(user_to__first_name__icontains=q) |
Q(user_to__last_name__icontains=q) |
Q(user_to__username__icontains=q) |
Q(user__first_name__icontains=q) |
Q(user__last_name__icontains=q) |
Q(user__username__icontains=q)
)
itens = services.query_km(query)
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form": form,
"form_submit_text": "Filtrar"
}
context.update(menu)
return render(request, "km/consultar_km_hoje.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "patrimonio")
def view_consulta_km_pendencias_hoje(request: HttpRequest) -> HttpResponse:
menu = mn.consultas(request)
q = request.GET.get("q", "")
form = FormFiltraQ(
"nome ou matrícula",
initial={
"q": q,
}
)
query = Q()
if q != "":
query = query & Q(
Q(gestor__first_name__icontains=q) |
Q(gestor__last_name__icontains=q) |
Q(gestor__username__icontains=q)
)
itens = services.query_today_pending(query)
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form": form,
"form_submit_text": "Filtrar"
}
context.update(menu)
return render(request, "km/consultar_pendencias_hoje.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "patrimonio")
def view_consulta_km_pendencias_hoje_detalhe(request: HttpRequest, gestor_id: int) -> HttpResponse:
menu = mn.consultas(request)
itens = services.query_today_pending_detail(gestor_id)
pprint(list(itens))
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form_submit_text": "Filtrar"
}
context.update(menu)
return render(request, "km/consultar_pendencias_hoje_detalhes.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_consulta_registros(request: HttpRequest) -> HttpResponse:
menu = mn.consultas(request)
funcionario = request.GET.get("funcionario", "")
data_inicial = request.GET.get("data_inicial", "")
data_final = request.GET.get("data_final", "")
form = FormDataInicialFinalFuncionario(initial={
"funcionario": funcionario,
"data_inicial": data_inicial,
"data_final": data_final
})
itens = services.get_km(data_inicial, data_final, funcionario)
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form": form,
"form_submit_text": "Filtrar"
}
context.update(menu)
return render(request, "km/consultar_registros.html", context)
def view_menu_relatorios(request: HttpRequest) -> HttpResponse:
context = mn.relatorios(request)
return render(request, "constel/v2/app.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "gestor", "patrimonio - combustivel")
def view_relatorio_geral(request: HttpRequest) -> HttpResponse:
menu = mn.relatorios(request)
funcionario = request.GET.get("funcionario", "")
data_inicial = request.GET.get("data_inicial", datetime.date.today().replace(day=1).isoformat())
data_final = request.GET.get("data_final", "")
form = FormDataInicialFinalFuncionario(initial={
"funcionario": funcionario,
"data_inicial": data_inicial,
"data_final": data_final
})
itens = services.query_general_report(data_inicial, data_final, funcionario)
context = {
"itens": itens,
"form": form,
"form_submit_text": "Filtrar"
}
context.update(menu)
return render(request, "km/relatorio_geral.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_registrar_km_inicial(request: HttpRequest) -> HttpResponse:
menu = mn.registros(request)
q = request.GET.get("q", "")
form = FormFiltraQ(
"nome ou matrícula",
initial={
"q": q,
}
)
query = Q(gestor__id=request.user.id)
if q != "":
query = query & Q(
Q(user__first_name__icontains=q) |
Q(user__last_name__icontains=q) |
Q(user__username__icontains=q)
)
itens = services.get_user_team_initial_km(query).values(
"user__id",
"user__username",
"user__first_name",
"user__last_name",
)
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form": form,
"form_submit_text": "Filtrar",
"details": "patrimonio_combustivel_km_registros_inicial"
}
context.update(menu)
return render(request, "km/registrar_km_inicial.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_registrar_km_final(request: HttpRequest) -> HttpResponse:
menu = mn.registros(request)
q = request.GET.get("q", "")
form = FormFiltraQ(
"nome ou matrícula",
initial={
"q": q,
}
)
query = Q()
if q != "":
query = query & Q(
Q(user_to__first_name__icontains=q) |
Q(user_to__last_name__icontains=q) |
Q(user_to__username__icontains=q)
)
itens = services.get_user_team_final_km(request.user, query).values(
"id",
"date",
"user_to__id",
"user_to__username",
"user_to__first_name",
"user_to__last_name",
).order_by(
"user_to__first_name",
"user_to__last_name",
"date",
)
paginator = Paginator(itens, 50)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"form": form,
"form_submit_text": "Filtrar",
"details": "patrimonio_combustivel_km_registros_final"
}
context.update(menu)
return render(request, "km/registrar_km_final.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_registrar_km_inicial_detalhes(request: HttpRequest, user_id: int) -> HttpResponse:
menu = mn.registros(request)
form = forms.KmForm(user_id=user_id, gestor_id=request.user.id, data=request.POST or None)
if request.method == "POST":
if form.is_valid():
km = form.cleaned_data["km"]
services.set_user_team_initial_km(user_id, request.user.id, km)
return HttpResponseRedirect("/patrimonio/combustivel/km/registros/inicial")
context = {
"form": form,
"form_submit_text": "Registrar"
}
context.update(menu)
return render(request, "km/registrar_km_form.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_registrar_km_final_detalhes(request: HttpRequest, user_id: int, km_id: int) -> HttpResponse:
menu = mn.registros(request)
form = forms.KmForm(km_id=km_id, gestor_id=request.user.id, user_id=user_id, data=request.POST or None)
if request.method == "POST":
if form.is_valid():
km = form.cleaned_data["km"]
services.set_user_team_final_km(km_id, km)
return HttpResponseRedirect("/patrimonio/combustivel/km/registros/final")
context = {
"form": form,
"form_submit_text": "Registrar"
}
context.update(menu)
return render(request, "km/registrar_km_form.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "gestor", "patrimonio - combustivel")
def view_editar_registro(request: HttpRequest) -> HttpResponse:
menu = mn.edicoes(request)
form = forms.RegistroForm(data=request.POST or None)
if request.method == "POST":
if form.is_valid():
registro = services.is_km_register(form.cleaned_data.get("funcionario"), form.cleaned_data.get("data"))
return HttpResponseRedirect(f"/patrimonio/combustivel/km/edicoes/registro/{registro.id}")
context = {
"form": form,
"form_submit_text": "Avançar"
}
context.update(menu)
return render(request, "km/registrar_km_form.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "gestor", "patrimonio - combustivel")
def view_editar_registro_detalhe(request: HttpRequest, registro_id: int) -> HttpResponse:
menu = mn.edicoes(request)
registro = services.get_km_by_id(registro_id)
form = forms.EditaRegistroForm(data=request.POST or None, instance=registro)
if request.method == "POST":
if form.is_valid():
form.save(commit=True)
return HttpResponseRedirect(f"/patrimonio/combustivel/km/edicoes/registro/")
context = {
"form": form,
"registro": registro,
"form_submit_text": "Salvar"
}
context.update(menu)
return render(request, "km/editar_registro_form.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "gestor", "patrimonio - combustivel")
def view_registrar_falta(request: HttpRequest) -> HttpResponse:
menu = mn.registros(request)
form = forms.RegistraFaltaForm(data=request.POST or None)
if request.method == "POST":
if form.is_valid():
services.set_falta(request.user, form.cleaned_data.get("funcionario"), form.cleaned_data.get("data"))
return HttpResponseRedirect(f"/patrimonio/combustivel/km/registros/falta/")
context = {
"form": form,
"form_submit_text": "Registrar"
}
context.update(menu)
return render(request, "km/registrar_km_form.html", context)
@login_required()
@permission("patrimonio - combustivel - km", "gestor", "patrimonio - combustivel")
def view_registrar_pendencia(request: HttpRequest) -> HttpResponse:
menu = mn.registros(request)
form = forms.RegistraPendenciaForm(data=request.POST or None)
if request.method == "POST":
if form.is_valid():
services.set_pendencia(
request.user,
form.cleaned_data.get("funcionario"),
form.cleaned_data.get("data"),
form.cleaned_data.get("km_initial"),
form.cleaned_data.get("km_final"),
)
return HttpResponseRedirect(f"/patrimonio/combustivel/km/registros/pendencia/")
context = {
"form": form,
"form_submit_text": "Registrar"
}
context.update(menu)
return render(request, "km/registrar_km_form.html", context)
@login_required()
@permission("patrimonio - combustivel - km")
def view_registrar_km_inicial_sem_equipe(request: HttpRequest) -> HttpResponse:
menu = mn.registros(request)
form = forms.KmFormFuncionario(gestor_id=request.user.id, data=request.POST or None)
if request.method == "POST":
if form.is_valid():
km = form.cleaned_data["km"]
services.set_user_team_initial_km(form.cleaned_data["funcionario"], request.user.id, km)
return HttpResponseRedirect("/patrimonio/combustivel/km/registros")
context = {
"form": form,
"form_submit_text": "Registrar"
}
context.update(menu)
return render(request, "km/registrar_km_form.html", context) | none | 1 | 2.044914 | 2 | |
DM/models/comp_encoder.py | moonfoam/fewshot-font-generation | 49 | 6622842 | """
DMFont
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
from functools import partial
import torch
import torch.nn as nn
from base.modules import ConvBlock, ResBlock, GCBlock, SAFFNBlock
class ComponentEncoder(nn.Module):
def __init__(self, n_heads=3):
super().__init__()
self.n_heads = n_heads
ConvBlk = partial(ConvBlock, norm='none', activ='relu', pad_type='zero')
ResBlk = partial(ResBlock, norm='none', activ='relu', pad_type='zero')
SAFFNBlk = partial(SAFFNBlock, C_qk_ratio=0.5, n_heads=2, area=False, ffn_mult=2)
C = 32
self.body = nn.ModuleList([
ConvBlk(1, C, 3, 1, 1, norm='none', activ='none'), # 128x128
ConvBlk(C*1, C*2, 3, 1, 1, downsample=True), # 64x64
GCBlock(C*2),
ConvBlk(C*2, C*4, 3, 1, 1, downsample=True), # 32x32
SAFFNBlk(C*4, size=32, rel_pos=True),
])
self.heads = nn.ModuleList([
nn.ModuleList([
ResBlk(C*4, C*4, 3, 1),
SAFFNBlk(C*4, size=32, rel_pos=False),
ResBlk(C*4, C*4, 3, 1),
ResBlk(C*4, C*8, 3, 1, downsample=True), # 16x16
SAFFNBlk(C*8, size=16, rel_pos=False),
ResBlk(C*8, C*8)
]) for _ in range(n_heads)
])
self.skip_layer_idx = 2
self.feat_shape = {"last": (C*8, 16, 16), "skip": (C*4, 32, 32)}
def forward(self, x):
ret_feats = {}
for layer in self.body:
x = layer(x)
xs = [x] * self.n_heads
n_layers = len(self.heads[0])
for lidx in range(n_layers):
for hidx, head in enumerate(self.heads):
layer = head[lidx]
xs[hidx] = layer(xs[hidx])
if lidx == self.skip_layer_idx:
ret_feats["skip"] = torch.stack(xs, dim=1)
ret_feats["last"] = torch.stack(xs, dim=1)
return ret_feats
def get_feat_shape(self):
return self.feat_shape
| """
DMFont
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
from functools import partial
import torch
import torch.nn as nn
from base.modules import ConvBlock, ResBlock, GCBlock, SAFFNBlock
class ComponentEncoder(nn.Module):
def __init__(self, n_heads=3):
super().__init__()
self.n_heads = n_heads
ConvBlk = partial(ConvBlock, norm='none', activ='relu', pad_type='zero')
ResBlk = partial(ResBlock, norm='none', activ='relu', pad_type='zero')
SAFFNBlk = partial(SAFFNBlock, C_qk_ratio=0.5, n_heads=2, area=False, ffn_mult=2)
C = 32
self.body = nn.ModuleList([
ConvBlk(1, C, 3, 1, 1, norm='none', activ='none'), # 128x128
ConvBlk(C*1, C*2, 3, 1, 1, downsample=True), # 64x64
GCBlock(C*2),
ConvBlk(C*2, C*4, 3, 1, 1, downsample=True), # 32x32
SAFFNBlk(C*4, size=32, rel_pos=True),
])
self.heads = nn.ModuleList([
nn.ModuleList([
ResBlk(C*4, C*4, 3, 1),
SAFFNBlk(C*4, size=32, rel_pos=False),
ResBlk(C*4, C*4, 3, 1),
ResBlk(C*4, C*8, 3, 1, downsample=True), # 16x16
SAFFNBlk(C*8, size=16, rel_pos=False),
ResBlk(C*8, C*8)
]) for _ in range(n_heads)
])
self.skip_layer_idx = 2
self.feat_shape = {"last": (C*8, 16, 16), "skip": (C*4, 32, 32)}
def forward(self, x):
ret_feats = {}
for layer in self.body:
x = layer(x)
xs = [x] * self.n_heads
n_layers = len(self.heads[0])
for lidx in range(n_layers):
for hidx, head in enumerate(self.heads):
layer = head[lidx]
xs[hidx] = layer(xs[hidx])
if lidx == self.skip_layer_idx:
ret_feats["skip"] = torch.stack(xs, dim=1)
ret_feats["last"] = torch.stack(xs, dim=1)
return ret_feats
def get_feat_shape(self):
return self.feat_shape
| en | 0.400977 | DMFont Copyright (c) 2020-present NAVER Corp. MIT license # 128x128 # 64x64 # 32x32 # 16x16 | 2.076434 | 2 |
src/gui.py | Jack477/CommanderPi_rockpi | 3 | 6622843 | #!/usr/bin/python
import sys
import os
import resources as rs
import update as up
import tkinter as tk
import theme as th
import importlib
import webbrowser
from tkinter import messagebox as msb
from tkinter import *
from tkinter import ttk
from PIL import Image, ImageTk
### TODO: Move change_theme function to theme.py?
### split resources.py into smaller files
### move window_list from theme.py to resources
home_path = sys.argv[1]
def change_theme(master):
if int(th.color_mode)==0:
print("Setting color theme to 1")
th.color_mode=1
else:
th.color_mode=0
rs.config.set('DEFAULT', 'color_mode', str(th.color_mode))
with open(home_path+'/CommanderPi/src/cpi.config', 'w') as configfile:
rs.config.write(configfile)
th.set_theme(master)
#print(th.color_mode)
### Use in window class: master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
def on_Window_Close(master):
if isinstance(master, tk.Tk):
window_name = master.__class__
print(window_name)
th.window_list.pop()
master.destroy()
### Using to keybind window kill
def killwindow(event, master):
on_Window_Close(master)
### Open new window with his own master
def bopen(window):
x = window()
class Network_Window:
def __init__(master):
master = tk.Tk()
master.geometry("480x310")
master.title("Commander Pi")
th.window_list.append(master)
mainframe = Frame(master)
mainframe.pack(padx=10, pady=10)
titleframe = Frame(mainframe)
titleframe.pack(fill=X)
image = Image.open(home_path+"/CommanderPi/src/icons/Networkings.png")
photo = ImageTk.PhotoImage(image, master=titleframe)
title_label = tk.Label( titleframe, text = " Networking", font=("TkDefaultFont", 18, "bold"), image = photo, compound=LEFT, anchor='w')
title_label.image = photo
title_label.pack(side=LEFT)
network_frame = Frame(mainframe)
network_frame.pack()
ether_title = tk.Label(network_frame, text="Ethernet:", font=("TkDefaultFont", 11, "bold"))
ether_title.grid(row=0, column=0)
wlan_title = tk.Label(network_frame, text="WiFi:", font=("TkDefaultFont", 11, "bold"))
wlan_title.grid(row=0, column=1)
ether_label = tk.Label( network_frame, text = rs.eth0_data, borderwidth=2, relief="groove", height=7, width=25, anchor='w', justify=LEFT )
ether_label.grid(row=1, column=0, sticky = W)
wlan_label = tk.Label( network_frame, text = rs.wlan0_data, borderwidth=2, relief="groove", height=7, width=25, anchor='w', justify=LEFT )
wlan_label.grid(row=1, column=1, sticky = W)
cc_frame = Frame(mainframe)
cc_frame.pack()
actual_country_code = tk.Label( cc_frame, text="Your country code: "+rs.get_country_code(), font=("TkDefaultFont", 11, "bold"))
actual_country_code.grid(row=0, column=0, columnspan=2)
country_code_label = tk.Label( cc_frame, text="Set your country code", font=("TkDefaultFont", 11, "bold"))
country_code_label.grid(row=1, column=0, columnspan=2)
country_code_entry = tk.Entry( cc_frame, justify=CENTER, width=5)
country_code_entry.grid(row=2, column=0, sticky=E)
country_code_button = tk.Button( cc_frame, text="Apply", command=lambda:push_cc(), font=("TkDefaultFont", 10, "bold"), cursor="hand2")
country_code_button.grid(row=2, column=1)
bind_label = tk.Label( mainframe, text="Press [Esc] to close", font=("TkDefaultFont", 11, "bold") )
bind_label.pack(side=BOTTOM)
def push_cc():
code = country_code_entry.get()
if isinstance(code, str) and len(code) == 2:
code = code.upper()
rs.set_country_code(code)
actual_country_code.configure(text="Your country code: "+rs.get_country_code())
msb.showinfo(title="Done!", message="Your country code is now set as "+code)
else:
msb.showwarning(title="Error", message="Country code should be two letters!")
th.set_theme(master)
master.bind('<Escape>', lambda e:killwindow(e, master))
master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
master.mainloop()
class Proc_Info_Window:
def __init__(master):
master = tk.Tk()
master.geometry("350x400")
master.title("Commander Pi")
th.window_list.append(master)
th.set_theme(master)
mainframe = Frame(master)
mainframe.pack(padx=10, pady=10)
titleframe = Frame(mainframe)
titleframe.pack(fill=X)
image = Image.open(home_path+"/CommanderPi/src/icons/CPUs.png")
photo = ImageTk.PhotoImage(image, master=titleframe)
title_label = tk.Label( titleframe, text = " CPU Details", font=("TkDefaultFont", 18, "bold"), image = photo, compound=LEFT, anchor='w')
title_label.image = photo
title_label.pack(side=LEFT)
separator = ttk.Separator(mainframe, orient='horizontal')
separator.pack(fill=X, expand=True, pady=15)
cpu_content_frame = Frame(mainframe)
cpu_content_frame.pack(fill=X)
cpu_label = tk.Label( cpu_content_frame, text = rs.getproc0(), justify=LEFT, width=20, anchor='w' )
cpu_label.grid(row=0, column=0, rowspan=14, sticky=W)
cpu_label2 = tk.Label( cpu_content_frame, text = rs.getproc1(), justify=LEFT, width=13, anchor='w' )
cpu_label2.grid(row=0, column=1, rowspan=14, sticky=W)
separator2 = ttk.Separator(mainframe, orient='horizontal')
separator2.pack(fill=X, expand=True, pady=15)
bind_label = tk.Label( mainframe, text="Press [Esc] to close", font=("TkDefaultFont", 11, "bold") )
bind_label.pack(side=BOTTOM)
master.bind('<Escape>', lambda e:killwindow(e, master))
master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
master.mainloop()
class About_Window:
def __init__(master):
master = tk.Tk()
master.geometry("400x450")
master.title("Commander Pi")
th.window_list.append(master)
mainframe = Frame(master)
mainframe.pack(padx=10, pady=10)
titleframe = Frame(mainframe)
titleframe.pack(fill=X)
image = Image.open(home_path+"/CommanderPi/src/icons/logo.png")
photo = ImageTk.PhotoImage(image, master=titleframe)
title_label = tk.Label( titleframe, text = " About Application", font=("TkDefaultFont", 18, "bold"), image = photo, compound=LEFT, anchor='w')
title_label.image = photo
title_label.pack(side=LEFT)
separator = ttk.Separator(mainframe, orient='horizontal')
separator.pack(fill=X, expand=True, pady=15)
content_frame = Frame(mainframe)
content_frame.pack()
about_label = tk.Label( content_frame, text = "Commander Pi 2020\n", justify=CENTER, font=("TkDefaultFont", 11, "bold"))
about_label.pack()
text_label = tk.Label( content_frame, text="By Jack477\nFor Twister OS Armbian\n\nGraphic elements by grayduck\nIcon derived from a work by Vectors Market", justify=CENTER)
text_label.pack(fill=X)
version_label = tk.Label( content_frame, text=rs.get_app_version(), font=("TkDefaultFont", 11, "bold"), justify=CENTER)
version_label.pack()
link = tk.Label( content_frame, text="Changelog", cursor="hand2", fg="#1D81DA", pady=5)
link.pack(fill=X)
mlink = 'https://github.com/Jack477/CommanderPi/blob/master/CHANGELOG.md'
link.bind("<Button-1>", lambda e: rs.cpi_open_url(mlink))
separator2 = ttk.Separator(mainframe, orient='horizontal')
separator2.pack(fill=X, expand=True, pady=15)
update_button = Button(mainframe, text="Check for updates", command=lambda:update_x(), cursor="hand2", font=("TkDefaultFont", 11, "bold"), state=DISABLED)
update_button.pack()
color_buton = Button(mainframe, text="Change color theme", command=lambda:change_theme(master), cursor="hand2", font=("TkDefaultFont", 11, "bold"))
color_buton.pack()
def update_x():
up.update_cpi()
bind_label = tk.Label( mainframe, text="Press [Esc] to close", font=("TkDefaultFont", 11, "bold") )
bind_label.pack(side=BOTTOM)
master.bind('<Escape>', lambda e:killwindow(e, master))
th.set_theme(master)
master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
master.mainloop()
### Main window
class Window:
def __init__(master):
master = tk.Tk()
master.geometry("420x550")
master.title("Commander Pi")
master.resizable(False, False)
#master.iconbitmap("@"+home_path+"/CommanderPi/src/xicon.ico")
icon = PhotoImage(file = home_path+"/CommanderPi/src/icon.png")
master.iconphoto(True, icon)
th.window_list.append(master)
mainframe = Frame(master)
mainframe.pack(padx=10, pady=10)
titleframe = Frame(mainframe)
titleframe.pack()
loadimg = Image.open(home_path+"/CommanderPi/src/icons/title_logo.png")
img = ImageTk.PhotoImage(image=loadimg)
img_label = tk.Label ( titleframe, image=img)
img_label.image = img
img_label.grid(row=0, column=0, columnspan=2)
#title_label = tk.Label( titleframe, text = "Commander Pi", font=("TkDefaultFont", 22, "bold") )
#title_label.grid(row=0, column=1)
separator = ttk.Separator(mainframe, orient='horizontal')
separator.pack(fill=X, expand=True, pady=10)
infoframe = Frame(mainframe)
infoframe.pack(fill=X)
title2_label = tk.Label( infoframe, text = "ROCK PI VERSION\nReal-time system information:\n", font=("TkDefaultFont", 11, "bold"), anchor='w')
title2_label.grid(row=0, column=0, columnspan=2, sticky=W)
board_version_label = tk.Label( infoframe, text= rs.board_version, fg="red", anchor='w')
board_version_label.grid(row=1, column=0, columnspan=2, sticky=W)
kernel_version_label = tk.Label( infoframe, text = "Kernel version: ", width=30, anchor='w' )
kernel_version_label.grid(row=2, column=0, sticky=W)
kernel_version_label2 = tk.Label( infoframe, text = rs.kernel_version , width=15, anchor='w')
kernel_version_label2.grid(row=2, column=1)
kernel_mode_label = tk.Label( infoframe, text = "Operating mode: ", width=30, anchor='w')
kernel_mode_label.grid(row=3, column=0, sticky=W)
kernel_mode_label2 = tk.Label( infoframe, text = rs.get_kernel_mode(), width=15, anchor='w')
kernel_mode_label2.grid(row=3, column=1)
processor_architecture_label = tk.Label( infoframe, text="Processor architecture: ", width=30, anchor='w' )
processor_architecture_label.grid(row=4, column=0, sticky=W)
processor_architecture_label2 = tk.Label( infoframe, text=rs.processor_architecture, width=15, anchor='w')
processor_architecture_label2.grid(row=4, column=1)
memory_use_label = tk.Label( infoframe, text = "Memory usage: ", width=30, anchor='w' )
memory_use_label.grid(row=5, column=0, sticky=W)
memory_use_label2 = tk.Label( infoframe, text = "", width=15, anchor='w' )
memory_use_label2.grid(row=5, column=1)
actual_gpu_temp_label = tk.Label( infoframe, text = "Actual GPU temperature: ", width=30, anchor='w' )
actual_gpu_temp_label.grid(row=6, column=0, sticky=W)
actual_gpu_temp_label2 = tk.Label( infoframe, text = "", width=15, anchor='w' )
actual_gpu_temp_label2.grid(row=6, column=1)
actual_cpu_temp_label = tk.Label( infoframe, text = "Actual CPU temperature: ", width=30, anchor='w' )
actual_cpu_temp_label.grid(row=7, column=0, sticky=W)
actual_cpu_temp_label2 = tk.Label( infoframe, text = "", width=15, anchor='w' )
actual_cpu_temp_label2.grid(row=7, column=1)
actual_cpu_usage_label = tk.Label( infoframe, text = "Processor frequency usage is: ", width=30, anchor='w')
actual_cpu_usage_label.grid(row=8, column=0, sticky=W)
actual_cpu_usage_label2 = tk.Label(infoframe, text = "", width=15, anchor='w')
actual_cpu_usage_label2.grid(row=8, column=1)
actual_gpu_usage_label = tk.Label( infoframe, text = "GPU frequency (V3D) usage is: ", width=30, anchor='w')
actual_gpu_usage_label.grid(row=9, column=0, sticky=W)
actual_gpu_usage_label2 = tk.Label(infoframe, text = "", width=15, anchor='w')
actual_gpu_usage_label2.grid(row=9, column=1)
used_label = tk.Label ( infoframe, text="Used disk space: ", width=30, anchor='w')
used_label.grid(row=10, column=0, sticky=W)
##BORDER TO TABLE borderwidth=2, relief="groove",
used_label2 = tk.Label ( infoframe, text=rs.used+"/"+rs.total+" GiB", width=15, anchor='w')
used_label2.grid(row=10, column=1)
separator2 = ttk.Separator(mainframe, orient='horizontal')
separator2.pack(fill=X, expand=True, pady=10)
#REFRESH CPU USAGE, MEMORY USAGE AND TEMPERATURE
def refresh():
#for x in th.window_list:
# print(x.__class__)
ttext = rs.reftemp()
ptext = rs.refusage()
mtext = rs.refmem()
gtext = rs.reftemp2()
gputext = rs.refgpu()
#dtext = str(rs.get_disk_percent())
#dtext = "CPU usage " + rs.cpu_usagex +" MHz"
memory_use_label2.configure(text = mtext + "/100%")
actual_cpu_temp_label2.configure(text = ttext)
actual_cpu_usage_label2.configure(text = ptext)
actual_gpu_temp_label2.configure(text = gtext)
actual_gpu_usage_label2.configure(text = gputext)
master.after(1000, refresh)
refresh()
advanced_label = tk.Label( mainframe, text = "Advanced tools", font=("TkDefaultFont", 11, "bold"), anchor='w' )
advanced_label.pack(fill=X)
btn_frame = Frame(mainframe)
btn_frame.pack(fill=X)
photo1 = PhotoImage(file = home_path+"/CommanderPi/src/icons/CPUs.png")
#photoimage1 = photo1.subsample(15, 15)
proc_info_button = Button ( btn_frame, text="CPU details", command = lambda:bopen(Proc_Info_Window), width=60, height=80, cursor="hand2", image = photo1, compound=TOP)
proc_info_button.grid(row=0, column=0, padx=4)
#photo2 = PhotoImage(file = home_path+"/CommanderPi/src/icons/Bootloaders.png")
#btn4 = Button (btn_frame, text="Bootloader", width=60, height=80, cursor="hand2", image = photo2, compound=TOP)
#btn4.grid(row=0, column=1, padx=4)
photo3 = PhotoImage(file = home_path+"/CommanderPi/src/icons/Networkings.png")
btn5 = Button (btn_frame, text="Network", command = lambda:bopen(Network_Window), width=60, height=80, cursor="hand2", image = photo3, compound=TOP)
btn5.grid(row=0, column=2, padx=4)
#photo4 = PhotoImage(file = home_path+"/CommanderPi/src/icons/Overclockings.png")
#btn2 = Button(btn_frame, text="Overclock", command = lambda:bopen(Overclock_Window), width=60, height=80, cursor="hand2", image = photo4, compound=TOP)
#btn2.grid(row=0, column=3, padx=4)
btn3 = Button( mainframe, text="About/Update", command = lambda:bopen(About_Window), font=("TkDefaultFont", 11, "bold"), cursor="hand2")
btn3.pack(side=BOTTOM, pady=5)
master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
th.set_theme(master)
#up.check_update()
master.mainloop()
| #!/usr/bin/python
import sys
import os
import resources as rs
import update as up
import tkinter as tk
import theme as th
import importlib
import webbrowser
from tkinter import messagebox as msb
from tkinter import *
from tkinter import ttk
from PIL import Image, ImageTk
### TODO: Move change_theme function to theme.py?
### split resources.py into smaller files
### move window_list from theme.py to resources
home_path = sys.argv[1]
def change_theme(master):
if int(th.color_mode)==0:
print("Setting color theme to 1")
th.color_mode=1
else:
th.color_mode=0
rs.config.set('DEFAULT', 'color_mode', str(th.color_mode))
with open(home_path+'/CommanderPi/src/cpi.config', 'w') as configfile:
rs.config.write(configfile)
th.set_theme(master)
#print(th.color_mode)
### Use in window class: master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
def on_Window_Close(master):
if isinstance(master, tk.Tk):
window_name = master.__class__
print(window_name)
th.window_list.pop()
master.destroy()
### Using to keybind window kill
def killwindow(event, master):
on_Window_Close(master)
### Open new window with his own master
def bopen(window):
x = window()
class Network_Window:
def __init__(master):
master = tk.Tk()
master.geometry("480x310")
master.title("Commander Pi")
th.window_list.append(master)
mainframe = Frame(master)
mainframe.pack(padx=10, pady=10)
titleframe = Frame(mainframe)
titleframe.pack(fill=X)
image = Image.open(home_path+"/CommanderPi/src/icons/Networkings.png")
photo = ImageTk.PhotoImage(image, master=titleframe)
title_label = tk.Label( titleframe, text = " Networking", font=("TkDefaultFont", 18, "bold"), image = photo, compound=LEFT, anchor='w')
title_label.image = photo
title_label.pack(side=LEFT)
network_frame = Frame(mainframe)
network_frame.pack()
ether_title = tk.Label(network_frame, text="Ethernet:", font=("TkDefaultFont", 11, "bold"))
ether_title.grid(row=0, column=0)
wlan_title = tk.Label(network_frame, text="WiFi:", font=("TkDefaultFont", 11, "bold"))
wlan_title.grid(row=0, column=1)
ether_label = tk.Label( network_frame, text = rs.eth0_data, borderwidth=2, relief="groove", height=7, width=25, anchor='w', justify=LEFT )
ether_label.grid(row=1, column=0, sticky = W)
wlan_label = tk.Label( network_frame, text = rs.wlan0_data, borderwidth=2, relief="groove", height=7, width=25, anchor='w', justify=LEFT )
wlan_label.grid(row=1, column=1, sticky = W)
cc_frame = Frame(mainframe)
cc_frame.pack()
actual_country_code = tk.Label( cc_frame, text="Your country code: "+rs.get_country_code(), font=("TkDefaultFont", 11, "bold"))
actual_country_code.grid(row=0, column=0, columnspan=2)
country_code_label = tk.Label( cc_frame, text="Set your country code", font=("TkDefaultFont", 11, "bold"))
country_code_label.grid(row=1, column=0, columnspan=2)
country_code_entry = tk.Entry( cc_frame, justify=CENTER, width=5)
country_code_entry.grid(row=2, column=0, sticky=E)
country_code_button = tk.Button( cc_frame, text="Apply", command=lambda:push_cc(), font=("TkDefaultFont", 10, "bold"), cursor="hand2")
country_code_button.grid(row=2, column=1)
bind_label = tk.Label( mainframe, text="Press [Esc] to close", font=("TkDefaultFont", 11, "bold") )
bind_label.pack(side=BOTTOM)
def push_cc():
code = country_code_entry.get()
if isinstance(code, str) and len(code) == 2:
code = code.upper()
rs.set_country_code(code)
actual_country_code.configure(text="Your country code: "+rs.get_country_code())
msb.showinfo(title="Done!", message="Your country code is now set as "+code)
else:
msb.showwarning(title="Error", message="Country code should be two letters!")
th.set_theme(master)
master.bind('<Escape>', lambda e:killwindow(e, master))
master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
master.mainloop()
class Proc_Info_Window:
def __init__(master):
master = tk.Tk()
master.geometry("350x400")
master.title("Commander Pi")
th.window_list.append(master)
th.set_theme(master)
mainframe = Frame(master)
mainframe.pack(padx=10, pady=10)
titleframe = Frame(mainframe)
titleframe.pack(fill=X)
image = Image.open(home_path+"/CommanderPi/src/icons/CPUs.png")
photo = ImageTk.PhotoImage(image, master=titleframe)
title_label = tk.Label( titleframe, text = " CPU Details", font=("TkDefaultFont", 18, "bold"), image = photo, compound=LEFT, anchor='w')
title_label.image = photo
title_label.pack(side=LEFT)
separator = ttk.Separator(mainframe, orient='horizontal')
separator.pack(fill=X, expand=True, pady=15)
cpu_content_frame = Frame(mainframe)
cpu_content_frame.pack(fill=X)
cpu_label = tk.Label( cpu_content_frame, text = rs.getproc0(), justify=LEFT, width=20, anchor='w' )
cpu_label.grid(row=0, column=0, rowspan=14, sticky=W)
cpu_label2 = tk.Label( cpu_content_frame, text = rs.getproc1(), justify=LEFT, width=13, anchor='w' )
cpu_label2.grid(row=0, column=1, rowspan=14, sticky=W)
separator2 = ttk.Separator(mainframe, orient='horizontal')
separator2.pack(fill=X, expand=True, pady=15)
bind_label = tk.Label( mainframe, text="Press [Esc] to close", font=("TkDefaultFont", 11, "bold") )
bind_label.pack(side=BOTTOM)
master.bind('<Escape>', lambda e:killwindow(e, master))
master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
master.mainloop()
class About_Window:
def __init__(master):
master = tk.Tk()
master.geometry("400x450")
master.title("Commander Pi")
th.window_list.append(master)
mainframe = Frame(master)
mainframe.pack(padx=10, pady=10)
titleframe = Frame(mainframe)
titleframe.pack(fill=X)
image = Image.open(home_path+"/CommanderPi/src/icons/logo.png")
photo = ImageTk.PhotoImage(image, master=titleframe)
title_label = tk.Label( titleframe, text = " About Application", font=("TkDefaultFont", 18, "bold"), image = photo, compound=LEFT, anchor='w')
title_label.image = photo
title_label.pack(side=LEFT)
separator = ttk.Separator(mainframe, orient='horizontal')
separator.pack(fill=X, expand=True, pady=15)
content_frame = Frame(mainframe)
content_frame.pack()
about_label = tk.Label( content_frame, text = "Commander Pi 2020\n", justify=CENTER, font=("TkDefaultFont", 11, "bold"))
about_label.pack()
text_label = tk.Label( content_frame, text="By Jack477\nFor Twister OS Armbian\n\nGraphic elements by grayduck\nIcon derived from a work by Vectors Market", justify=CENTER)
text_label.pack(fill=X)
version_label = tk.Label( content_frame, text=rs.get_app_version(), font=("TkDefaultFont", 11, "bold"), justify=CENTER)
version_label.pack()
link = tk.Label( content_frame, text="Changelog", cursor="hand2", fg="#1D81DA", pady=5)
link.pack(fill=X)
mlink = 'https://github.com/Jack477/CommanderPi/blob/master/CHANGELOG.md'
link.bind("<Button-1>", lambda e: rs.cpi_open_url(mlink))
separator2 = ttk.Separator(mainframe, orient='horizontal')
separator2.pack(fill=X, expand=True, pady=15)
update_button = Button(mainframe, text="Check for updates", command=lambda:update_x(), cursor="hand2", font=("TkDefaultFont", 11, "bold"), state=DISABLED)
update_button.pack()
color_buton = Button(mainframe, text="Change color theme", command=lambda:change_theme(master), cursor="hand2", font=("TkDefaultFont", 11, "bold"))
color_buton.pack()
def update_x():
up.update_cpi()
bind_label = tk.Label( mainframe, text="Press [Esc] to close", font=("TkDefaultFont", 11, "bold") )
bind_label.pack(side=BOTTOM)
master.bind('<Escape>', lambda e:killwindow(e, master))
th.set_theme(master)
master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
master.mainloop()
### Main window
class Window:
    '''Main Commander Pi window: live system readout plus advanced-tool launchers.'''
    def __init__(master):
        '''Build the main window and block in mainloop() until it is closed.'''
        # NOTE(review): 'master' occupies the usual 'self' slot but is
        # immediately rebound to a fresh Tk root, so the instance passed in
        # is discarded -- confirm this is intentional.
        master = tk.Tk()
        master.geometry("420x550")
        master.title("Commander Pi")
        master.resizable(False, False)
        #master.iconbitmap("@"+home_path+"/CommanderPi/src/xicon.ico")
        icon = PhotoImage(file = home_path+"/CommanderPi/src/icon.png")
        master.iconphoto(True, icon)
        # Register with the theme module so global theme changes reach this window.
        th.window_list.append(master)
        mainframe = Frame(master)
        mainframe.pack(padx=10, pady=10)
        # Logo banner at the top of the window.
        titleframe = Frame(mainframe)
        titleframe.pack()
        loadimg = Image.open(home_path+"/CommanderPi/src/icons/title_logo.png")
        img = ImageTk.PhotoImage(image=loadimg)
        img_label = tk.Label ( titleframe, image=img)
        img_label.image = img  # keep a reference so the PhotoImage is not garbage-collected
        img_label.grid(row=0, column=0, columnspan=2)
        #title_label = tk.Label( titleframe, text = "Commander Pi", font=("TkDefaultFont", 22, "bold") )
        #title_label.grid(row=0, column=1)
        separator = ttk.Separator(mainframe, orient='horizontal')
        separator.pack(fill=X, expand=True, pady=10)
        # Two-column grid of system info; the *_label2 widgets hold the values.
        # Dynamic values start empty and are filled periodically by refresh().
        infoframe = Frame(mainframe)
        infoframe.pack(fill=X)
        title2_label = tk.Label( infoframe, text = "ROCK PI VERSION\nReal-time system information:\n", font=("TkDefaultFont", 11, "bold"), anchor='w')
        title2_label.grid(row=0, column=0, columnspan=2, sticky=W)
        board_version_label = tk.Label( infoframe, text= rs.board_version, fg="red", anchor='w')
        board_version_label.grid(row=1, column=0, columnspan=2, sticky=W)
        kernel_version_label = tk.Label( infoframe, text = "Kernel version: ", width=30, anchor='w' )
        kernel_version_label.grid(row=2, column=0, sticky=W)
        kernel_version_label2 = tk.Label( infoframe, text = rs.kernel_version , width=15, anchor='w')
        kernel_version_label2.grid(row=2, column=1)
        kernel_mode_label = tk.Label( infoframe, text = "Operating mode: ", width=30, anchor='w')
        kernel_mode_label.grid(row=3, column=0, sticky=W)
        kernel_mode_label2 = tk.Label( infoframe, text = rs.get_kernel_mode(), width=15, anchor='w')
        kernel_mode_label2.grid(row=3, column=1)
        processor_architecture_label = tk.Label( infoframe, text="Processor architecture: ", width=30, anchor='w' )
        processor_architecture_label.grid(row=4, column=0, sticky=W)
        processor_architecture_label2 = tk.Label( infoframe, text=rs.processor_architecture, width=15, anchor='w')
        processor_architecture_label2.grid(row=4, column=1)
        memory_use_label = tk.Label( infoframe, text = "Memory usage: ", width=30, anchor='w' )
        memory_use_label.grid(row=5, column=0, sticky=W)
        memory_use_label2 = tk.Label( infoframe, text = "", width=15, anchor='w' )
        memory_use_label2.grid(row=5, column=1)
        actual_gpu_temp_label = tk.Label( infoframe, text = "Actual GPU temperature: ", width=30, anchor='w' )
        actual_gpu_temp_label.grid(row=6, column=0, sticky=W)
        actual_gpu_temp_label2 = tk.Label( infoframe, text = "", width=15, anchor='w' )
        actual_gpu_temp_label2.grid(row=6, column=1)
        actual_cpu_temp_label = tk.Label( infoframe, text = "Actual CPU temperature: ", width=30, anchor='w' )
        actual_cpu_temp_label.grid(row=7, column=0, sticky=W)
        actual_cpu_temp_label2 = tk.Label( infoframe, text = "", width=15, anchor='w' )
        actual_cpu_temp_label2.grid(row=7, column=1)
        actual_cpu_usage_label = tk.Label( infoframe, text = "Processor frequency usage is: ", width=30, anchor='w')
        actual_cpu_usage_label.grid(row=8, column=0, sticky=W)
        actual_cpu_usage_label2 = tk.Label(infoframe, text = "", width=15, anchor='w')
        actual_cpu_usage_label2.grid(row=8, column=1)
        actual_gpu_usage_label = tk.Label( infoframe, text = "GPU frequency (V3D) usage is: ", width=30, anchor='w')
        actual_gpu_usage_label.grid(row=9, column=0, sticky=W)
        actual_gpu_usage_label2 = tk.Label(infoframe, text = "", width=15, anchor='w')
        actual_gpu_usage_label2.grid(row=9, column=1)
        used_label = tk.Label ( infoframe, text="Used disk space: ", width=30, anchor='w')
        used_label.grid(row=10, column=0, sticky=W)
        ##BORDER TO TABLE borderwidth=2, relief="groove",
        used_label2 = tk.Label ( infoframe, text=rs.used+"/"+rs.total+" GiB", width=15, anchor='w')
        used_label2.grid(row=10, column=1)
        separator2 = ttk.Separator(mainframe, orient='horizontal')
        separator2.pack(fill=X, expand=True, pady=10)
        #REFRESH CPU USAGE, MEMORY USAGE AND TEMPERATURE
        def refresh():
            '''Re-poll temps/usage via the rs module and update the value labels.
            Reschedules itself every 1000 ms via master.after().
            '''
            #for x in th.window_list:
            #    print(x.__class__)
            ttext = rs.reftemp()
            ptext = rs.refusage()
            mtext = rs.refmem()
            gtext = rs.reftemp2()
            gputext = rs.refgpu()
            #dtext = str(rs.get_disk_percent())
            #dtext = "CPU usage " + rs.cpu_usagex +" MHz"
            memory_use_label2.configure(text = mtext + "/100%")
            actual_cpu_temp_label2.configure(text = ttext)
            actual_cpu_usage_label2.configure(text = ptext)
            actual_gpu_temp_label2.configure(text = gtext)
            actual_gpu_usage_label2.configure(text = gputext)
            master.after(1000, refresh)
        refresh()
        # Launcher buttons for the advanced tool windows.
        advanced_label = tk.Label( mainframe, text = "Advanced tools", font=("TkDefaultFont", 11, "bold"), anchor='w' )
        advanced_label.pack(fill=X)
        btn_frame = Frame(mainframe)
        btn_frame.pack(fill=X)
        photo1 = PhotoImage(file = home_path+"/CommanderPi/src/icons/CPUs.png")
        #photoimage1 = photo1.subsample(15, 15)
        proc_info_button = Button ( btn_frame, text="CPU details", command = lambda:bopen(Proc_Info_Window), width=60, height=80, cursor="hand2", image = photo1, compound=TOP)
        proc_info_button.grid(row=0, column=0, padx=4)
        #photo2 = PhotoImage(file = home_path+"/CommanderPi/src/icons/Bootloaders.png")
        #btn4 = Button (btn_frame, text="Bootloader", width=60, height=80, cursor="hand2", image = photo2, compound=TOP)
        #btn4.grid(row=0, column=1, padx=4)
        photo3 = PhotoImage(file = home_path+"/CommanderPi/src/icons/Networkings.png")
        btn5 = Button (btn_frame, text="Network", command = lambda:bopen(Network_Window), width=60, height=80, cursor="hand2", image = photo3, compound=TOP)
        btn5.grid(row=0, column=2, padx=4)
        #photo4 = PhotoImage(file = home_path+"/CommanderPi/src/icons/Overclockings.png")
        #btn2 = Button(btn_frame, text="Overclock", command = lambda:bopen(Overclock_Window), width=60, height=80, cursor="hand2", image = photo4, compound=TOP)
        #btn2.grid(row=0, column=3, padx=4)
        btn3 = Button( mainframe, text="About/Update", command = lambda:bopen(About_Window), font=("TkDefaultFont", 11, "bold"), cursor="hand2")
        btn3.pack(side=BOTTOM, pady=5)
        master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
        th.set_theme(master)
        #up.check_update()
        master.mainloop()
| en | 0.399893 | #!/usr/bin/python ### TODO: Move change_theme function to theme.py? ### split resources.py into smaller files ### move window_list from theme.py to resources #print(th.color_mode) ### Use in window class: master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master)) ### Using to keybind window kill ### Open new window with his own master ### Main window #master.iconbitmap("@"+home_path+"/CommanderPi/src/xicon.ico") #title_label = tk.Label( titleframe, text = "Commander Pi", font=("TkDefaultFont", 22, "bold") ) #title_label.grid(row=0, column=1) ##BORDER TO TABLE borderwidth=2, relief="groove", #REFRESH CPU USAGE, MEMORY USAGE AND TEMPERATURE #for x in th.window_list: # print(x.__class__) #dtext = str(rs.get_disk_percent()) #dtext = "CPU usage " + rs.cpu_usagex +" MHz" #photoimage1 = photo1.subsample(15, 15) #photo2 = PhotoImage(file = home_path+"/CommanderPi/src/icons/Bootloaders.png") #btn4 = Button (btn_frame, text="Bootloader", width=60, height=80, cursor="hand2", image = photo2, compound=TOP) #btn4.grid(row=0, column=1, padx=4) #photo4 = PhotoImage(file = home_path+"/CommanderPi/src/icons/Overclockings.png") #btn2 = Button(btn_frame, text="Overclock", command = lambda:bopen(Overclock_Window), width=60, height=80, cursor="hand2", image = photo4, compound=TOP) #btn2.grid(row=0, column=3, padx=4) #up.check_update() | 2.622159 | 3 |
MobileRobot_class.py | michelyakoub/Reinforcement-learning-based-navigation-for-autonomous-mobile-robots-in-unknown-environments | 0 | 6622844 | import sys
sys.path.append(r"E:\GUC\semester 8\codes\gym_pathfinding_master")
import gym
import gym_pathfinding
import math
import numpy as np
import matplotlib.pyplot as plt
class Robot(object):
    '''Base mobile-robot state: pose, sample time and arena bounds.

    The kinematic updates (``deltax``/``deltay``/``deltaTheta``) and the
    pose attribute ``length`` are expected to be supplied by subclasses.
    Plotting helpers draw onto the active matplotlib figure.
    '''
    def __init__(self):
        # Pose inside a rectangular arena (x, y in metres; theta in radians).
        self.pos_x = 0.0
        self.pos_y = 0.0
        self.theta = 0.0
        self.plot = False      # when True, move() traces the travelled path
        self._delta = 0.1      # integration sample time [s]
        self.env_width = 4     # arena width [m]
        self.env_length = 4    # arena length [m]
    # --- movement ---------------------------------------------------------
    def step(self):
        '''Advance the pose by one sample interval: x, then y, then heading.'''
        self.deltax()
        self.deltay()
        self.deltaTheta()
    def move(self, seconds):
        '''Simulate ``seconds`` of motion in fixed sample-time steps.'''
        n_steps = int(seconds / self._delta)
        for tick in range(n_steps):
            self.step()
            # Drop a path marker every third step when tracing is enabled.
            if self.plot and tick % 3 == 0:
                self.plot_xya()
    # --- printing and plotting --------------------------------------------
    def print_xya(self):
        '''Print the current x, y position and heading angle.'''
        print(f"x = {self.pos_x} y = {self.pos_y}")
        print(f"a = {self.theta}")
    def plot_robot(self):
        '''Draw an arrow marking the robot pose (y is plotted on the x-axis).'''
        head_dx = 0.001 * math.sin(self.theta)
        head_dy = 0.001 * math.cos(self.theta)
        plt.arrow(self.pos_y, self.pos_x, head_dx, head_dy,
                  head_width=self.length, head_length=self.length,
                  fc='k', ec='k')
    def plot_xya(self):
        '''Drop a red dot at the robot position.'''
        plt.scatter(self.pos_y, self.pos_x, c='r', edgecolors='r')
class MobileRobot(Robot):
    '''Differential-drive robot with a 25x25(x20) discretized state for RL.'''
    def __init__(self):
        Robot.__init__(self)
        # Geometric parameters [m].
        self.r = 0.03        # wheel radius
        self.c = 0.065       # half wheel-to-wheel distance
        self.a = 0.055
        self.b = 0.055
        self.Xicr = 0.055    # x offset of the instantaneous centre of rotation
        self.length = 0.11
        # Kinematic state.
        self.omega = 0
        self.vx = 0
        self.vy = 0
        self.omega_r = 0.0   # right wheel speed [rad/s]
        self.omega_l = 0.0   # left wheel speed [rad/s]
        self.orientation = 0
    def _refresh_velocities(self):
        '''Recompute body-frame velocities from the current wheel speeds.'''
        self.omega = self.r * (self.omega_r - self.omega_l) / (2 * self.c)
        self.vx = self.r * (self.omega_r + self.omega_l) / 2
        self.vy = self.Xicr * self.omega
    def deltax(self):
        '''Integrate the world-frame x position over one sample interval.'''
        self._refresh_velocities()
        x_rate = math.cos(self.theta) * self.vx - math.sin(self.theta) * self.vy
        self.pos_x += self._delta * x_rate
        # Clamp to the arena bounds.
        self.pos_x = min(max(self.pos_x, 0), self.env_width)
    def deltay(self):
        '''Integrate the world-frame y position over one sample interval.'''
        self._refresh_velocities()
        y_rate = math.sin(self.theta) * self.vx + math.cos(self.theta) * self.vy
        self.pos_y += self._delta * y_rate
        self.pos_y = min(max(self.pos_y, 0), self.env_length)
    def deltaTheta(self):
        '''Integrate the heading over one sample interval.'''
        self.omega = self.r * (self.omega_r - self.omega_l) / (2 * self.c)
        self.theta += self._delta * self.omega
    def reset(self, player):
        '''Place the robot at a 25x25 grid cell with a uniformly random heading.'''
        # The 4m x 4m arena is discretized into 25 levels (indices 0..24).
        self.pos_x = player[0] * self.env_width / 24
        self.pos_y = player[1] * self.env_length / 24
        self.theta = np.random.uniform(-np.pi, np.pi)
    def optimal_action(self, action):
        '''Store the action (0=fwd, 1=back, 2=left, other=right) for take_step().'''
        self.action = action
    def take_step(self):
        '''Apply the stored action as wheel speeds, then move one sample time.'''
        wheel_speeds = {
            0: (1.7, 1.7),      # forward
            1: (-1.7, -1.7),    # backward
            2: (-1.7, 1.7),     # rotate left
        }
        self.omega_l, self.omega_r = wheel_speeds.get(self.action, (1.7, -1.7))
        self.move(self._delta)
    def get_discretized_state(self):
        '''Return (x, y, theta) indices on the 25x25x20 discretized grid.'''
        x_idx = math.floor(self.pos_x / self.env_width * 24)
        y_idx = math.floor(self.pos_y / self.env_length * 24)
        # Wrap theta into (-pi, pi] before bucketing into 20 bins.
        wrapped = np.arctan2(math.sin(self.theta), math.cos(self.theta))
        theta_idx = math.floor(wrapped / (2 * np.pi) * 20)
        return (x_idx, y_idx, theta_idx)
    def assign_discretized_state(self, x, y):
        '''Set the continuous position from 25x25 grid indices.'''
        self.pos_x = x / 24 * self.env_width
        self.pos_y = y / 24 * self.env_length
sys.path.append(r"E:\GUC\semester 8\codes\gym_pathfinding_master")
import gym
import gym_pathfinding
import math
import numpy as np
import matplotlib.pyplot as plt
class Robot(object):
    '''Holds the basic pose and arena state shared by all mobile robots.

    Subclasses provide the per-step updates (``deltax``, ``deltay``,
    ``deltaTheta``) and the ``length`` attribute used by ``plot_robot``.
    '''
    def __init__(self):
        self.pos_x = 0.0
        self.pos_y = 0.0
        self.theta = 0.0
        self.plot = False        # trace the path while moving?
        self._delta = 0.1        # sample time [s]
        self.env_width = 4       # arena width [m]
        self.env_length = 4      # arena length [m]
    def step(self):
        '''Run one sample-time pose update.'''
        self.deltax()
        self.deltay()
        self.deltaTheta()
    def move(self, seconds):
        '''Advance the simulation for ``seconds`` of simulated time.'''
        total = int(seconds / self._delta)
        for k in range(total):
            self.step()
            if k % 3 == 0 and self.plot:
                self.plot_xya()
    def print_xya(self):
        '''Dump position and heading to stdout.'''
        print("x = {} y = {}".format(self.pos_x, self.pos_y))
        print("a = {}".format(self.theta))
    def plot_robot(self):
        '''Render the robot as a heading arrow (note the y/x axis swap).'''
        plt.arrow(self.pos_y, self.pos_x,
                  0.001 * math.sin(self.theta), 0.001 * math.cos(self.theta),
                  head_width=self.length, head_length=self.length,
                  fc='k', ec='k')
    def plot_xya(self):
        '''Mark the current position with a red dot.'''
        plt.scatter(self.pos_y, self.pos_x, c='r', edgecolors='r')
class MobileRobot(Robot):
    '''Two-wheel differential robot whose pose maps onto a discretized RL grid.'''
    def __init__(self):
        Robot.__init__(self)
        # geometry [m]
        self.r = 0.03        # wheel radius
        self.c = 0.065       # half track width
        self.a = 0.055
        self.b = 0.055
        self.Xicr = 0.055    # ICR x offset
        self.length = 0.11
        # kinematic state
        self.omega = 0
        self.vx = 0
        self.vy = 0
        self.omega_r = 0.0
        self.omega_l = 0.0
        self.orientation = 0
    def deltax(self):
        '''One-sample update of the world x coordinate, clamped to the arena.'''
        spin = self.r * (self.omega_r - self.omega_l) / (2 * self.c)
        self.omega = spin
        self.vx = self.r * (self.omega_r + self.omega_l) / 2
        self.vy = self.Xicr * spin
        self.pos_x += self._delta * (math.cos(self.theta) * self.vx
                                     - math.sin(self.theta) * self.vy)
        if self.pos_x > self.env_width:
            self.pos_x = self.env_width
        if self.pos_x < 0:
            self.pos_x = 0
    def deltay(self):
        '''One-sample update of the world y coordinate, clamped to the arena.'''
        spin = self.r * (self.omega_r - self.omega_l) / (2 * self.c)
        self.omega = spin
        self.vx = self.r * (self.omega_r + self.omega_l) / 2
        self.vy = self.Xicr * spin
        self.pos_y += self._delta * (math.sin(self.theta) * self.vx
                                     + math.cos(self.theta) * self.vy)
        if self.pos_y > self.env_length:
            self.pos_y = self.env_length
        if self.pos_y < 0:
            self.pos_y = 0
    def deltaTheta(self):
        '''One-sample update of the heading.'''
        self.omega = self.r * (self.omega_r - self.omega_l) / (2 * self.c)
        self.theta += self._delta * self.omega
    def reset(self, player):
        '''Spawn at grid cell ``player`` (25x25 over a 4m x 4m arena), random heading.'''
        self.pos_x = player[0] * self.env_width / 24
        self.pos_y = player[1] * self.env_length / 24
        self.theta = np.random.uniform(-np.pi, np.pi)
    def optimal_action(self, action):
        '''Remember the chosen action for the next take_step() call.'''
        self.action = action
    def take_step(self):
        '''Translate the stored action into wheel speeds and advance one sample.'''
        if self.action == 0:       # forward
            pair = (1.7, 1.7)
        elif self.action == 1:     # backward
            pair = (-1.7, -1.7)
        elif self.action == 2:     # rotate left
            pair = (-1.7, 1.7)
        else:                      # rotate right
            pair = (1.7, -1.7)
        self.omega_l, self.omega_r = pair
        self.move(self._delta)
    def get_discretized_state(self):
        '''Return the (x, y, theta) indices of the discretized pose.'''
        col = math.floor(self.pos_x / self.env_width * 24)
        row = math.floor(self.pos_y / self.env_length * 24)
        # Normalize the heading to (-pi, pi] before binning into 20 buckets.
        heading = np.arctan2(math.sin(self.theta), math.cos(self.theta))
        bucket = math.floor(heading / (2 * np.pi) * 20)
        return (col, row, bucket)
    def assign_discretized_state(self, x, y):
        '''Convert 25x25 grid indices back to a continuous position.'''
        self.pos_x = x / 24 * self.env_width
        self.pos_y = y / 24 * self.env_length
lamana/input_.py | par2/lamana | 1 | 6622845 | <gh_stars>1-10
# -----------------------------------------------------------------------------
'''Classes and functions for handling user inputs.'''
# Geometry(): parse user input geometry strings to a tuple of floats (and lists/str)
# BaseDefaults(): library of general geometry defaults; subclassed by the user.
# flake8 input_.py --ignore=E265, E501, N803, N806, N802, N813, E133
import re
import logging
import itertools as it
import collections as ct
import pandas as pd
import lamana as la
#import lamana.lt_exceptions as exc
from lamana.lt_exceptions import FormatError
from lamana.utils import tools as ut
# =============================================================================
# USER INPUT ------------------------------------------------------------------
# =============================================================================
# Parses informations and generates User Input objects, i.e. Geometry()
def tokenize_geostring(geo_string):
    '''Return a list of geometry tokens: [outer, inner_i, middle].

    The docstring previously claimed a tuple was returned, but ``str.split``
    yields a list; callers receive a list.

    Parameters
    ----------
    geo_string : str
        A geometry string, e.g. '400-[200]-800'.  It is normalized
        (uppercased, spaces removed) before splitting, so '400 - 200 - 400s'
        tokenizes the same as '400-200-400S'.

    Returns
    -------
    list of str
        The '-'-separated tokens of the normalized string.

    Notes
    -----
    The string is only normalized here, not validated; see
    ``Geometry._to_gen_convention`` for format validation.
    '''
    # TODO: convert to General Convention first (incl. duple handling);
    # for now, assume the string is already formatted correctly.
    normalized = geo_string.upper().replace(' ', '')
    return normalized.split('-')
class Geometry(object):
    '''Parse input geometry string into floats.

    When a single (or a list of) geometry string(s) is passed to the
    `lamana.distributions.Case.apply()` method, this class parses those strings
    into (outer, [inner], middle, 'S') format; 'S' is optional.

    Here are examples of conventions used to write geometry strings:

    - General: outer-[inner_i]-middle
    - Short-hand: outer-inner-middle

    Formatted in General Convention, a converted namedtuple of the geometry
    is returned.  Examples of GeometryTuples:

    - (400.0, [200.0], 800.0)          # multi-ply
    - (400.0, [200.0], 800.0, 'S')     # multi-ply, symmetric
    - (400.0, [100.0, 100.0], 800.0)   # multi-ply, [inner_i]

    Parameters
    ----------
    geo_input : str or tupled str
        Geometry string of layer thicknesses in a laminate.

    Attributes
    ----------
    total
    total_middle
    total_inner
    total_inner_i
    total_outer
    is_symmetric
    namedtuple : namedtuple
        A GeometryTuple in General Convention, e.g. (400.0, [200.0], 800.0, 'S').
    geometry : namedtuple
        The converted and parsed geometry string (same value as `namedtuple`).
    middle : float
        Middle layer thickness.
    inner : list of floats
        Inner layer thicknesses in micrometers.
    outer : float
        Outer layer thickness.
    string : str
        The input geometry string converted to General Convention format.

    Examples
    --------
    >>> g1 = ('0-0-2000')            # Monolith
    >>> g2 = ('1000-0-0')            # Bilayer
    >>> g3 = ('600-0-800')           # Trilayer
    >>> g4 = ('500-500-0')           # 4-ply
    >>> g5 = ('400-200-800')         # Short-hand; <= 5-ply
    >>> g6 = ('400-200-400S')        # Symmetric
    >>> g7 = ('400-[200]-800')       # Gen. convention; 5-ply
    >>> g8 = ('400-[100,100]-800')   # Gen. convention; 7-plys
    >>> la.input_.Geometry(g5)
    Geometry object (400.0-[200.0]-800.0)
    '''
    def __init__(self, geo_input):
        '''Normalize the input string, then parse it into GeometryTuples.'''
        # TODO: ideally calling Geometry would return the namedtuple directly.
        # TODO: Consolidate into namedtuple or self
        # TODO: rename geometrytuple
        self.string = self.__class__._to_gen_convention(geo_input)
        self.namedtuple = self._parse_geometry(self.string)# a namedtuple; see collections lib
        self.geometry = self._parse_geometry(self.string) # a namedtuple; see collections lib
        ## self.namedtuple = self._parse_geometry(geo_input) # a namedtuple; see collections lib
        ## self.geometry = self._parse_geometry(geo_input) # a namedtuple; see collections lib
        self.middle = self.geometry.middle # attributes from namedtuple; symmetric sensitive
        self.inner = self.geometry.inner
        self.outer = self.geometry.outer
        ## self.string = self.__class__._to_gen_convention(geo_input)
        # Private attribute used for set comparisons and hashing; its inner
        # is a tuple (not a list) so the whole thing is hashable.
        # NOTE(review): this parses the *raw* geo_input, not the normalized
        # self.string -- confirm equivalent inputs hash identically.
        # TODO: should we pass in geo_string instead of geo_inputs?
        self._geometry_hash = self._parse_geometry(geo_input, hash_=True)
    def __str__(self):
        '''Returns trimmed geometry string.'''
        return self.string
    def __repr__(self): # for object calls
        '''Returns Geometry object string.'''
        return '{} object ({})'.format(self.__class__.__name__, self.string)
    def __eq__(self, other):
        '''Compare GeometryTuples; only Geometry instances can be equal.'''
        if isinstance(other, self.__class__):
            #print(self.__dict__)
            #print(other.__dict__)
            return self.__dict__ == other.__dict__
        return NotImplemented
    def __ne__(self, other):
        '''Negate __eq__, propagating NotImplemented.'''
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def __hash__(self):
        '''Allow set comparisons.

        The only required property for a hash is that objects which equally
        compare have the same hash value (REF 035). `self.__dict__` is unhashable
        due to the inner list (lists are mutable, thus unhashable). So a copy is
        made called `_geometry_hash` of GeometryTuple where the inner value is
        tupled instead.
        '''
        return hash(self._geometry_hash)
        #return hash(tuple(sorted(self.__dict__.items())))
        #return hash(self._geo_string)
    def _parse_geometry(self, geo_string, hash_=False):
        '''Return a namedtuple of outer-inner-middle geometry values.

        Per General Convention, a GeometryTuple has floats and an inner list.
        Checks for symmetry, handles inner list, then makes the GeometryTuple.
        Also can create a hashable version of GeometryTuple; tuple instead of
        list for inner_i.

        Parameters
        ----------
        geo_string : tupled str or str
            outer-inner-middle values or outer-[inner]-middle values;
            formatted in general convention (0.4.11).
        hash_ : bool; default False
            When True, inner_i is returned as a tuple so the result is hashable.

        Returns
        -------
        namedtuple of mixed types
            GeometryTuple: numeric values converted to floats; (outer, [inner], middle,'S?')
        '''
        def check_symmetry(last):
            '''Yield float or str if 'S' is in the last token of the geometry string.

            Examples
            --------
            >>> [i for i in check_symmetry('400S')]
            [400.0, 'S']
            >>> [i for i in check_symmetry('400')]
            [400.0]
            '''
            if last.endswith('S'):
                # Converts last '400S' to [400.0,'S']
                #print(type(last))
                number = float(last[:-1])
                letter = last[-1]
                yield number
                yield letter
            else:
                # Converts last '400' to [400.0]
                yield float(last)
        # Inner Parsing
        def parse_inner(inside):
            '''Yield values from the inner list of a geometry string.

            Also parses inner_i (if multiple inners are found) str to floats.
            This is later converted to a list of inner_i, as required by LPEP 001.01.

            Examples
            --------
            >>> list(parse_inner('[100,100]'))
            [100.0, 100.0]
            >>> list(parse_inner('[200]'))
            [200.0]
            >>> list(parse_inner('200'))
            [200.0]

            Notes
            -----
            Type/format validation happens in `_to_gen_convention`, not here.
            '''
            if inside.startswith('['):
                if ',' in inside:
                    # Convert inside string '[100,100]' to [100.0,100.0]
                    for item in inside[1:-1].split(','):
                        yield float(item)
                else:
                    # Convert inside string '[200]' to [200.0]
                    converted = inside[1:-1] # excludes bracket strings
                    #print(converted)
                    yield float(converted)
            else:
                # Convert insde string '200' to [200] if brackets aren't found
                yield float(inside)
        # Convert Input String
        def _make_GeometryTuple(tokens, hashable=False):
            '''Return a namedtuple of floats representing geometry thicknesses.'''
            outer = float(tokens[0])
            inner_hash = tuple(parse_inner(tokens[1])) # tupled inner for hash()
            inner = list(parse_inner(tokens[1])) # complies w LPEP 001.01
            middle = tuple(check_symmetry(tokens[-1]))
            '''Should warn the object is symmetric though.'''
            # Symmetric inputs get a 4th 'symmetric' field holding 'S'.
            if 'S' not in geo_string:
                GeometryTuple = ct.namedtuple(
                    'GeometryTuple', ['outer', 'inner', 'middle'])
                if hashable:
                    return GeometryTuple(outer, inner_hash, middle[0])
                return GeometryTuple(outer, inner, middle[0])
            else:
                GeometryTuple = ct.namedtuple(
                    'GeometryTuple', ['outer', 'inner', 'middle', 'symmetric'])
                if hashable:
                    return GeometryTuple(outer, inner_hash, middle[0], middle[-1])
                return GeometryTuple(outer, inner, middle[0], middle[-1])
        # Create --------------------------------------------------------------
        '''Replace with try-except block.'''
        # Tests geo_string is a string
        # TODO: change to geo_string
        tokens = geo_string.split('-')
        # TODO: Find another name for hash_ which is a bool
        # Gets hash_ bool passed in from parsed_geometry
        return _make_GeometryTuple(tokens, hashable=hash_)
        ##tokens = geo_input.split('-')
        ##if not isinstance(geo_input, str):
        ##raise TypeError("Cannot parse input type. Supported types: str")
        ##elif len(tokens) < 3:
        # TODO: Replace with custom exception
        ##raise exc.FormatError(
        ##    "Input token is too short. Supported geometry string format:"
        ##    " 'outer-[inner_i]-middle'"
        ##)
        ##else:
        # TODO: Find another name for hash_ which is a bool
        # Gets hash_ bool from parsed_geometry
        ##return _make_GeometryTuple(tokens, hashable=hash_)
    @classmethod
    def _to_gen_convention(cls, geo_input):
        '''Return a geometry string converted to General Convention.

        Normalizes each token to a float representation and always brackets
        the inner token, e.g. '400-200-400S' -> '400.0-[200.0]-400.0S'.

        Raises
        ------
        FormatError
            If the string has the wrong number of tokens or an invalid letter.
        TypeError
            If `geo_input` is not a string.
        '''
        try:
            # Check geo_input is a string
            tokens = geo_input.split('-')
            # Check for letters in the input
            any_letters = re.compile('[a-zA-Z]', re.IGNORECASE)
            if any_letters.search(geo_input):
                all_letters = any_letters.findall(geo_input)
                # Raise if more than one letter in the Input
                if len(all_letters) > 1:
                    ##raise exc.FormatError(
                    raise FormatError(
                        "Input must not contain more than one letter, 'S'."
                    )
                # Raise if 's' or 'S' is not the letter
                if not set(all_letters).issubset(['s', 'S']):
                    ##raise exc.FormatError(
                    raise FormatError(
                        "Invalid letter detected; only 'S' allowed."
                    )
            if len(tokens) < 3:
                ##raise exc.FormatError(
                raise FormatError(
                    "Input token is too short. Supported geometry string format:"
                    " 'outer-[inner_i]-middle'"
                )
            if len(tokens) > 3:
                ##raise exc.FormatError(
                raise FormatError(
                    "Input token is too long. Supported geometry string format:"
                    " 'outer-[inner_i]-middle'"
                )
        except(AttributeError):
            # Needed in general and for distributions.Cases()
            raise TypeError(
                "Cannot parse input type. Supported types: str. {} given.".format(geo_input)
            )
        first = tokens[0]
        inside = tokens[1]
        last = tokens[-1]
        #print(inside)
        # Convert strings to general convention
        first = str(float(first))
        if inside.startswith('['):
            # Convert inside string '[100,100]' to '100.0,100.0'
            if ',' in inside:
                converted = [str(float(item)) for item in inside[1:-1].split(',')]
                inside = ','.join(converted)
            else:
                # Convert inside string '[200]' to '200.0'
                inside = str(float(inside[1:-1]))
        elif not inside.startswith('['):
            # Convert inside string '200' to 200.0 if brackets aren't found
            inside = str(float(inside))
        # Always add brackets
        inside = ''.join(['[', inside, ']'])
        #print('Converting geometry string to general convention.')
        if last.endswith('S'):
            last = str(float(last[:-1]))
            last = ''.join([last, 'S'])
        elif not last.endswith('S'):
            last = str(float(last))
        geo_string = '-'.join([first, inside, last])
        return geo_string
    # API --------------------------------------------------------------------
    # DEPRECATED: itotal() in 0.4.3d
    @property
    def total(self):
        '''Calculate total thickness for laminates using any convention.'''
        # With 'S' the string describes half the laminate, so only the middle
        # doubles; outer and inner already appear once per side.
        if self.is_symmetric:
            factor = 2
        else:
            factor = 1
        return 2 * self.outer + 2 * sum(self.inner) + factor * self.middle
    @property
    def total_middle(self):
        '''Calculate total thickness for middle lamina using any convention.'''
        if self.is_symmetric:
            return 2 * self.middle
        else:
            return self.middle
    @property
    def total_inner(self):
        '''Calculate total thickness for inner lamina using any convention.'''
        return 2 * sum(self.inner)
    @property
    def total_inner_i(self):
        '''Calculate total thickness for the ith inner lamina.'''
        result = [inner_i * 2 for inner_i in self.inner]
        return result
    @property
    def total_outer(self):
        '''Calculate total thickness for outer lamina using any convention.'''
        return 2 * self.outer
    @property
    def is_symmetric(self):
        '''Return True if 'S' convention is used.

        Examples
        --------
        >>> Geometry('400-200-800').is_symmetric
        False
        >>> Geometry('400-200-400S').is_symmetric
        True
        '''
        # Symmetric GeometryTuples carry an extra 'S' field (see _parse_geometry).
        return 'S' in self.geometry
# -----------------------------------------------------------------------------
# UTILITY
# -----------------------------------------------------------------------------
# Supplies basic dicts and methods by supplying custom case-building information.
class BaseDefaults(object):
'''Common geometry strings, objects and methods for building defaults.
Allows quick access to default parameters. It is useful for consistent testing.
Users can subclass geometric defaults and add specific parameters
(loading, material, geometric, etc.) to improve start-up and reduce
redundant code. Here are some objects that can be found in and subclassed
from this base class:
- Base : Default geometry strings
- Base : Default Geometry objects
- Subclass : Default material and geometric/loading parameters
- Subclass : Default FeatureInputs
Defaults are maintained in two dicts:
- `geo_inputs` : A dict of standard geometry strings and special groups.
- `Geo_objects` : A dict of converted geo_inputs into Geometry objects.
Methods
-------
get_FeatureInput(Geometry, load_params=None, mat_props=None, **kwargs)
Return a dict of the basic FeatureInput object; subclass in a model.
get_materials(mat_props)
Return a list of materials in order from a mat_props dict or DataFrame.
generate(selection=None, geo_inputs=False)
Yield a generator of selected geometries.
Notes
-----
DEV: add entries to the Default dicts. Removing existing dict entries or
"trimming" the Default dicts will break tests (not recommended).
Material properties, geometric/loading parameters and FeatureInput cannot be
generalized and are thus left to the author to define in their custom defaults
subclass. The best place to customize this is in a custom models module.
Examples
--------
Base: Idiomatic instantiation of Base Defaults
>>> bdft = BaseDefaults() # instantiation
Access a set of built-in geometry strings
>>> bdft.geos_most # list of geometry Input strings
[('0-0-2000'), ('1000-0-0'), ('600-0-800'),
('500-500-0'), ('400-200-800')]
Access a set of built-in Geometry objects (converted geometry strings)
>>> bdft.Geos_simple # list of Geometry objects
[<Geometry object ('0-0-2000')>,
<Geometry object ('1000-0-0')>,
<Geometry object '600-0-800')>,
<Geometry object ('500-500-0')>,]
Subclass: Idiomatic import and instantiation of custom Defaults (see Wilson_LT ex.)
>>> from lamana.models import Wilson_LT as wlt # user-implemmented Defaults
>>> dft = wlt.Defaults() # subclassed from BaseDefaults
Access Defaults loading parameters, material properties and FeatureInput
>>> dft.load_params
{'R': 12e-3, 'a': 7.5e-3, 'p': 1, 'P_a': 1, 'r': 2e-4}
>>> dft.mat_props
{'HA': [5.2e10, 0.25], 'PSu': [2.7e9, 0.33],}
>>> dft.FeatureInput
{'Geometry': '400-[200]-800',
'Geometric': {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,},
'Materials': {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33],},
'Custom': None,
'Model': Wilson_LT,
'Globals': None,}
Reassign Defaults instances (e.g. R, p)
>>> dft.load_params = {
... 'R': 50e-3, 'a': 7.5e-3, 'p' : 5,
... 'P_a': 1, 'r': 2e-4,
... }
>>> dft.load_params
{'R': 50e-3, 'a': 7.5e-3, 'p' : 5, 'P_a': 1, 'r': 2e-4,}
'''
def __init__(self):
# TODO: Add BaseDefaults attributes to claim the namespace
# i.e, load_params = None, mat_props = None, FeatureInput = None
# Consider this architexture rather than leave the author with oneous to def vars
# Geometry Input Strings
# DEV: Add geometry strings here. Do not remove.
self.geo_inputs = {
'1-ply': ['0-0-2000', '0-0-1000'],
'2-ply': ['1000-0-0'],
'3-ply': ['600-0-800', '600-0-400S'],
'4-ply': ['500-500-0', '400-[200]-0'],
'5-ply': ['400-200-800', '400-[200]-800', '400-200-400S'],
'6-ply': ['400-[100,100]-0', '500-[250,250]-0'],
'7-ply': ['400-[100,100]-800', '400-[100,100]-400S'],
'9-ply': ['400-[100,100,100]-800'],
'10-ply': ['500-[50,50,50,50]-0'],
'11-ply': ['400-[100,100,100,100]-800'],
'13-ply': ['400-[100,100,100,100,100]-800'],
}
# To add keys, first add logic to automate appending dict_ in groupify
# This should add a key to geo_inputs. Geo_objects will auto-mimic this logic.
# Then define attributes (below) of geo_inputs and Geo_objects for API access.
self.geo_inputs = self._groupify_dict(self.geo_inputs) # needed for next line
self.Geo_objects = self._groupify_dict(self.geo_inputs, Geo_obj=True)
# ATTRIBUTES ----------------------------------------------------------
# Quick access to geometry related groups
# Geometry Input String Attributes
self.geos_even = self.geo_inputs['even']
self.geos_odd = self.geo_inputs['odd']
self.geos_most = self.geo_inputs['most']
self.geos_special = self.geo_inputs['special']
self.geos_full = self.geo_inputs['full']
self.geos_full2 = self.geo_inputs['full2']
self.geos_full3 = self.geo_inputs['full3']
self.geos_all = self.geo_inputs['all']
self.geos_standard = self.geo_inputs['standard']
self.geos_symmetric = self.geo_inputs['symmetric']
self.geos_inner_i = self.geo_inputs['inner_i']
self.geos_general = self.geo_inputs['general conv.']
self.geos_unconventional = self.geo_inputs['unconventional']
self.geos_dissimilar = self.geo_inputs['dissimilar']
self.geos_sample = self.geo_inputs['sample']
# TODO: must be way to automate these assignments; reduce redundancy.
# Geometry Object Attributes
self.Geos_even = self.Geo_objects['even']
self.Geos_odd = self.Geo_objects['odd']
self.Geos_most = self.Geo_objects['most']
self.Geos_special = self.Geo_objects['special']
self.Geos_full = self.Geo_objects['full']
self.Geos_full2 = self.Geo_objects['full2']
self.Geos_full3 = self.Geo_objects['full3']
self.Geos_all = self.Geo_objects['all']
self.Geos_standard = self.Geo_objects['standard']
self.Geos_symmetric = self.Geo_objects['symmetric']
self.Geos_inner_i = self.Geo_objects['inner_i']
self.Geos_general = self.Geo_objects['general conv.']
self.Geos_unconventional = self.Geo_objects['unconventional']
self.Geos_dissimilar = self.Geo_objects['dissimilar']
self.geos_sample = self.geo_inputs['sample']
@classmethod
def _extract_number(cls, string):
'''Return integer of numerics found in a string, i.e. '5-ply' --> 5.'''
for s in string.split('-'): # separate ply from number
if s.isdigit():
#print(s)
return int(s)
@classmethod
def _groupify_dict(cls, dict_default, Geo_obj=False):
'''Return a dict of logical groups.
This method is useful for automating groupings; new ply keys or
values can be added to the dict with relative ease.
Parameters
----------
dict_default : dict
Given a dict of "plies (str):geo strings (list)", key-value pairs.
Geo_obj : bool; Default False
True if a returned dict is desired of Geometry objects.
Returns
-------
dict
Keys of specified groups.
See Also
--------
utils.tools.natural_sort : order dict.items() in loops; needed for tests
Notes
-----
This methods requires:
- name keys by number of plies; e.g. '14-ply' (human sorts)
- add values as a list of strings
- add to the geo_inputs dict (Geo_objects mimics and updates automatically)
'''
d = ct.defaultdict(list)
dict_ = dict_default.copy()
# Sort dict naturally to help order the list values
##for k,v in sorted(dict_.items(), key=natural_sort):
for k, v in sorted(dict_.items(), key=ut.natural_sort):
# Prepare k, v
num = cls._extract_number(k)
if Geo_obj: # build Geo_objects simul.
G = la.input_.Geometry
v = [G(geo_string) for geo_string in v]
dict_[k] = v # overwrite original dict_
# Group keys
if (num is not None) and (num % 2 == 0):
#print(num)
d['even'].extend(v)
elif (num is not None) and (num % 2 != 0):
d['odd'].extend(v)
if (num is not None) and (num <= 5):
d['most'].append(v[0])
if (num is not None) and (num < 5):
d['special'].append(v[0])
if (num is not None) and (num == 5):
d['standard'].append(v[1])
if (num is not None) and (num <= 9):
#print(num)
d['full'].append(v[0])
if num is not None:
d['all'].extend(v)
# Inside strings
if not Geo_obj:
for geo_string in v:
if 'S' in geo_string:
#print(geo_string)
d['symmetric'].append(geo_string)
if ('[' in geo_string) and (',' in geo_string):
d['inner_i'].append(geo_string)
if '[' in geo_string:
#print(geo_string)
d['general conv.'].append(geo_string)
# TODO: refactor logic; test_BaseDefaults_unconventional1() should cover this
if '[' not in geo_string:
d['unconventional'].append(geo_string)
dict_.update(d)
# Post-fix groups; manual
# Note, manually adding strings here (vs. __init__) won't be grouped in Geo_object
# You must add to geo_input; the Geo_obj will be auto-made.
if Geo_obj:
'''Make smarter; look for unequal inners then make dissimilar.'''
dict_['dissimilar'] = [G('400-[150,50]-800'), G('400-[25,125,50]-800')]
else:
dict_['dissimilar'] = ['400-[150,50]-800', '400-[25,125,50]-800', ]
# Dict_references are auto added to Geo_obects
dict_['full2'] = dict_['full'] + dict_['symmetric']
dict_['full3'] = dict_['full2']
dict_['full3'].append(dict_['6-ply'][1])
dict_['full3'].append(dict_['10-ply'][0])
# A group that samples the first item of other groups except fulls and all
dict_['sample'] = []
for k, v in sorted(dict_.items(), key=ut.natural_sort):
if k not in ('full', 'full2', 'full3', 'all'):
sample = dict_[k][0]
if sample not in dict_['sample']:
dict_['sample'].append(sample)
return dict_
# HELPERS -------------------------------------------------------------
# Material Manipulations
@classmethod
def _convert_material_parameters(cls, mat_props):
'''Handle exceptions for converting input material dict in Standard Form.
Returns
-------
dict
Material properties converted to Standard Form.
Raises
------
TypeError
If mat_props is neither in Quick or Standard Form, but requires to
be written as a nested dict.
'''
try:
if mat_props is None:
dict_prop = {}
# Nested dict (Standard Form), directly assign
elif isinstance(mat_props, dict):
# Standard (Nested) Dict
mat_props['Modulus'].keys() # needed; triggers KeyError if not Standard Form
##_trigger = mat_props['Modulus'].keys() # needed; triggers KeyError if not Standard Form
dict_prop = mat_props
else:
raise TypeError('Nested dict of material parameters required. See Tutorial.')
except(KeyError):
# Un-nested dict (Quick Form), convert, then assign
# Assumes Quick Form; attempts to convert
##print('Converting mat_props to Standard Form...')
logging.info('Converting mat_props to Standard Form...')
dict_prop = cls._to_standard_dict(mat_props)
#dict_prop = la.distributions.Case._to_standard_dict(mat_props)
#print(dict_prop)
return dict_prop
@classmethod
def _to_standard_dict(cls, dict_, mat_properties=['Modulus', 'Poissons']):
'''Return dict from Quick Form to Standard Form (DataFrame-friendly).
Quick Form assumes input dict lists values ordered by
Modulus and Poisson's Ratio respectively. Used internally.
Quick Form: dict of lists
d = {'HA' : [5.2e10, 0.25],
PSu' : [2.7e9, 0.33]}
Standard Form: dict of dicts
d = {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9},
'Poissons': {'HA': 0.25, 'PSu': 0.33}}
Returns
-------
defaultdict
A dict of materials properties; names as keys, materials:
properties as key-value pairs.
'''
dict_prop = ct.defaultdict(dict)
for idx, k in enumerate(mat_properties):
# Modulus --> idx=0; Poissons --> idx=1
for matl in dict_:
dict_prop[k][matl] = dict_[matl][idx]
return dict_prop
@classmethod
def get_materials(cls, mat_props):
'''Return a list of ordered materials. Order can be overridden by a list.
Parameters
----------
mat_props : dict
Material properties in Standard or Quick Form.
Returns
-------
list
An ordered list of materials; uses a pandas DataFrame to order it.
'''
mat_props_conv = cls._convert_material_parameters(mat_props)
return pd.DataFrame(mat_props_conv).index.values.tolist()
# TODO: Look into why global_vars is used instead of globals
def get_FeatureInput(self, Geometry, load_params=None, mat_props=None,
materials=None, model=None, global_vars=None):
'''Return a FeatureInput for a given Geometry object.
Handles conversions to different formats. Idiomatic approach to building
FeatureInput objects, especially in custom models. All parameters
require user/author input.
Parameters
----------
Geometry : Geometry object
A native data type comprising geometry information.
load_params : dict; default None
Loading parameters.
mat_props : dict; default None
Material parameters.
materials : list; default None
Unique materials in stacking order; > 1 materials assumes alternating
layers. Will be converted to Standard Form.
model : str; default None
Custom model name located in `models` directory.
global_vars : dict, optional; default None
Additional variables that may be locally calculated though
globally pertinent.
Returns
-------
FeatureInput
Essential dict of user-provided values.
See Also
--------
la.distributions.Case.apply() : main creator of FeatureInput objects
la.models.Wilson_LT() : used to build default instance FeatureInput
'''
mat_props_conv = self._convert_material_parameters(mat_props)
if materials is None:
materials = self.get_materials(mat_props_conv)
# TODO: Add Exception handling of materials order list and mat_props here.
FeatureInput = {
'Geometry': Geometry,
'Parameters': load_params,
'Properties': mat_props_conv,
'Materials': materials,
'Model': model,
'Globals': global_vars,
}
return FeatureInput
# Make generators of custom geometry strings or objects
def generate(self, selection=None, geo_inputs=False):
'''Yield a generator of selected geometry strings or objects given a key.
Parameters
----------
selection : list of strings; default None
The strings are key names within the geo_inputs dict.
geo_inputs : bool; default False
If true, uses geo_inputs from BaseDefaults class; else defaults to Geo_objects
See Also
--------
utils.tools.natural_sort : orders `dict.items()` in loops; needed for tests
Examples
--------
>>> from lamana.input_ import BaseDefaults
>>> bdft = BaseDefaults()
>>> bdft.generate()
<itertools.chain at 0x7d1e278> # yields a generator
>>> list(bdft.generate(selection=['5-ply'], geo_inputs=True))
>>> list(gen)
['400-200-800', '400-[200]-800', '400-200-400S'] # geometry strings
>>> list(bdft.generate(selection=['standard'], geo_inputs=False))
[Geometry object (400.0-[200.0]-800.0)] # Geometry object; default
'''
# Default to all strings/objects (not groups) if None selected
try:
# selection='invalid key'
if not set(selection).intersection(self.geo_inputs.keys()):
raise KeyError('Key not found in geo_inputs dict.')
except(TypeError):
# selection=None; default 'all' key
if geo_inputs is True:
return self.geo_inputs['all']
elif geo_inputs is False:
return self.Geo_objects['all']
#print(selection)
else:
# selection=['valid key']
if geo_inputs is True:
dict_ = self.geo_inputs # geometry strings
else:
dict_ = self.Geo_objects # Geometry objects
# Sorted values filtered by selected keys
nested_lists = (dict_[k] for k in selection
if k in sorted(dict_, key=ut.natural_sort))
# print(list(flattened))
return it.chain(*nested_lists) # flattened
# -----------------------------------------------------------------------------
'''Classes and functions for handling user inputs.'''
# Geometry(): parse user input geometry strings to a tuple of floats (and lists/str)
# BaseDefaults(): library of general geometry defaults; subclassed by the user.
# flake8 input_.py --ignore=E265, E501, N803, N806, N802, N813, E133
import re
import logging
import itertools as it
import collections as ct
import pandas as pd
import lamana as la
#import lamana.lt_exceptions as exc
from lamana.lt_exceptions import FormatError
from lamana.utils import tools as ut
# =============================================================================
# USER INPUT ------------------------------------------------------------------
# =============================================================================
# Parses informations and generates User Input objects, i.e. Geometry()
def tokenize_geostring(geo_string):
    '''Return a list of tokens: [outer, inner_i, middle].

    The string is upper-cased and stripped of spaces before splitting on '-'.
    '''
    # TODO: Add handling of duples into _to_gen_convention first
    # For now, assume strings are formatted correctly
    # Check is_valid(); if not, attempt _to_gen_convention
    normalized = geo_string.upper().replace(' ', '')
    return normalized.split('-')
class Geometry(object):
    '''Parse input geometry string into floats.
    When a single (or a list of) geometry string(s) is passed to the
    `lamana.distributions.Case.apply()` method, this class parses those strings
    into (outer, [inner], middle, 'S') format; 'S' is optional.
    Here are examples of conventions used to write geometry strings:
    - General: outer-[inner_i]-middle
    - Short-hand: outer-inner-middle
    Formatted in General Convention, a converted namedtuple of the geometry
    is returned. Examples of GeometryTuples:
    - (400.0, [200.0], 800.0) # multi-ply
    - (400.0, [200.0], 800.0, 'S') # multi-ply, symmetric
    - (400.0, [100.0, 100.0], 800.0) # multi-ply, [inner_i]
    Parameters
    ----------
    geo_input : str or tupled str
        Geometry string of layer thicknesses in a laminate.
    Attributes
    ----------
    total
    total_middle
    total_inner
    total_inner_i
    total_outer
    is_symmetric
    namedtuple : namedtuple
        A GeometryTuple in General Convention, e.g. (400.0, [200.0], 800.0, 'S').
    geometry : namedtuple
        A second GeometryTuple, parsed identically to `namedtuple`.
    middle : float
        Middle layer thickness.
    inner : list of floats
        Inner layer thicknesses in micrometers.
    outer : float
        Outer layer thickness.
    string : str
        The input geometry string converted to General Convention format.
    Examples
    --------
    >>> g1 = ('0-0-2000') # Monolith
    >>> g2 = ('1000-0-0') # Bilayer
    >>> g3 = ('600-0-800') # Trilayer
    >>> g4 = ('500-500-0') # 4-ply
    >>> g5 = ('400-200-800') # Short-hand; <= 5-ply
    >>> g6 = ('400-200-400S') # Symmetric
    >>> g7 = ('400-[200]-800') # Gen. convention; 5-ply
    >>> g8 = ('400-[100,100]-800') # Gen. convention; 7-plys
    >>> la.input_.Geometry(g5)
    Geometry object (400.0-[200.0]-800.0)
    '''
    def __init__(self, geo_input):
        '''Convert `geo_input` to General Convention and cache parsed forms.'''
        # TODO: Consolidate into namedtuple or self
        # TODO: rename geometrytuple
        self.string = self.__class__._to_gen_convention(geo_input)
        self.namedtuple = self._parse_geometry(self.string)  # a namedtuple; see collections lib
        self.geometry = self._parse_geometry(self.string)  # parsed again; duplicate of `namedtuple`
        self.middle = self.geometry.middle  # attributes from namedtuple; symmetric sensitive
        self.inner = self.geometry.inner
        self.outer = self.geometry.outer
        # Private attribute used for set comparisons and hashing; note it is
        # parsed from the RAW geo_input, with inner tupled (hashable).
        # TODO: should we pass in geo_string instead of geo_inputs?
        self._geometry_hash = self._parse_geometry(geo_input, hash_=True)
    def __str__(self):
        '''Return the trimmed, General Convention geometry string.'''
        return self.string
    def __repr__(self):  # for object calls
        '''Return the Geometry object string, e.g. "Geometry object (400.0-[200.0]-800.0)".'''
        return '{} object ({})'.format(self.__class__.__name__, self.string)
    def __eq__(self, other):
        '''Compare GeometryTuples; equal only for another Geometry with equal state.'''
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return NotImplemented
    def __ne__(self, other):
        '''Negate __eq__; defer with NotImplemented for foreign types.'''
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def __hash__(self):
        '''Allow set comparisons.
        The only required property for a hash is that objects which equally
        compare have the same hash value (REF 035). `self.__dict__` is unhashable
        due to the inner list (lists are mutable, thus unhashable). So a copy is
        made called `_geometry_hash` of GeometryTuple where the inner value is
        tupled instead.
        '''
        return hash(self._geometry_hash)
    def _parse_geometry(self, geo_string, hash_=False):
        '''Return a namedtuple of outer-inner-middle geometry values.
        Per General Convention, a GeometryTuple has floats and an inner list.
        Checks for symmetry, handles inner list, then makes the GeometryTuple.
        Also can create a hashable version of GeometryTuple; tuple instead of
        list for inner_i.
        Parameters
        ----------
        geo_string : tupled str or str
            outer-inner-middle values or outer-[inner]-middle values;
            formatted in general convention (0.4.11).
        hash_ : bool; default False
            If True, build the hashable GeometryTuple (inner tupled, not listed).
        Returns
        -------
        namedtuple of mixed types
            GeometryTuple: numeric values converted to floats; (outer, [inner], middle, 'S?')
        '''
        def check_symmetry(last):
            '''Yield float or str if 'S' is in the last token of the geometry string.
            Examples
            --------
            >>> [i for i in check_symmetry('400S')]
            [400.0, 'S']
            >>> [i for i in check_symmetry('400')]
            [400.0]
            '''
            if last.endswith('S'):
                # Converts last '400S' to [400.0, 'S']
                number = float(last[:-1])
                letter = last[-1]
                yield number
                yield letter
            else:
                # Converts last '400' to [400.0]
                yield float(last)
        # Inner Parsing
        def parse_inner(inside):
            '''Yield values from the inner list of a geometry string.
            Also parses inner_i (if multiple inners are found) str to floats.
            This is later converted to a list of inner_i, as required by LPEP 001.01.
            Examples
            --------
            >>> list(parse_inner('[100,100]'))
            [100.0, 100.0]
            >>> list(parse_inner('[200]'))
            [200.0]
            >>> list(parse_inner('200'))
            [200.0]
            '''
            if inside.startswith('['):
                if ',' in inside:
                    # Convert inside string '[100,100]' to [100.0,100.0]
                    for item in inside[1:-1].split(','):
                        yield float(item)
                else:
                    # Convert inside string '[200]' to [200.0]
                    converted = inside[1:-1]  # excludes bracket strings
                    yield float(converted)
            else:
                # Convert inside string '200' to [200] if brackets aren't found
                yield float(inside)
        # Convert Input String
        def _make_GeometryTuple(tokens, hashable=False):
            '''Return a namedtuple of floats representing geometry thicknesses.'''
            outer = float(tokens[0])
            inner_hash = tuple(parse_inner(tokens[1]))  # tupled inner for hash()
            inner = list(parse_inner(tokens[1]))  # complies w LPEP 001.01
            middle = tuple(check_symmetry(tokens[-1]))
            '''Should warn the object is symmetric though.'''
            # NOTE: symmetry is detected from the enclosing geo_string, giving a
            # 3-field or 4-field namedtuple accordingly.
            if 'S' not in geo_string:
                GeometryTuple = ct.namedtuple(
                    'GeometryTuple', ['outer', 'inner', 'middle'])
                if hashable:
                    return GeometryTuple(outer, inner_hash, middle[0])
                return GeometryTuple(outer, inner, middle[0])
            else:
                GeometryTuple = ct.namedtuple(
                    'GeometryTuple', ['outer', 'inner', 'middle', 'symmetric'])
                if hashable:
                    return GeometryTuple(outer, inner_hash, middle[0], middle[-1])
                return GeometryTuple(outer, inner, middle[0], middle[-1])
        # Create --------------------------------------------------------------
        '''Replace with try-except block.'''
        # Tests geo_string is a string
        # TODO: change to geo_string
        tokens = geo_string.split('-')
        # TODO: Find another name for hash_ which is a bool
        # Gets hash_ bool passed in from parsed_geometry
        return _make_GeometryTuple(tokens, hashable=hash_)
    @classmethod
    def _to_gen_convention(cls, geo_input):
        '''Return a geometry string converted to general convention.
        Handles string-validation Exceptions.
        Raises
        ------
        FormatError
            If more than one letter, a letter other than 'S'/'s', or a token
            count other than three is found in `geo_input`.
        TypeError
            If `geo_input` is not a string.
        '''
        try:
            # Check geo_input is a string; non-str lacks .split -> AttributeError
            tokens = geo_input.split('-')
            # Check for letters in the input
            any_letters = re.compile('[a-zA-Z]', re.IGNORECASE)
            if any_letters.search(geo_input):
                all_letters = any_letters.findall(geo_input)
                # Raise if more than one letter in the Input
                if len(all_letters) > 1:
                    raise FormatError(
                        "Input must not contain more than one letter, 'S'."
                    )
                # Raise if 's' or 'S' is not the letter
                if not set(all_letters).issubset(['s', 'S']):
                    raise FormatError(
                        "Invalid letter detected; only 'S' allowed."
                    )
            if len(tokens) < 3:
                raise FormatError(
                    "Input token is too short. Supported geometry string format:"
                    " 'outer-[inner_i]-middle'"
                )
            if len(tokens) > 3:
                raise FormatError(
                    "Input token is too long. Supported geometry string format:"
                    " 'outer-[inner_i]-middle'"
                )
        except(AttributeError):
            # Needed in general and for distributions.Cases()
            raise TypeError(
                "Cannot parse input type. Supported types: str. {} given.".format(geo_input)
            )
        first = tokens[0]
        inside = tokens[1]
        last = tokens[-1]
        # Convert strings to general convention
        first = str(float(first))
        if inside.startswith('['):
            # Convert inside string '[100,100]' to '100.0,100.0'
            if ',' in inside:
                converted = [str(float(item)) for item in inside[1:-1].split(',')]
                inside = ','.join(converted)
            else:
                # Convert inside string '[200]' to '200.0'
                inside = str(float(inside[1:-1]))
        elif not inside.startswith('['):
            # Convert inside string '200' to 200.0 if brackets aren't found
            inside = str(float(inside))
        # Always add brackets
        inside = ''.join(['[', inside, ']'])
        if last.endswith('S'):
            last = str(float(last[:-1]))
            last = ''.join([last, 'S'])
        elif not last.endswith('S'):
            last = str(float(last))
        geo_string = '-'.join([first, inside, last])
        return geo_string
    # API --------------------------------------------------------------------
    # DEPRECATED: itotal() in 0.4.3d
    @property
    def total(self):
        '''Calculate total thickness for laminates using any convention.'''
        if self.is_symmetric:
            factor = 2  # symmetric strings store only half the middle layer
        else:
            factor = 1
        return 2 * self.outer + 2 * sum(self.inner) + factor * self.middle
    @property
    def total_middle(self):
        '''Calculate total thickness for middle lamina using any convention.'''
        if self.is_symmetric:
            return 2 * self.middle
        else:
            return self.middle
    @property
    def total_inner(self):
        '''Calculate total thickness for inner lamina using any convention.'''
        return 2 * sum(self.inner)
    @property
    def total_inner_i(self):
        '''Calculate total thickness for the ith inner lamina.'''
        result = [inner_i * 2 for inner_i in self.inner]
        return result
    @property
    def total_outer(self):
        '''Calculate total thickness for outer lamina using any convention.'''
        return 2 * self.outer
    @property
    def is_symmetric(self):
        '''Return True if 'S' convention is used.
        Examples
        --------
        >>> Geometry('400-200-800').is_symmetric
        False
        >>> Geometry('400-200-400S').is_symmetric
        True
        '''
        # Membership test over the GeometryTuple's field values; the
        # 'symmetric' field holds 'S' only when the input used 'S' convention.
        return 'S' in self.geometry
# -----------------------------------------------------------------------------
# UTILITY
# -----------------------------------------------------------------------------
# Supplies basic dicts and methods by supplying custom case-building information.
class BaseDefaults(object):
'''Common geometry strings, objects and methods for building defaults.
Allows quick access to default parameters. It is useful for consistent testing.
Users can subclass geometric defaults and add specific parameters
(loading, material, geometric, etc.) to improve start-up and reduce
redundant code. Here are some objects that can be found in and subclassed
from this base class:
- Base : Default geometry strings
- Base : Default Geometry objects
- Subclass : Default material and geometric/loading parameters
- Subclass : Default FeatureInputs
Defaults are maintained in two dicts:
- `geo_inputs` : A dict of standard geometry strings and special groups.
- `Geo_objects` : A dict of converted geo_inputs into Geometry objects.
Methods
-------
get_FeatureInput(Geometry, load_params=None, mat_props=None, **kwargs)
Return a dict of the basic FeatureInput object; subclass in a model.
get_materials(mat_props)
Return a list of materials in order from a mat_props dict or DataFrame.
generate(selection=None, geo_inputs=False)
Yield a generator of selected geometries.
Notes
-----
DEV: add entries to the Default dicts. Removing existing dict entries or
"trimming" the Default dicts will break tests (not recommended).
Material properties, geometric/loading parameters and FeatureInput cannot be
generalized and are thus left to the author to define in their custom defaults
subclass. The best place to customize this is in a custom models module.
Examples
--------
Base: Idiomatic instantiation of Base Defaults
>>> bdft = BaseDefaults() # instantiation
Access a set of built-in geometry strings
>>> bdft.geos_most # list of geometry Input strings
[('0-0-2000'), ('1000-0-0'), ('600-0-800'),
('500-500-0'), ('400-200-800')]
Access a set of built-in Geometry objects (converted geometry strings)
>>> bdft.Geos_simple # list of Geometry objects
[<Geometry object ('0-0-2000')>,
<Geometry object ('1000-0-0')>,
<Geometry object '600-0-800')>,
<Geometry object ('500-500-0')>,]
Subclass: Idiomatic import and instantiation of custom Defaults (see Wilson_LT ex.)
>>> from lamana.models import Wilson_LT as wlt # user-implemmented Defaults
>>> dft = wlt.Defaults() # subclassed from BaseDefaults
Access Defaults loading parameters, material properties and FeatureInput
>>> dft.load_params
{'R': 12e-3, 'a': 7.5e-3, 'p': 1, 'P_a': 1, 'r': 2e-4}
>>> dft.mat_props
{'HA': [5.2e10, 0.25], 'PSu': [2.7e9, 0.33],}
>>> dft.FeatureInput
{'Geometry': '400-[200]-800',
'Geometric': {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,},
'Materials': {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33],},
'Custom': None,
'Model': Wilson_LT,
'Globals': None,}
Reassign Defaults instances (e.g. R, p)
>>> dft.load_params = {
... 'R': 50e-3, 'a': 7.5e-3, 'p' : 5,
... 'P_a': 1, 'r': 2e-4,
... }
>>> dft.load_params
{'R': 50e-3, 'a': 7.5e-3, 'p' : 5, 'P_a': 1, 'r': 2e-4,}
'''
def __init__(self):
# TODO: Add BaseDefaults attributes to claim the namespace
# i.e, load_params = None, mat_props = None, FeatureInput = None
# Consider this architexture rather than leave the author with oneous to def vars
# Geometry Input Strings
# DEV: Add geometry strings here. Do not remove.
self.geo_inputs = {
'1-ply': ['0-0-2000', '0-0-1000'],
'2-ply': ['1000-0-0'],
'3-ply': ['600-0-800', '600-0-400S'],
'4-ply': ['500-500-0', '400-[200]-0'],
'5-ply': ['400-200-800', '400-[200]-800', '400-200-400S'],
'6-ply': ['400-[100,100]-0', '500-[250,250]-0'],
'7-ply': ['400-[100,100]-800', '400-[100,100]-400S'],
'9-ply': ['400-[100,100,100]-800'],
'10-ply': ['500-[50,50,50,50]-0'],
'11-ply': ['400-[100,100,100,100]-800'],
'13-ply': ['400-[100,100,100,100,100]-800'],
}
# To add keys, first add logic to automate appending dict_ in groupify
# This should add a key to geo_inputs. Geo_objects will auto-mimic this logic.
# Then define attributes (below) of geo_inputs and Geo_objects for API access.
self.geo_inputs = self._groupify_dict(self.geo_inputs) # needed for next line
self.Geo_objects = self._groupify_dict(self.geo_inputs, Geo_obj=True)
# ATTRIBUTES ----------------------------------------------------------
# Quick access to geometry related groups
# Geometry Input String Attributes
self.geos_even = self.geo_inputs['even']
self.geos_odd = self.geo_inputs['odd']
self.geos_most = self.geo_inputs['most']
self.geos_special = self.geo_inputs['special']
self.geos_full = self.geo_inputs['full']
self.geos_full2 = self.geo_inputs['full2']
self.geos_full3 = self.geo_inputs['full3']
self.geos_all = self.geo_inputs['all']
self.geos_standard = self.geo_inputs['standard']
self.geos_symmetric = self.geo_inputs['symmetric']
self.geos_inner_i = self.geo_inputs['inner_i']
self.geos_general = self.geo_inputs['general conv.']
self.geos_unconventional = self.geo_inputs['unconventional']
self.geos_dissimilar = self.geo_inputs['dissimilar']
self.geos_sample = self.geo_inputs['sample']
# TODO: must be way to automate these assignments; reduce redundancy.
# Geometry Object Attributes
self.Geos_even = self.Geo_objects['even']
self.Geos_odd = self.Geo_objects['odd']
self.Geos_most = self.Geo_objects['most']
self.Geos_special = self.Geo_objects['special']
self.Geos_full = self.Geo_objects['full']
self.Geos_full2 = self.Geo_objects['full2']
self.Geos_full3 = self.Geo_objects['full3']
self.Geos_all = self.Geo_objects['all']
self.Geos_standard = self.Geo_objects['standard']
self.Geos_symmetric = self.Geo_objects['symmetric']
self.Geos_inner_i = self.Geo_objects['inner_i']
self.Geos_general = self.Geo_objects['general conv.']
self.Geos_unconventional = self.Geo_objects['unconventional']
self.Geos_dissimilar = self.Geo_objects['dissimilar']
self.geos_sample = self.geo_inputs['sample']
@classmethod
def _extract_number(cls, string):
'''Return integer of numerics found in a string, i.e. '5-ply' --> 5.'''
for s in string.split('-'): # separate ply from number
if s.isdigit():
#print(s)
return int(s)
@classmethod
def _groupify_dict(cls, dict_default, Geo_obj=False):
'''Return a dict of logical groups.
This method is useful for automating groupings; new ply keys or
values can be added to the dict with relative ease.
Parameters
----------
dict_default : dict
Given a dict of "plies (str):geo strings (list)", key-value pairs.
Geo_obj : bool; Default False
True if a returned dict is desired of Geometry objects.
Returns
-------
dict
Keys of specified groups.
See Also
--------
utils.tools.natural_sort : order dict.items() in loops; needed for tests
Notes
-----
This methods requires:
- name keys by number of plies; e.g. '14-ply' (human sorts)
- add values as a list of strings
- add to the geo_inputs dict (Geo_objects mimics and updates automatically)
'''
d = ct.defaultdict(list)
dict_ = dict_default.copy()
# Sort dict naturally to help order the list values
##for k,v in sorted(dict_.items(), key=natural_sort):
for k, v in sorted(dict_.items(), key=ut.natural_sort):
# Prepare k, v
num = cls._extract_number(k)
if Geo_obj: # build Geo_objects simul.
G = la.input_.Geometry
v = [G(geo_string) for geo_string in v]
dict_[k] = v # overwrite original dict_
# Group keys
if (num is not None) and (num % 2 == 0):
#print(num)
d['even'].extend(v)
elif (num is not None) and (num % 2 != 0):
d['odd'].extend(v)
if (num is not None) and (num <= 5):
d['most'].append(v[0])
if (num is not None) and (num < 5):
d['special'].append(v[0])
if (num is not None) and (num == 5):
d['standard'].append(v[1])
if (num is not None) and (num <= 9):
#print(num)
d['full'].append(v[0])
if num is not None:
d['all'].extend(v)
# Inside strings
if not Geo_obj:
for geo_string in v:
if 'S' in geo_string:
#print(geo_string)
d['symmetric'].append(geo_string)
if ('[' in geo_string) and (',' in geo_string):
d['inner_i'].append(geo_string)
if '[' in geo_string:
#print(geo_string)
d['general conv.'].append(geo_string)
# TODO: refactor logic; test_BaseDefaults_unconventional1() should cover this
if '[' not in geo_string:
d['unconventional'].append(geo_string)
dict_.update(d)
# Post-fix groups; manual
# Note, manually adding strings here (vs. __init__) won't be grouped in Geo_object
# You must add to geo_input; the Geo_obj will be auto-made.
if Geo_obj:
'''Make smarter; look for unequal inners then make dissimilar.'''
dict_['dissimilar'] = [G('400-[150,50]-800'), G('400-[25,125,50]-800')]
else:
dict_['dissimilar'] = ['400-[150,50]-800', '400-[25,125,50]-800', ]
# Dict_references are auto added to Geo_obects
dict_['full2'] = dict_['full'] + dict_['symmetric']
dict_['full3'] = dict_['full2']
dict_['full3'].append(dict_['6-ply'][1])
dict_['full3'].append(dict_['10-ply'][0])
# A group that samples the first item of other groups except fulls and all
dict_['sample'] = []
for k, v in sorted(dict_.items(), key=ut.natural_sort):
if k not in ('full', 'full2', 'full3', 'all'):
sample = dict_[k][0]
if sample not in dict_['sample']:
dict_['sample'].append(sample)
return dict_
# HELPERS -------------------------------------------------------------
# Material Manipulations
@classmethod
def _convert_material_parameters(cls, mat_props):
'''Handle exceptions for converting input material dict in Standard Form.
Returns
-------
dict
Material properties converted to Standard Form.
Raises
------
TypeError
If mat_props is neither in Quick or Standard Form, but requires to
be written as a nested dict.
'''
try:
if mat_props is None:
dict_prop = {}
# Nested dict (Standard Form), directly assign
elif isinstance(mat_props, dict):
# Standard (Nested) Dict
mat_props['Modulus'].keys() # needed; triggers KeyError if not Standard Form
##_trigger = mat_props['Modulus'].keys() # needed; triggers KeyError if not Standard Form
dict_prop = mat_props
else:
raise TypeError('Nested dict of material parameters required. See Tutorial.')
except(KeyError):
# Un-nested dict (Quick Form), convert, then assign
# Assumes Quick Form; attempts to convert
##print('Converting mat_props to Standard Form...')
logging.info('Converting mat_props to Standard Form...')
dict_prop = cls._to_standard_dict(mat_props)
#dict_prop = la.distributions.Case._to_standard_dict(mat_props)
#print(dict_prop)
return dict_prop
@classmethod
def _to_standard_dict(cls, dict_, mat_properties=['Modulus', 'Poissons']):
'''Return dict from Quick Form to Standard Form (DataFrame-friendly).
Quick Form assumes input dict lists values ordered by
Modulus and Poisson's Ratio respectively. Used internally.
Quick Form: dict of lists
d = {'HA' : [5.2e10, 0.25],
PSu' : [2.7e9, 0.33]}
Standard Form: dict of dicts
d = {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9},
'Poissons': {'HA': 0.25, 'PSu': 0.33}}
Returns
-------
defaultdict
A dict of materials properties; names as keys, materials:
properties as key-value pairs.
'''
dict_prop = ct.defaultdict(dict)
for idx, k in enumerate(mat_properties):
# Modulus --> idx=0; Poissons --> idx=1
for matl in dict_:
dict_prop[k][matl] = dict_[matl][idx]
return dict_prop
@classmethod
def get_materials(cls, mat_props):
    '''Return a list of ordered materials. Order can be overridden by a list.

    Parameters
    ----------
    mat_props : dict
        Material properties in Standard or Quick Form.

    Returns
    -------
    list
        Material names; ordering comes from a pandas DataFrame index
        built over the Standard Form dict.
    '''
    standardized = cls._convert_material_parameters(mat_props)
    frame = pd.DataFrame(standardized)
    return frame.index.tolist()
# TODO: Look into why global_vars is used instead of globals
def get_FeatureInput(self, Geometry, load_params=None, mat_props=None,
                     materials=None, model=None, global_vars=None):
    '''Return a FeatureInput for a given Geometry object.

    Idiomatic way to assemble FeatureInput dicts, particularly in custom
    models; handles material-dict conversion.  All parameters require
    user/author input.

    Parameters
    ----------
    Geometry : Geometry object
        A native data type comprising geometry information.
    load_params : dict; default None
        Loading parameters.
    mat_props : dict; default None
        Material parameters; converted to Standard Form.
    materials : list; default None
        Unique materials in stacking order; > 1 materials assumes
        alternating layers.  Derived from `mat_props` when omitted.
    model : str; default None
        Custom model name located in `models` directory.
    global_vars : dict, optional; default None
        Additional variables that may be locally calculated though
        globally pertinent.

    Returns
    -------
    dict
        FeatureInput: essential dict of user-provided values.

    See Also
    --------
    la.distributions.Case.apply() : main creator of FeatureInput objects
    la.models.Wilson_LT() : used to build default instance FeatureInput
    '''
    converted_props = self._convert_material_parameters(mat_props)
    if materials is None:
        materials = self.get_materials(converted_props)
    # TODO: Add Exception handling of materials order list and mat_props here.
    return {
        'Geometry': Geometry,
        'Parameters': load_params,
        'Properties': converted_props,
        'Materials': materials,
        'Model': model,
        'Globals': global_vars,
    }
# Make generators of custom geometry strings or objects
def generate(self, selection=None, geo_inputs=False):
    '''Yield a generator of selected geometry strings or objects given a key.

    Parameters
    ----------
    selection : list of str, optional; default None
        Key names within the geo_inputs/Geo_objects dicts.  If None, the
        full 'all' group list is returned directly.
    geo_inputs : bool; default False
        If True, draws from the `geo_inputs` dict of geometry strings;
        otherwise draws from the `Geo_objects` dict of Geometry objects.

    Returns
    -------
    itertools.chain or list
        A flattened generator over the selected groups, or the 'all' list
        when no selection is given.

    Raises
    ------
    KeyError
        If none of the selected keys exist in the geo_inputs dict.

    Examples
    --------
    >>> from lamana.input_ import BaseDefaults
    >>> bdft = BaseDefaults()
    >>> bdft.generate()                 # selection=None; returns the 'all' list
    [Geometry object (400.0-[200.0]-800.0), ...]
    >>> list(bdft.generate(selection=['5-ply'], geo_inputs=True))
    ['400-200-800', '400-[200]-800', '400-200-400S'] # geometry strings
    '''
    # Default to all strings/objects (not groups) if None selected
    try:
        # selection='invalid key': no overlap with known keys -> raise.
        # NOTE: validation is always against geo_inputs keys, even when
        # geo_inputs=False; Geo_objects mirrors geo_inputs by construction.
        if not set(selection).intersection(self.geo_inputs.keys()):
            raise KeyError('Key not found in geo_inputs dict.')
    except(TypeError):
        # selection=None: set(None) raises TypeError; default to 'all' key.
        if geo_inputs is True:
            return self.geo_inputs['all']
        elif geo_inputs is False:
            return self.Geo_objects['all']
    else:
        # selection=['valid key']
        if geo_inputs is True:
            dict_ = self.geo_inputs                # geometry strings
        else:
            dict_ = self.Geo_objects               # Geometry objects
        # Filter groups by selected keys.  Plain dict membership replaces
        # the former `k in sorted(dict_, key=ut.natural_sort)`: sorting a
        # key list has no effect on membership and cost O(n log n) per
        # selected key.  Output order follows `selection` order (as before).
        nested_lists = (dict_[k] for k in selection if k in dict_)
        return it.chain(*nested_lists)             # flattened
Attributes ---------- total total_middle total_inner total_inner_i total_outer is_symmetric namedtuple : namedtuple A GeometryTuple in General Convention, e.g. (400.0, [200.0], 800.0, 'S'). geometry : list The converted and parsed geometry string. middle : float Middle layer thickness. inner : list of floats Inner layer thicknesses in micrometers. outer : float Outer layer thickness. string : str The input geometry string converted to General Convention format. Examples -------- >>> g1 = ('0-0-2000') # Monolith >>> g2 = ('1000-0-0') # Bilayer >>> g3 = ('600-0-800') # Trilayer >>> g4 = ('500-500-0') # 4-ply >>> g5 = ('400-200-800') # Short-hand; <= 5-ply >>> g6 = ('400-200-400S') # Symmetric >>> g7 = ('400-[200]-800') # Gen. convention; 5-ply >>> g8 = ('400-[100,100]-800') # Gen. convention; 7-plys >>> la.input_.Geometry(g5) Geometry object (400.0-[200.0]-800.0) Ideally a want to call Geometry and get namedtuple auto; return self? # TODO: Consolidate into namedtuple or self # TODO: rename geometrytuple # a namedtuple; see collections lib # a namedtuple; see collections lib ## self.namedtuple = self._parse_geometry(geo_input) # a namedtuple; see collections lib ## self.geometry = self._parse_geometry(geo_input) # a namedtuple; see collections lib # attributes from namedtuple; symmetric sensitive ## self.string = self.__class__._to_gen_convention(geo_input) # Private attribute used for set comparisons and hashing # TODO: should we pass in geo_string instead of geo_inputs? Returns trimmed geometry string. # for object calls Returns Geometry object string. Compare GeometryTuples. #print(self.__dict__) #print(other.__dict__) Allow set comparisons. The only required property for a hash is that objects which equally compare have the same hash value (REF 035). `self.__dict__` is unhashable due to the inner list (lists are mutable, thus unhashable). So a copy is made called `_geometry_hash` of GeometryTuple where the inner value is tupled instead. 
#return hash(tuple(sorted(self.__dict__.items()))) #return hash(self._geo_string) Return a namedtuple of outer-inner-middle geometry values. Per General Convention, a GeometryTuple has floats and an inner list. Checks for symmetry, handles inner list, then makes the GeometryTuple. Also can create a hashable version of GeometryTuple; tuple instead of list for inner_i. Parameters --------- geo_string : tupled str or str outer-inner-middle values or outer-[inner]-middle values; formatted in general convention (0.4.11). Returns ------- namedtuple of mixed types GeometryTuple: numeric values converted to floats; (outer, [inner], middle,'S?') Yield float or str if 'S' is in the last token of the geometry string. Examples -------- >>> [i for i in check_symmetry('400S')] [400.0, 'S'] >>> [i for i in check_symmetry('400')] [400.0] # Converts last '400S' to [400.0,'S'] #print(type(last)) # Converts last '400' to [400.0] # Inner Parsing Yield values from the inner list of a geometry string. Also parses inner_i (if multiple inners are found) str to floats. This is later converted to a list of inner_i, as required by LPEP 001.01. Examples -------- >>> list(parse_inner('[100,100]')) [100.0, 100.0] >>> list(parse_inner('[200]')) [200.0] >>> list(parse_inner('200')) [200.0] Raises ------ TypeError If a non-string is passed in for the geo_string arg. FormatError If the parsed geo string is less than 3 tokens. # Convert inside string '[100,100]' to [100.0,100.0] # Convert inside string '[200]' to [200.0] # excludes bracket strings #print(converted) # Convert insde string '200' to [200] if brackets aren't found # Convert Input String Return a namedtuple of floats representing geometry thicknesses. # tupled inner for hash() # complies w LPEP 001.01 Should warn the object is symmetric though. # Create -------------------------------------------------------------- Replace with try-except block. 
# Tests geo_string is a string # TODO: change to geo_string # TODO: Find another name for hash_ which is a bool''' # Gets hash_ bool passed in from parsed_geometry ##tokens = geo_input.split('-') ##if not isinstance(geo_input, str): ##raise TypeError("Cannot parse input type. Supported types: str") ##elif len(tokens) < 3: # TODO: Replace with custom exception ##raise exc.FormatError( ## "Input token is too short. Supported geometry string format:" ## " 'outer-[inner_i]-middle'" ##) ##else: # TODO: Find another name for hash_ which is a bool''' # Gets hash_ bool from parsed_geometry ##return _make_GeometryTuple(tokens, hashable=hash_) Return a geometry string converted to general convention. Handles string-validation Exceptions. # Check geo_input is a string # Check for letters in the input # Raise if more than one letter in the Input ##raise exc.FormatError( # Raise if 's' or 'S' is not the letter ##raise exc.FormatError( ##raise exc.FormatError( ##raise exc.FormatError( # Needed in general and for distributions.Cases() #print(inside) # Convert strings to general convention # Convert inside string '[100,100]' to '100.0,100.0' # Convert inside string '[200]' to '200.0' # Convert inside string '200' to 200.0 if brackets aren't found # Always add brackets #print('Converting geometry string to general convention.') # API -------------------------------------------------------------------- # DEPRECATED: itotal() in 0.4.3d Calculate total thickness for laminates using any convention. Calculate total thickness for middle lamina using any convention. Calculate total thickness for inner lamina using any convention. Calculate total thickness for the ith inner lamina. Calculate total thickness for outer lamina using any convention. Return True if 'S' convention is used. Examples -------- >>> g5 = ('400-200-800') # Short-hand use; <= 5-ply >>> g6 = ('400-200-400S') # Symmetric >>> for geo in [g5, g6] ... 
geo.is_symmetric False True # ----------------------------------------------------------------------------- # UTILITY # ----------------------------------------------------------------------------- # Supplies basic dicts and methods by supplying custom case-building information. Common geometry strings, objects and methods for building defaults. Allows quick access to default parameters. It is useful for consistent testing. Users can subclass geometric defaults and add specific parameters (loading, material, geometric, etc.) to improve start-up and reduce redundant code. Here are some objects that can be found in and subclassed from this base class: - Base : Default geometry strings - Base : Default Geometry objects - Subclass : Default material and geometric/loading parameters - Subclass : Default FeatureInputs Defaults are maintained in two dicts: - `geo_inputs` : A dict of standard geometry strings and special groups. - `Geo_objects` : A dict of converted geo_inputs into Geometry objects. Methods ------- get_FeatureInput(Geometry, load_params=None, mat_props=None, **kwargs) Return a dict of the basic FeatureInput object; subclass in a model. get_materials(mat_props) Return a list of materials in order from a mat_props dict or DataFrame. generate(selection=None, geo_inputs=False) Yield a generator of selected geometries. Notes ----- DEV: add entries to the Default dicts. Removing existing dict entries or "trimming" the Default dicts will break tests (not recommended). Material properties, geometric/loading parameters and FeatureInput cannot be generalized and are thus left to the author to define in their custom defaults subclass. The best place to customize this is in a custom models module. 
Examples -------- Base: Idiomatic instantiation of Base Defaults >>> bdft = BaseDefaults() # instantiation Access a set of built-in geometry strings >>> bdft.geos_most # list of geometry Input strings [('0-0-2000'), ('1000-0-0'), ('600-0-800'), ('500-500-0'), ('400-200-800')] Access a set of built-in Geometry objects (converted geometry strings) >>> bdft.Geos_simple # list of Geometry objects [<Geometry object ('0-0-2000')>, <Geometry object ('1000-0-0')>, <Geometry object '600-0-800')>, <Geometry object ('500-500-0')>,] Subclass: Idiomatic import and instantiation of custom Defaults (see Wilson_LT ex.) >>> from lamana.models import Wilson_LT as wlt # user-implemmented Defaults >>> dft = wlt.Defaults() # subclassed from BaseDefaults Access Defaults loading parameters, material properties and FeatureInput >>> dft.load_params {'R': 12e-3, 'a': 7.5e-3, 'p': 1, 'P_a': 1, 'r': 2e-4} >>> dft.mat_props {'HA': [5.2e10, 0.25], 'PSu': [2.7e9, 0.33],} >>> dft.FeatureInput {'Geometry': '400-[200]-800', 'Geometric': {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,}, 'Materials': {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33],}, 'Custom': None, 'Model': Wilson_LT, 'Globals': None,} Reassign Defaults instances (e.g. R, p) >>> dft.load_params = { ... 'R': 50e-3, 'a': 7.5e-3, 'p' : 5, ... 'P_a': 1, 'r': 2e-4, ... } >>> dft.load_params {'R': 50e-3, 'a': 7.5e-3, 'p' : 5, 'P_a': 1, 'r': 2e-4,} # TODO: Add BaseDefaults attributes to claim the namespace # i.e, load_params = None, mat_props = None, FeatureInput = None # Consider this architexture rather than leave the author with oneous to def vars # Geometry Input Strings # DEV: Add geometry strings here. Do not remove. # To add keys, first add logic to automate appending dict_ in groupify # This should add a key to geo_inputs. Geo_objects will auto-mimic this logic. # Then define attributes (below) of geo_inputs and Geo_objects for API access. 
# needed for next line # ATTRIBUTES ---------------------------------------------------------- # Quick access to geometry related groups # Geometry Input String Attributes # TODO: must be way to automate these assignments; reduce redundancy. # Geometry Object Attributes Return integer of numerics found in a string, i.e. '5-ply' --> 5. # separate ply from number #print(s) Return a dict of logical groups. This method is useful for automating groupings; new ply keys or values can be added to the dict with relative ease. Parameters ---------- dict_default : dict Given a dict of "plies (str):geo strings (list)", key-value pairs. Geo_obj : bool; Default False True if a returned dict is desired of Geometry objects. Returns ------- dict Keys of specified groups. See Also -------- utils.tools.natural_sort : order dict.items() in loops; needed for tests Notes ----- This methods requires: - name keys by number of plies; e.g. '14-ply' (human sorts) - add values as a list of strings - add to the geo_inputs dict (Geo_objects mimics and updates automatically) # Sort dict naturally to help order the list values ##for k,v in sorted(dict_.items(), key=natural_sort): # Prepare k, v # build Geo_objects simul. # overwrite original dict_ # Group keys #print(num) #print(num) # Inside strings #print(geo_string) #print(geo_string) # TODO: refactor logic; test_BaseDefaults_unconventional1() should cover this # Post-fix groups; manual # Note, manually adding strings here (vs. __init__) won't be grouped in Geo_object # You must add to geo_input; the Geo_obj will be auto-made. Make smarter; look for unequal inners then make dissimilar. # Dict_references are auto added to Geo_obects # A group that samples the first item of other groups except fulls and all # HELPERS ------------------------------------------------------------- # Material Manipulations Handle exceptions for converting input material dict in Standard Form. Returns ------- dict Material properties converted to Standard Form. 
Raises ------ TypeError If mat_props is neither in Quick or Standard Form, but requires to be written as a nested dict. # Nested dict (Standard Form), directly assign # Standard (Nested) Dict # needed; triggers KeyError if not Standard Form ##_trigger = mat_props['Modulus'].keys() # needed; triggers KeyError if not Standard Form # Un-nested dict (Quick Form), convert, then assign # Assumes Quick Form; attempts to convert ##print('Converting mat_props to Standard Form...') #dict_prop = la.distributions.Case._to_standard_dict(mat_props) #print(dict_prop) Return dict from Quick Form to Standard Form (DataFrame-friendly). Quick Form assumes input dict lists values ordered by Modulus and Poisson's Ratio respectively. Used internally. Quick Form: dict of lists d = {'HA' : [5.2e10, 0.25], PSu' : [2.7e9, 0.33]} Standard Form: dict of dicts d = {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, 'Poissons': {'HA': 0.25, 'PSu': 0.33}} Returns ------- defaultdict A dict of materials properties; names as keys, materials: properties as key-value pairs. # Modulus --> idx=0; Poissons --> idx=1 Return a list of ordered materials. Order can be overridden by a list. Parameters ---------- mat_props : dict Material properties in Standard or Quick Form. Returns ------- list An ordered list of materials; uses a pandas DataFrame to order it. # TODO: Look into why global_vars is used instead of globals Return a FeatureInput for a given Geometry object. Handles conversions to different formats. Idiomatic approach to building FeatureInput objects, especially in custom models. All parameters require user/author input. Parameters ---------- Geometry : Geometry object A native data type comprising geometry information. load_params : dict; default None Loading parameters. mat_props : dict; default None Material parameters. materials : list; default None Unique materials in stacking order; > 1 materials assumes alternating layers. Will be converted to Standard Form. 
model : str; default None Custom model name located in `models` directory. global_vars : dict, optional; default None Additional variables that may be locally calculated though globally pertinent. Returns ------- FeatureInput Essential dict of user-provided values. See Also -------- la.distributions.Case.apply() : main creator of FeatureInput objects la.models.Wilson_LT() : used to build default instance FeatureInput # TODO: Add Exception handling of materials order list and mat_props here. # Make generators of custom geometry strings or objects Yield a generator of selected geometry strings or objects given a key. Parameters ---------- selection : list of strings; default None The strings are key names within the geo_inputs dict. geo_inputs : bool; default False If true, uses geo_inputs from BaseDefaults class; else defaults to Geo_objects See Also -------- utils.tools.natural_sort : orders `dict.items()` in loops; needed for tests Examples -------- >>> from lamana.input_ import BaseDefaults >>> bdft = BaseDefaults() >>> bdft.generate() <itertools.chain at 0x7d1e278> # yields a generator >>> list(bdft.generate(selection=['5-ply'], geo_inputs=True)) >>> list(gen) ['400-200-800', '400-[200]-800', '400-200-400S'] # geometry strings >>> list(bdft.generate(selection=['standard'], geo_inputs=False)) [Geometry object (400.0-[200.0]-800.0)] # Geometry object; default # Default to all strings/objects (not groups) if None selected # selection='invalid key' # selection=None; default 'all' key #print(selection) # selection=['valid key'] # geometry strings # Geometry objects # Sorted values filtered by selected keys # print(list(flattened)) # flattened | 2.92875 | 3 |
appengine/swarming/backend_conversions.py | maruel/swarming | 74 | 6622846 | <gh_stars>10-100
# Copyright 2021 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Functions that convert internal to/from Backend API's protoc objects."""
import collections
import copy
import logging
import posixpath

from google.appengine.api import app_identity
from google.appengine.api import datastore_errors
from google.protobuf import json_format

import handlers_exceptions
from components import utils
from server import task_request
from server import task_result
from proto.api.internal.bb import backend_pb2
from proto.api.internal.bb import common_pb2
from proto.api.internal.bb import swarming_bb_pb2
# This is the path, relative to the swarming run dir, to the directory that
# contains the mounted swarming named caches. It will be prepended to paths of
# caches defined in swarmbucket configs.
_CACHE_DIR = 'cache'
def compute_task_request(run_task_req):
  # type: (backend_pb2.RunTaskRequest) -> Tuple[task_request.TaskRequest,
  # Optional[task_request.SecretBytes], task_request.BuildToken]
  """Computes internal ndb objects from a RunTaskRequest.

  Returns:
    A (TaskRequest, SecretBytes or None, BuildToken) tuple.

  Raises:
    handlers_exceptions.BadRequestException if any `run_task_req` fields are
      invalid.
    datastore_errors.BadValueError if any converted ndb object values are
      invalid.
  """
  # Token tying the swarming task back to the originating buildbucket build.
  token = task_request.BuildToken(
      build_id=run_task_req.build_id,
      token=run_task_req.backend_token,
      buildbucket_host=run_task_req.buildbucket_host)

  # Secrets are stored out-of-band rather than passed via `-secret_bytes` in
  # `command`, since swarming tasks can view other tasks' command details.
  sb = None
  if run_task_req.secrets:
    sb = task_request.SecretBytes(
        secret_bytes=run_task_req.secrets.SerializeToString())

  config = ingest_backend_config(run_task_req.backend_config)
  slices = _compute_task_slices(run_task_req, config, sb is not None)
  # Sum of per-slice expirations, in microseconds.
  total_exp_micros = sum(s.expiration_secs for s in slices) * 1000000

  # The expiration_ts may differ from run_task_req.start_deadline if the last
  # slice's expiration_secs had to be extended to 60s.
  now = utils.utcnow()
  tr = task_request.TaskRequest(
      created_ts=now,
      task_slices=slices,
      expiration_ts=utils.timestamp_to_datetime(
          utils.datetime_to_timestamp(now) + total_exp_micros),
      realm=run_task_req.realm,
      name='bb-%s' % run_task_req.build_id,
      priority=config.priority,
      bot_ping_tolerance_secs=config.bot_ping_tolerance,
      service_account=config.service_account,
      has_build_token=True)
  if config.parent_run_id:
    tr.parent_task_id = config.parent_run_id
  return tr, sb, token
def ingest_backend_config(req_backend_config):
  # type: (struct_pb2.Struct) -> swarming_bb_pb2.SwarmingBackendConfig
  """Converts a generic proto Struct into a typed SwarmingBackendConfig.

  Round-trips through JSON: json_format.Parse validates the dynamically
  typed Struct fields against the SwarmingBackendConfig schema.
  """
  as_json = json_format.MessageToJson(req_backend_config)
  return json_format.Parse(as_json, swarming_bb_pb2.SwarmingBackendConfig())
def _compute_task_slices(run_task_req, backend_config, has_secret_bytes):
  # type: (backend_pb2.RunTaskRequest, swarming_bb_pb2.SwarmingBackendConfig,
  # bool) -> Sequence[task_request.TaskSlice]
  """Builds the ordered TaskSlice list for a RunTaskRequest.

  Slices are ordered by increasing absolute expiration; each slice carries
  the dimensions still in effect at its expiration time, and the base slice
  (base dimensions only) runs last.

  Raises:
    handlers_exceptions.BadRequestException if any `run_task_req` fields are
      invalid.
    datastore_errors.BadValueError if any converted ndb object values are
      invalid.
  """
  # {expiration_secs: {'key1': [value1, ...], 'key2': [value1, ...]}
  dims_by_exp = collections.defaultdict(lambda: collections.defaultdict(list))

  # Durations must be whole seconds; sub-second precision is rejected.
  if run_task_req.execution_timeout.nanos:
    raise handlers_exceptions.BadRequestException(
        '`execution_timeout.nanos` must be 0')
  if run_task_req.grace_period.nanos:
    raise handlers_exceptions.BadRequestException(
        '`grace_period.nanos` must be 0')

  # A cache with a warm-cache wait becomes a 'caches' dimension expiring
  # after the requested wait (i.e. prefer bots that already hold the cache).
  for cache in run_task_req.caches:
    if cache.wait_for_warm_cache.nanos:
      raise handlers_exceptions.BadRequestException(
          'cache\'s `wait_for_warm_cache.nanos` must be 0')
    if cache.wait_for_warm_cache.seconds:
      dims_by_exp[cache.wait_for_warm_cache.seconds]['caches'].append(
          cache.name)

  for dim in run_task_req.dimensions:
    if dim.expiration.nanos:
      raise handlers_exceptions.BadRequestException(
          'dimension\'s `expiration.nanos` must be 0')
    dims_by_exp[dim.expiration.seconds][dim.key].append(dim.value)

  # Dimensions with expiration 0 apply to every slice (base dimensions).
  base_dims = dims_by_exp.pop(0, {})
  for key, values in base_dims.iteritems():
    values.sort()

  base_slice = task_request.TaskSlice(
      # In bb-on-swarming, `wait_for_capacity` is only used for the last slice
      # (base_slice) to give named caches some time to show up.
      wait_for_capacity=backend_config.wait_for_capacity,
      expiration_secs=int(run_task_req.start_deadline.seconds -
                          utils.time_time()),
      properties=task_request.TaskProperties(
          caches=[
              task_request.CacheEntry(
                  path=posixpath.join(_CACHE_DIR, cache.path), name=cache.name)
              for cache in run_task_req.caches
          ],
          dimensions_data=base_dims,
          execution_timeout_secs=run_task_req.execution_timeout.seconds,
          grace_period_secs=run_task_req.grace_period.seconds,
          command=_compute_command(run_task_req,
                                   backend_config.agent_binary_cipd_filename),
          has_secret_bytes=has_secret_bytes,
          cipd_input=task_request.CipdInput(packages=[
              task_request.CipdPackage(
                  path='.',
                  package_name=backend_config.agent_binary_cipd_pkg,
                  version=backend_config.agent_binary_cipd_vers)
          ])),
  )
  if not dims_by_exp:
    return [base_slice]

  # Initialize task slices with base properties and computed expiration.
  # Each slice's expiration_secs is the delta from the previous slice's
  # absolute expiration, so the absolute expirations accumulate in order.
  last_exp = 0
  task_slices = []
  for expiration_secs in sorted(dims_by_exp):
    slice_exp_secs = expiration_secs - last_exp
    task_slices.append(
        task_request.TaskSlice(
            expiration_secs=slice_exp_secs,
            properties=copy.deepcopy(base_slice.properties),
        ))
    last_exp = expiration_secs

  # Add extra dimensions for all slices.  Walking expirations longest-first
  # accumulates dimensions into `extra_dims`, so the i-th slice from the end
  # receives every dimension whose expiration covers that slice.
  extra_dims = collections.defaultdict(list)
  for i, (_exp,
          dims) in enumerate(sorted(dims_by_exp.iteritems(), reverse=True)):
    for key, values in dims.iteritems():
      extra_dims[key].extend(values)
    props = task_slices[-1 - i].properties
    for key, values in extra_dims.iteritems():
      props.dimensions.setdefault(key, []).extend(values)
      props.dimensions[key].sort()

  # Adjust expiration on base_slice (remainder of the deadline, padded to a
  # 60s minimum) and add it as the last slice.
  base_slice.expiration_secs = max(base_slice.expiration_secs - last_exp, 60)
  task_slices.append(base_slice)
  return task_slices
def _compute_command(run_task_req, agent_binary_name):
  # type: (backend_pb2.RunTaskRequest, str) -> Sequence[str]
  """Builds the task command: agent binary, caller-supplied args, then the
  cache-base and task-id flags appended last."""
  cmd = [agent_binary_name]
  cmd.extend(run_task_req.agent_args)
  cmd.extend(['-cache-base', _CACHE_DIR, '-task-id', '${SWARMING_TASK_ID}'])
  return cmd
def convert_results_to_tasks(task_results, task_ids):
  # type: (Sequence[Union[task_result._TaskResultCommon, None]], Sequence[str])
  # -> Sequence[backend_pb2.Task]
  """Converts the given task results to backend Tasks.

  The length and order of `task_results` is expected to match those of
  `task_ids`; a None result maps to an INFRA_FAILURE "not found" task.

  Raises:
    handlers_exceptions.InternalException if any tasks have an
    unexpected state.
  """
  State = task_result.State
  tasks = []
  for idx, result in enumerate(task_results):
    task = backend_pb2.Task(
        id=backend_pb2.TaskID(
            target='swarming://%s' % app_identity.get_application_id(),
            id=task_ids[idx],
        ))
    if result is None:
      task.status = common_pb2.INFRA_FAILURE
      task.summary_html = 'Swarming task %s not found' % task_ids[idx]
    elif result.state == State.PENDING:
      task.status = common_pb2.SCHEDULED
    elif result.state == State.RUNNING:
      task.status = common_pb2.STARTED
    elif result.state == State.EXPIRED:
      # Expired counts as both a timeout and resource exhaustion.
      task.status = common_pb2.INFRA_FAILURE
      task.summary_html = 'Task expired.'
      task.status_details.resource_exhaustion.SetInParent()
      task.status_details.timeout.SetInParent()
    elif result.state == State.TIMED_OUT:
      task.status = common_pb2.INFRA_FAILURE
      task.summary_html = 'Task timed out.'
      task.status_details.timeout.SetInParent()
    elif result.state == State.BOT_DIED:
      task.status = common_pb2.INFRA_FAILURE
      task.summary_html = 'Task bot died.'
    elif result.state in (State.CANCELED, State.KILLED):
      task.status = common_pb2.CANCELED
    elif result.state == State.NO_RESOURCE:
      task.status = common_pb2.INFRA_FAILURE
      task.summary_html = 'Task did not start, no resource.'
      task.status_details.resource_exhaustion.SetInParent()
    elif result.state == State.COMPLETED:
      # COMPLETED covers both success and task-level (non-infra) failure.
      if result.failure:
        task.status = common_pb2.FAILURE
        task.summary_html = 'Task completed with failure.'
      else:
        task.status = common_pb2.SUCCESS
    else:
      logging.error('Unexpected state for task result: %r', result)
      raise handlers_exceptions.InternalException('Unrecognized task status')
    # TODO(crbug/1236848): Fill Task.details.
    tasks.append(task)
  return tasks
| # Copyright 2021 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Functions that convert internal to/from Backend API's protoc objects."""
import collections
import copy
import posixpath
from google.appengine.api import app_identity
from google.appengine.api import datastore_errors
from google.protobuf import json_format
import handlers_exceptions
from components import utils
from server import task_request
from server import task_result
from proto.api.internal.bb import backend_pb2
from proto.api.internal.bb import common_pb2
from proto.api.internal.bb import swarming_bb_pb2
# This is the path, relative to the swarming run dir, to the directory that
# contains the mounted swarming named caches. It will be prepended to paths of
# caches defined in swarmbucket configs.
_CACHE_DIR = 'cache'
def compute_task_request(run_task_req):
  # type: (backend_pb2.RunTaskRequest) -> Tuple[task_request.TaskRequest,
  # Optional[task_request.SecretBytes], task_request.BuildToken]
  """Computes internal ndb objects from a RunTaskRequest.

  Returns a (TaskRequest, SecretBytes or None, BuildToken) tuple.

  Raises:
    handlers_exceptions.BadRequestException if any `run_task_req` fields are
      invalid.
    datastore_errors.BadValueError if any converted ndb object values are
      invalid.
  """
  # Token tying the swarming task back to the originating buildbucket build.
  build_token = task_request.BuildToken(
      build_id=run_task_req.build_id,
      token=run_task_req.backend_token,
      buildbucket_host=run_task_req.buildbucket_host)
  # NOTE: secret_bytes cannot be passed via `-secret_bytes` in `command`
  # because tasks in swarming can view command details of other tasks.
  secret_bytes = None
  if run_task_req.secrets:
    secret_bytes = task_request.SecretBytes(
        secret_bytes=run_task_req.secrets.SerializeToString())
  backend_config = ingest_backend_config(run_task_req.backend_config)
  slices = _compute_task_slices(run_task_req, backend_config,
                                secret_bytes is not None)
  # Sum of per-slice expirations; seconds converted to microseconds.
  expiration_ms = sum([s.expiration_secs for s in slices]) * 1000000
  # The expiration_ts may be different from run_task_req.start_deadline
  # if the last slice's expiration_secs had to be extended to 60s
  now = utils.utcnow()
  tr = task_request.TaskRequest(
      created_ts=now,
      task_slices=slices,
      expiration_ts=utils.timestamp_to_datetime(
          utils.datetime_to_timestamp(now) + expiration_ms),
      realm=run_task_req.realm,
      name='bb-%s' % run_task_req.build_id,
      priority=backend_config.priority,
      bot_ping_tolerance_secs=backend_config.bot_ping_tolerance,
      service_account=backend_config.service_account,
      has_build_token=True)
  # Link to a parent swarming run when buildbucket supplied one.
  parent_id = backend_config.parent_run_id
  if parent_id:
    tr.parent_task_id = parent_id
  return tr, secret_bytes, build_token
def ingest_backend_config(req_backend_config):
  # type: (struct_pb2.Struct) -> swarming_bb_pb2.SwarmingBackendConfig
  """Converts a generic proto Struct into a typed SwarmingBackendConfig.

  Round-trips through JSON: json_format.Parse validates the dynamically
  typed Struct fields against the SwarmingBackendConfig schema.
  """
  json_config = json_format.MessageToJson(req_backend_config)
  return json_format.Parse(json_config, swarming_bb_pb2.SwarmingBackendConfig())
def _compute_task_slices(run_task_req, backend_config, has_secret_bytes):
  # type: (backend_pb2.RunTaskRequest, swarming_bb_pb2.SwarmingBackendConfig,
  #     bool) -> Sequence[task_request.TaskSlice]
  """Builds the list of TaskSlices for a RunTaskRequest.

  Dimensions and named caches may carry per-entry expirations; each distinct
  non-zero expiration produces one extra slice in front of the base slice.

  Raises:
    handlers_exceptions.BadRequestException if any `run_task_req` fields are
      invalid.
    datastore_errors.BadValueError if any converted ndb object values are
      invalid.
  """
  # {expiration_secs: {'key1': [value1, ...], 'key2': [value1, ...]}
  dims_by_exp = collections.defaultdict(lambda: collections.defaultdict(list))
  # Durations must be whole seconds: reject sub-second components outright.
  if run_task_req.execution_timeout.nanos:
    raise handlers_exceptions.BadRequestException(
        '`execution_timeout.nanos` must be 0')
  if run_task_req.grace_period.nanos:
    raise handlers_exceptions.BadRequestException(
        '`grace_period.nanos` must be 0')
  # A warm-cache wait becomes a temporary 'caches' dimension that expires
  # once the wait elapses.
  for cache in run_task_req.caches:
    if cache.wait_for_warm_cache.nanos:
      raise handlers_exceptions.BadRequestException(
          'cache\'s `wait_for_warm_cache.nanos` must be 0')
    if cache.wait_for_warm_cache.seconds:
      dims_by_exp[cache.wait_for_warm_cache.seconds]['caches'].append(
          cache.name)
  for dim in run_task_req.dimensions:
    if dim.expiration.nanos:
      raise handlers_exceptions.BadRequestException(
          'dimension\'s `expiration.nanos` must be 0')
    dims_by_exp[dim.expiration.seconds][dim.key].append(dim.value)
  # Expiration 0 means "applies for the whole task": those become the base
  # slice's dimensions. NOTE: iteritems() — this module is Python 2.
  base_dims = dims_by_exp.pop(0, {})
  for key, values in base_dims.iteritems():
    values.sort()
  base_slice = task_request.TaskSlice(
      # In bb-on-swarming, `wait_for_capacity` is only used for the last slice
      # (base_slice) to give named caches some time to show up.
      wait_for_capacity=backend_config.wait_for_capacity,
      expiration_secs=int(run_task_req.start_deadline.seconds -
                          utils.time_time()),
      properties=task_request.TaskProperties(
          caches=[
              task_request.CacheEntry(
                  path=posixpath.join(_CACHE_DIR, cache.path), name=cache.name)
              for cache in run_task_req.caches
          ],
          dimensions_data=base_dims,
          execution_timeout_secs=run_task_req.execution_timeout.seconds,
          grace_period_secs=run_task_req.grace_period.seconds,
          command=_compute_command(run_task_req,
                                   backend_config.agent_binary_cipd_filename),
          has_secret_bytes=has_secret_bytes,
          cipd_input=task_request.CipdInput(packages=[
              task_request.CipdPackage(
                  path='.',
                  package_name=backend_config.agent_binary_cipd_pkg,
                  version=backend_config.agent_binary_cipd_vers)
          ])),
  )
  if not dims_by_exp:
    return [base_slice]
  # Initialize task slices with base properties and computed expiration.
  # Slices are ordered by increasing expiration; each slice's expiration_secs
  # is the delta from the previous slice's absolute expiration.
  last_exp = 0
  task_slices = []
  for expiration_secs in sorted(dims_by_exp):
    slice_exp_secs = expiration_secs - last_exp
    task_slices.append(
        task_request.TaskSlice(
            expiration_secs=slice_exp_secs,
            properties=copy.deepcopy(base_slice.properties),
        ))
    last_exp = expiration_secs
  # Add extra dimensions for all slices.
  # Walk expirations from longest to shortest while accumulating, so each
  # earlier slice picks up the union of every dimension whose expiration has
  # not yet passed by that slice.
  extra_dims = collections.defaultdict(list)
  for i, (_exp,
          dims) in enumerate(sorted(dims_by_exp.iteritems(), reverse=True)):
    for key, values in dims.iteritems():
      extra_dims[key].extend(values)
    props = task_slices[-1 - i].properties
    for key, values in extra_dims.iteritems():
      props.dimensions.setdefault(key, []).extend(values)
      props.dimensions[key].sort()
  # Adjust expiration on base_slice and add it as the last slice.
  # Clamped to a minimum of 60s so the final slice never starts out expired.
  base_slice.expiration_secs = max(base_slice.expiration_secs - last_exp, 60)
  task_slices.append(base_slice)
  return task_slices
def _compute_command(run_task_req, agent_binary_name):
  # type: (backend_pb2.RunTaskRequest, str) -> Sequence[str]
  """Assembles the agent command line for the task.

  The command is the agent binary, then the caller-supplied agent args, then
  the flags swarming always appends.
  """
  cmd = [agent_binary_name]
  cmd.extend(run_task_req.agent_args)
  cmd.extend(('-cache-base', _CACHE_DIR, '-task-id', '${SWARMING_TASK_ID}'))
  return cmd
def convert_results_to_tasks(task_results, task_ids):
  # type: (Sequence[Union[task_result._TaskResultCommon, None]], Sequence[str])
  #     -> Sequence[backend_pb2.Task]
  """Converts the given task results to a backend Tasks

  The length and order of `task_results` is expected to match those of
  `task_ids`. A None entry means the corresponding task was not found; it is
  reported as an INFRA_FAILURE rather than raising.

  Raises:
    handlers_exceptions.InternalException if any tasks have an
    unexpected state.
  """
  tasks = []
  for i, result in enumerate(task_results):
    task = backend_pb2.Task(
        id=backend_pb2.TaskID(
            # The target names this swarming instance.
            target='swarming://%s' % app_identity.get_application_id(),
            id=task_ids[i],
        ))
    if result is None:
      task.status = common_pb2.INFRA_FAILURE
      task.summary_html = 'Swarming task %s not found' % task_ids[i]
      tasks.append(task)
      continue
    # Map the swarming task state onto the backend status enum.
    # SetInParent() materializes the (empty) detail sub-message so that its
    # mere presence flags a timeout / resource-exhaustion cause.
    if result.state == task_result.State.PENDING:
      task.status = common_pb2.SCHEDULED
    elif result.state == task_result.State.RUNNING:
      task.status = common_pb2.STARTED
    elif result.state == task_result.State.EXPIRED:
      # Expired == no bot picked it up in time: both a timeout and a
      # resource shortage from the caller's point of view.
      task.status = common_pb2.INFRA_FAILURE
      task.summary_html = 'Task expired.'
      task.status_details.resource_exhaustion.SetInParent()
      task.status_details.timeout.SetInParent()
    elif result.state == task_result.State.TIMED_OUT:
      task.status = common_pb2.INFRA_FAILURE
      task.summary_html = 'Task timed out.'
      task.status_details.timeout.SetInParent()
    elif result.state == task_result.State.BOT_DIED:
      task.status = common_pb2.INFRA_FAILURE
      task.summary_html = 'Task bot died.'
    elif result.state in [task_result.State.CANCELED, task_result.State.KILLED]:
      task.status = common_pb2.CANCELED
    elif result.state == task_result.State.NO_RESOURCE:
      task.status = common_pb2.INFRA_FAILURE
      task.summary_html = 'Task did not start, no resource.'
      task.status_details.resource_exhaustion.SetInParent()
    elif result.state == task_result.State.COMPLETED:
      # COMPLETED covers both outcomes; `failure` distinguishes them.
      if result.failure:
        task.status = common_pb2.FAILURE
        task.summary_html = ('Task completed with failure.')
      else:
        task.status = common_pb2.SUCCESS
    else:
      logging.error('Unexpected state for task result: %r', result)
      raise handlers_exceptions.InternalException('Unrecognized task status')
    # TODO(crbug/1236848): Fill Task.details.
    tasks.append(task)
  return tasks
# type: (backend_pb2.RunTaskRequest, str) -> Sequence[str] # type: (Sequence[Union[task_result._TaskResultCommon, None]], Sequence[str]) # -> Sequence[backend_pb2.Task] Converts the given task results to a backend Tasks The length and order of `task_results` is expected to match those of `task_ids`. Raises: handlers_exceptions.InternalException if any tasks have an unexpected state. # TODO(crbug/1236848): Fill Task.details. | 2.049047 | 2 |
tests/utils/__init__.py | yaak-ai/mapillary-python-sdk | 13 | 6622847 | # Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
# -*- coding: utf-8 -*-
"""
tests.utils.__init__
This module loads the modules under src/mapillary/utils for tests
:copyright: (c) 2021 Facebook
:license: MIT LICENSE
"""
# Extraction testing
from . import test_extract # noqa: F401
# Filter testing
from . import test_filter # noqa: F401
| # Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
# -*- coding: utf-8 -*-
"""
tests.utils.__init__
This module loads the modules under src/mapillary/utils for tests
:copyright: (c) 2021 Facebook
:license: MIT LICENSE
"""
# Exraction testing
from . import test_extract # noqa: F401
# Filter testing
from . import test_filter # noqa: F401
| en | 0.532067 | # Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com) # -*- coding: utf-8 -*- tests.utils.__init__ This module loads the modules under src/mapillary/utils for tests :copyright: (c) 2021 Facebook :license: MIT LICENSE # Exraction testing # noqa: F401 # Filter testing # noqa: F401 | 1.152469 | 1 |
GridSearch.py | he71dulu/time-aware-pbpm | 10 | 6622848 |
"""
This script helps to get the best parameters for each model from the history of hyperparameter tuning.
"""
import numpy as np
import pickle
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
"""
GridSearch
......................................
Finding the best set of hyperparameters for each model
Variables
-------------------
HP_NUM_UNITS: list
Number of LSTM units
HP_DROPOUT: list
Dropout rate
HP_OPTIMIZER: list
Optimizer
HP_LEARNING_RATE: list
Learning Rate
data: list
validation loss list
label: list
list of the hyperparameters
name: str
name of the data file
model_choice: str
input for chosing model
num_units: int
LSTM units parameter
dropout_rate:float
Dropoutrate
optimizer:str
Optimizer
learning_rate:float
learning_rate
i: int
helper variable
df: pandas dataframe
captures best hyperparameters
fig: matplotlib object
plot the dataframe
"""
HP_NUM_UNITS = [64, 100]
HP_DROPOUT = [0.0, 0.2]
HP_OPTIMIZER = ['nadam']
HP_LEARNING_RATE = [0.0001,0.0002,0.001,0.002,0.01]
data=[]
label=[]
name=input('Enter name of data file without extension: ')
print('Enter 1: CS Model, 2: TLSTM Model, 3: CS_TLSTM Model')
model_choice=input('Model Number: ')
for units in HP_NUM_UNITS:
for dropout_rate in (HP_DROPOUT):
for optimizer in (HP_OPTIMIZER):
try:
for learning_rate in (HP_LEARNING_RATE):
filename=name+'dict_values(['+str(units)+', '+str(dropout_rate)+', '+ "'{}'".format(optimizer)+', '+str(learning_rate)+'])' #naming convention as per folder structure
with open('history/'+model_choice+'/'+filename,'rb') as fp:
hist=pickle.load(fp)
data.append(np.min(hist['val_loss']))
label.append(str(units)+', '+str(dropout_rate)+', '+ optimizer+', '+str(learning_rate))
print(str(units)+', '+str(dropout_rate)+', '+ optimizer+', '+str(learning_rate),' Min Val_loss',np.min(hist['val_loss']))
print('')
except OSError:
pass
i=np.argmin(data)
print('')
print(' units, dropout_rate, optimizer, learning_rate', 'loss')
print('Best set of parameters is: ', label[i],np.min(data))
df=pd.DataFrame(data={'Units, Drop_out, Opti, Lr':label,'Min_val_loss':data})
fig, ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
ax.table(cellText=df.values, colLabels=df.columns, loc='center',fontsize=40,)
fig.tight_layout()
plt.show()
fig.savefig('Results/'+name+'_Model_'+model_choice+".pdf", bbox_inches='tight')
|
"""
This script helps to get the best parameters for each model from the history of hyperparameter tuning.
"""
import numpy as np
import pickle
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
"""
GridSearch
......................................
Finding the best set of hyperparameters for each model
Variables
-------------------
HP_NUM_UNITS: list
Number of LSTM units
HP_DROPOUT: list
Dropout rate
HP_OPTIMIZER: list
Optimizer
HP_LEARNING_RATE: list
Learning Rate
data: list
validation loss list
label: list
list of the hyperparameters
name: str
name of the data file
model_choice: str
input for chosing model
num_units: int
LSTM units parameter
dropout_rate:float
Dropoutrate
optimizer:str
Optimizer
learning_rate:float
learning_rate
i: int
helper variable
df: pandas dataframe
captures best hyperparameters
fig: matplotlib object
plot the dataframe
"""
HP_NUM_UNITS = [64, 100]
HP_DROPOUT = [0.0, 0.2]
HP_OPTIMIZER = ['nadam']
HP_LEARNING_RATE = [0.0001,0.0002,0.001,0.002,0.01]
data=[]
label=[]
name=input('Enter name of data file without extension: ')
print('Enter 1: CS Model, 2: TLSTM Model, 3: CS_TLSTM Model')
model_choice=input('Model Number: ')
for units in HP_NUM_UNITS:
for dropout_rate in (HP_DROPOUT):
for optimizer in (HP_OPTIMIZER):
try:
for learning_rate in (HP_LEARNING_RATE):
filename=name+'dict_values(['+str(units)+', '+str(dropout_rate)+', '+ "'{}'".format(optimizer)+', '+str(learning_rate)+'])' #naming convention as per folder structure
with open('history/'+model_choice+'/'+filename,'rb') as fp:
hist=pickle.load(fp)
data.append(np.min(hist['val_loss']))
label.append(str(units)+', '+str(dropout_rate)+', '+ optimizer+', '+str(learning_rate))
print(str(units)+', '+str(dropout_rate)+', '+ optimizer+', '+str(learning_rate),' Min Val_loss',np.min(hist['val_loss']))
print('')
except OSError:
pass
i=np.argmin(data)
print('')
print(' units, dropout_rate, optimizer, learning_rate', 'loss')
print('Best set of parameters is: ', label[i],np.min(data))
df=pd.DataFrame(data={'Units, Drop_out, Opti, Lr':label,'Min_val_loss':data})
fig, ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
ax.table(cellText=df.values, colLabels=df.columns, loc='center',fontsize=40,)
fig.tight_layout()
plt.show()
fig.savefig('Results/'+name+'_Model_'+model_choice+".pdf", bbox_inches='tight')
| en | 0.496681 | This scripts help to get best parameters for each model from the history of hyperparameter tuning. GridSearch ...................................... Finding the best set of hyperparameters for each model Variables ------------------- HP_NUM_UNITS: list Number of LSTM units HP_DROPOUT: list Dropout rate HP_OPTIMIZER: list Optimizer HP_LEARNING_RATE: list Learning Rate data: list validation loss list label: list list of the hyperparameters name: str name of the data file model_choice: str input for chosing model num_units: int LSTM units parameter dropout_rate:float Dropoutrate optimizer:str Optimizer learning_rate:float learning_rate i: int helper variable df: pandas dataframe captures best hyperparameters fig: matplotlib object plot the dataframe #naming convention as per folder structure | 3.201799 | 3 |
backend/desk_application/sources/serializers/desk_serializer.py | heyImDrew/edupro | 0 | 6622849 | <filename>backend/desk_application/sources/serializers/desk_serializer.py<gh_stars>0
from rest_framework import serializers
class DeskSerializer(serializers.Serializer):
    """Full read representation of a desk: id, creator and metadata."""
    # Unique identifier of the desk.
    desk_id = serializers.UUIDField()
    # Numeric id of the user who created the desk.
    created_by_id = serializers.IntegerField()
    name = serializers.CharField()
    description = serializers.CharField()
class DeskCreateSerializer(serializers.Serializer):
    """Payload for creating a desk; ids are assigned server-side."""
    name = serializers.CharField()
    description = serializers.CharField()
class DeskIdSerializer(serializers.Serializer):
    """Payload that references an existing desk by its UUID only."""
    desk_id = serializers.UUIDField()
| <filename>backend/desk_application/sources/serializers/desk_serializer.py<gh_stars>0
from rest_framework import serializers
class DeskSerializer(serializers.Serializer):
    """Full read representation of a desk: id, creator and metadata."""
    # Unique identifier of the desk.
    desk_id = serializers.UUIDField()
    # Numeric id of the user who created the desk.
    created_by_id = serializers.IntegerField()
    name = serializers.CharField()
    description = serializers.CharField()
class DeskCreateSerializer(serializers.Serializer):
    """Payload for creating a desk; ids are assigned server-side."""
    name = serializers.CharField()
    description = serializers.CharField()
class DeskIdSerializer(serializers.Serializer):
    """Payload that references an existing desk by its UUID only."""
    desk_id = serializers.UUIDField()
| none | 1 | 1.838824 | 2 | |
synapse/vendor/cashaddress/__init__.py | vishalbelsare/synapse | 216 | 6622850 | # It has been modified for vendored imports.
import synapse.vendor.cashaddress.convert
import synapse.vendor.cashaddress.crypto
# BUGFIX: the list repeated 'convert' and omitted 'crypto', even though both
# vendored submodules are imported above.
__all__ = ['convert', 'crypto']
| # It has been modified for vendored imports.
import synapse.vendor.cashaddress.convert
import synapse.vendor.cashaddress.crypto
__all__ = ['convert', 'convert']
| en | 0.990465 | # It has been modified for vendored imports. | 1.115067 | 1 |