| hexsha stringlengths 40–40 | size int64 10–805k | ext stringclasses 6 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4–176 | max_stars_repo_name stringlengths 7–114 | max_stars_repo_head_hexsha stringlengths 40–40 | max_stars_repo_licenses listlengths 1–10 | max_stars_count int64 1–191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24–24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24–24 ⌀ | max_issues_repo_path stringlengths 4–176 | max_issues_repo_name stringlengths 7–114 | max_issues_repo_head_hexsha stringlengths 40–40 | max_issues_repo_licenses listlengths 1–10 | max_issues_count int64 1–48.5k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24–24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24–24 ⌀ | max_forks_repo_path stringlengths 4–176 | max_forks_repo_name stringlengths 7–114 | max_forks_repo_head_hexsha stringlengths 40–40 | max_forks_repo_licenses listlengths 1–10 | max_forks_count int64 1–105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24–24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24–24 ⌀ | content stringlengths 10–805k | avg_line_length float64 5.53–11k | max_line_length int64 10–129k | alphanum_fraction float64 0.13–0.93 | content_no_comment stringlengths 0–449k | is_comment_constant_removed bool 2 classes | is_sharp_comment_removed bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f719f928dccc27ae9f21364a24c6d3cb460a18a2
| 9,079
|
py
|
Python
|
stacker/tests/test_plan.py
|
GoodRx/stacker
|
0cf1df67b4ae5aeda5845442c84905909101c238
|
[
"BSD-2-Clause"
] | 1
|
2021-11-06T17:01:01.000Z
|
2021-11-06T17:01:01.000Z
|
stacker/tests/test_plan.py
|
GoodRx/stacker
|
0cf1df67b4ae5aeda5845442c84905909101c238
|
[
"BSD-2-Clause"
] | null | null | null |
stacker/tests/test_plan.py
|
GoodRx/stacker
|
0cf1df67b4ae5aeda5845442c84905909101c238
|
[
"BSD-2-Clause"
] | 1
|
2021-11-06T17:00:53.000Z
|
2021-11-06T17:00:53.000Z
|
import unittest
import mock
from stacker.context import Context
from stacker.exceptions import ImproperlyConfigured
from stacker.plan import (
Step,
Plan,
)
from stacker.status import (
COMPLETE,
SKIPPED,
SUBMITTED,
)
from stacker.stack import Stack
from .factories import generate_definition
count = 0
class TestStep(unittest.TestCase):
def setUp(self):
self.context = Context({"namespace": "namespace"})
stack = Stack(
definition=generate_definition("vpc", 1),
context=self.context,
)
self.step = Step(
stack=stack,
run_func=lambda x, y: (x, y),
)
def test_status(self):
self.assertFalse(self.step.submitted)
self.assertFalse(self.step.completed)
self.step.submit()
self.assertTrue(self.step.submitted)
self.assertFalse(self.step.completed)
self.step.complete()
self.assertTrue(self.step.submitted)
self.assertTrue(self.step.completed)
class TestPlan(unittest.TestCase):
def setUp(self):
self.count = 0
self.environment = {"namespace": "namespace"}
self.context = Context(self.environment)
def _run_func(self, stack, **kwargs):
self.count += 1
if not self.count % 2:
return COMPLETE
elif self.count == 9:
return SKIPPED
return SUBMITTED
def test_execute_plan(self):
plan = Plan(description="Test", sleep_time=0)
previous_stack = None
for i in range(5):
overrides = {}
if previous_stack:
overrides["requires"] = [previous_stack.fqn]
stack = Stack(
definition=generate_definition("vpc", i, **overrides),
context=self.context,
)
previous_stack = stack
plan.add(
stack=stack,
run_func=self._run_func,
requires=stack.requires,
)
plan.execute()
self.assertEqual(self.count, 9)
self.assertEqual(len(plan.list_skipped()), 1)
@mock.patch("stacker.plan.multiprocessing")
def test_execute_plan_with_watchers(self, patched_multiprocessing):
watch_func = mock.MagicMock()
plan = Plan(description="Test", sleep_time=0, watch_func=watch_func)
previous_stack = None
for i in range(5):
overrides = {}
if previous_stack:
overrides["requires"] = [previous_stack.fqn]
stack = Stack(
definition=generate_definition("vpc", i, **overrides),
context=self.context,
)
previous_stack = stack
plan.add(
stack=stack,
run_func=self._run_func,
requires=stack.requires,
)
plan.execute()
self.assertEqual(self.count, 9)
self.assertEqual(len(plan.list_skipped()), 1)
self.assertEqual(patched_multiprocessing.Process().start.call_count, 5)
# verify we terminate the process when the stack is finished and also
# redundantly terminate the process after execution
self.assertEqual(
patched_multiprocessing.Process().terminate.call_count, 10)
def test_step_must_return_status(self):
plan = Plan(description="Test", sleep_time=0)
stack = Stack(definition=generate_definition("vpc", 1),
context=mock.MagicMock())
plan.add(
stack=stack,
run_func=lambda x, **kwargs: (x),
)
with self.assertRaises(ValueError):
plan.execute()
def test_execute_plan_ensure_parallel_builds(self):
# key: stack_name, value: current iteration
work_states = {}
submitted_state = 0
# It takes 4 iterations for each task to finish
finished_state = 3
def _run_func(stack, *args, **kwargs):
if stack.name not in work_states:
work_states[stack.name] = submitted_state
return SUBMITTED
if work_states[stack.name] == finished_state:
return COMPLETE
work_states[stack.name] += 1
return SUBMITTED
vpc_stack = Stack(definition=generate_definition("vpc", 1),
context=self.context)
web_stack = Stack(
definition=generate_definition("web", 2, requires=[vpc_stack.fqn]),
context=self.context,
)
db_stack = Stack(
definition=generate_definition("db", 3, requires=[vpc_stack.fqn]),
context=self.context,
)
plan = Plan(description="Test", sleep_time=0)
for stack in [vpc_stack, web_stack, db_stack]:
plan.add(
stack=stack,
run_func=_run_func,
requires=stack.requires,
)
parallel_success = False
while not plan._single_run():
vpc_step = plan[vpc_stack.fqn]
web_step = plan[web_stack.fqn]
db_step = plan[db_stack.fqn]
if not vpc_step.completed:
self.assertFalse(web_step.submitted)
self.assertFalse(db_step.submitted)
else:
# If the vpc step is complete, and we see both the web & db
# steps submitted during the same run, then parallel running
# works
if web_step.status == SUBMITTED and \
db_step.status == SUBMITTED:
parallel_success = True
self.assertTrue(parallel_success)
def test_plan_wait_func_must_be_function(self):
with self.assertRaises(ImproperlyConfigured):
Plan(description="Test", wait_func="invalid")
def test_plan_steps_listed_with_fqn(self):
plan = Plan(description="Test", sleep_time=0)
stack = Stack(definition=generate_definition("vpc", 1),
context=self.context)
plan.add(stack=stack, run_func=lambda x, y: (x, y))
steps = plan.list_pending()
self.assertEqual(steps[0][0], stack.fqn)
def test_execute_plan_wait_func_not_called_if_complete(self):
wait_func = mock.MagicMock()
plan = Plan(description="Test", wait_func=wait_func)
def run_func(*args, **kwargs):
return COMPLETE
for i in range(2):
stack = Stack(definition=generate_definition("vpc", i),
context=self.context)
plan.add(
stack=stack,
run_func=run_func,
requires=stack.requires,
)
plan.execute()
self.assertEqual(wait_func.call_count, 0)
def test_reset_plan(self):
plan = Plan(description="Test", sleep_time=0)
previous_stack = None
for i in range(5):
overrides = {}
if previous_stack:
overrides["requires"] = [previous_stack.fqn]
stack = Stack(
definition=generate_definition("vpc", i, **overrides),
context=self.context,
)
previous_stack = stack
plan.add(
stack=stack,
run_func=self._run_func,
requires=stack.requires,
)
plan.execute()
self.assertEqual(self.count, 9)
self.assertEqual(len(plan.list_skipped()), 1)
plan.reset()
self.assertEqual(len(plan.list_pending()), len(plan))
def test_reset_after_outline(self):
plan = Plan(description="Test", sleep_time=0)
previous_stack = None
for i in range(5):
overrides = {}
if previous_stack:
overrides["requires"] = [previous_stack.fqn]
stack = Stack(
definition=generate_definition("vpc", i, **overrides),
context=self.context,
)
previous_stack = stack
plan.add(
stack=stack,
run_func=self._run_func,
requires=stack.requires,
)
plan.outline()
self.assertEqual(len(plan.list_pending()), len(plan))
@mock.patch("stacker.plan.os")
@mock.patch("stacker.plan.open", mock.mock_open(), create=True)
def test_reset_after_dump(self, *args):
plan = Plan(description="Test", sleep_time=0)
previous_stack = None
for i in range(5):
overrides = {}
if previous_stack:
overrides["requires"] = [previous_stack.fqn]
stack = Stack(
definition=generate_definition("vpc", i, **overrides),
context=self.context,
)
previous_stack = stack
plan.add(
stack=stack,
run_func=self._run_func,
requires=stack.requires,
)
plan.dump("test")
self.assertEqual(len(plan.list_pending()), len(plan))
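# --- Hedged usage sketch (added for illustration; not part of the original test module) ---
# It distils the pattern the tests above exercise: build a Plan, add Stacks whose
# run_func returns a stacker.status value, then execute. The namespace and the
# `definitions` argument (e.g. values from generate_definition) are placeholders.
def _example_build_plan(definitions, namespace="example"):
    context = Context({"namespace": namespace})
    plan = Plan(description="Example", sleep_time=0)
    def run_func(stack, **kwargs):
        # A real run_func would create or update the stack here.
        return COMPLETE
    for definition in definitions:
        stack = Stack(definition=definition, context=context)
        plan.add(stack=stack, run_func=run_func, requires=stack.requires)
    plan.execute()
    return plan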
| 32.894928
| 79
| 0.566142
|
import unittest
import mock
from stacker.context import Context
from stacker.exceptions import ImproperlyConfigured
from stacker.plan import (
Step,
Plan,
)
from stacker.status import (
COMPLETE,
SKIPPED,
SUBMITTED,
)
from stacker.stack import Stack
from .factories import generate_definition
count = 0
class TestStep(unittest.TestCase):
def setUp(self):
self.context = Context({"namespace": "namespace"})
stack = Stack(
definition=generate_definition("vpc", 1),
context=self.context,
)
self.step = Step(
stack=stack,
run_func=lambda x, y: (x, y),
)
def test_status(self):
self.assertFalse(self.step.submitted)
self.assertFalse(self.step.completed)
self.step.submit()
self.assertTrue(self.step.submitted)
self.assertFalse(self.step.completed)
self.step.complete()
self.assertTrue(self.step.submitted)
self.assertTrue(self.step.completed)
class TestPlan(unittest.TestCase):
def setUp(self):
self.count = 0
self.environment = {"namespace": "namespace"}
self.context = Context(self.environment)
def _run_func(self, stack, **kwargs):
self.count += 1
if not self.count % 2:
return COMPLETE
elif self.count == 9:
return SKIPPED
return SUBMITTED
def test_execute_plan(self):
plan = Plan(description="Test", sleep_time=0)
previous_stack = None
for i in range(5):
overrides = {}
if previous_stack:
overrides["requires"] = [previous_stack.fqn]
stack = Stack(
definition=generate_definition("vpc", i, **overrides),
context=self.context,
)
previous_stack = stack
plan.add(
stack=stack,
run_func=self._run_func,
requires=stack.requires,
)
plan.execute()
self.assertEqual(self.count, 9)
self.assertEqual(len(plan.list_skipped()), 1)
@mock.patch("stacker.plan.multiprocessing")
def test_execute_plan_with_watchers(self, patched_multiprocessing):
watch_func = mock.MagicMock()
plan = Plan(description="Test", sleep_time=0, watch_func=watch_func)
previous_stack = None
for i in range(5):
overrides = {}
if previous_stack:
overrides["requires"] = [previous_stack.fqn]
stack = Stack(
definition=generate_definition("vpc", i, **overrides),
context=self.context,
)
previous_stack = stack
plan.add(
stack=stack,
run_func=self._run_func,
requires=stack.requires,
)
plan.execute()
self.assertEqual(self.count, 9)
self.assertEqual(len(plan.list_skipped()), 1)
self.assertEqual(patched_multiprocessing.Process().start.call_count, 5)
self.assertEqual(
patched_multiprocessing.Process().terminate.call_count, 10)
def test_step_must_return_status(self):
plan = Plan(description="Test", sleep_time=0)
stack = Stack(definition=generate_definition("vpc", 1),
context=mock.MagicMock())
plan.add(
stack=stack,
run_func=lambda x, **kwargs: (x),
)
with self.assertRaises(ValueError):
plan.execute()
def test_execute_plan_ensure_parallel_builds(self):
work_states = {}
submitted_state = 0
finished_state = 3
def _run_func(stack, *args, **kwargs):
if stack.name not in work_states:
work_states[stack.name] = submitted_state
return SUBMITTED
if work_states[stack.name] == finished_state:
return COMPLETE
work_states[stack.name] += 1
return SUBMITTED
vpc_stack = Stack(definition=generate_definition("vpc", 1),
context=self.context)
web_stack = Stack(
definition=generate_definition("web", 2, requires=[vpc_stack.fqn]),
context=self.context,
)
db_stack = Stack(
definition=generate_definition("db", 3, requires=[vpc_stack.fqn]),
context=self.context,
)
plan = Plan(description="Test", sleep_time=0)
for stack in [vpc_stack, web_stack, db_stack]:
plan.add(
stack=stack,
run_func=_run_func,
requires=stack.requires,
)
parallel_success = False
while not plan._single_run():
vpc_step = plan[vpc_stack.fqn]
web_step = plan[web_stack.fqn]
db_step = plan[db_stack.fqn]
if not vpc_step.completed:
self.assertFalse(web_step.submitted)
self.assertFalse(db_step.submitted)
else:
if web_step.status == SUBMITTED and \
db_step.status == SUBMITTED:
parallel_success = True
self.assertTrue(parallel_success)
def test_plan_wait_func_must_be_function(self):
with self.assertRaises(ImproperlyConfigured):
Plan(description="Test", wait_func="invalid")
def test_plan_steps_listed_with_fqn(self):
plan = Plan(description="Test", sleep_time=0)
stack = Stack(definition=generate_definition("vpc", 1),
context=self.context)
plan.add(stack=stack, run_func=lambda x, y: (x, y))
steps = plan.list_pending()
self.assertEqual(steps[0][0], stack.fqn)
def test_execute_plan_wait_func_not_called_if_complete(self):
wait_func = mock.MagicMock()
plan = Plan(description="Test", wait_func=wait_func)
def run_func(*args, **kwargs):
return COMPLETE
for i in range(2):
stack = Stack(definition=generate_definition("vpc", i),
context=self.context)
plan.add(
stack=stack,
run_func=run_func,
requires=stack.requires,
)
plan.execute()
self.assertEqual(wait_func.call_count, 0)
def test_reset_plan(self):
plan = Plan(description="Test", sleep_time=0)
previous_stack = None
for i in range(5):
overrides = {}
if previous_stack:
overrides["requires"] = [previous_stack.fqn]
stack = Stack(
definition=generate_definition("vpc", i, **overrides),
context=self.context,
)
previous_stack = stack
plan.add(
stack=stack,
run_func=self._run_func,
requires=stack.requires,
)
plan.execute()
self.assertEqual(self.count, 9)
self.assertEqual(len(plan.list_skipped()), 1)
plan.reset()
self.assertEqual(len(plan.list_pending()), len(plan))
def test_reset_after_outline(self):
plan = Plan(description="Test", sleep_time=0)
previous_stack = None
for i in range(5):
overrides = {}
if previous_stack:
overrides["requires"] = [previous_stack.fqn]
stack = Stack(
definition=generate_definition("vpc", i, **overrides),
context=self.context,
)
previous_stack = stack
plan.add(
stack=stack,
run_func=self._run_func,
requires=stack.requires,
)
plan.outline()
self.assertEqual(len(plan.list_pending()), len(plan))
@mock.patch("stacker.plan.os")
@mock.patch("stacker.plan.open", mock.mock_open(), create=True)
def test_reset_after_dump(self, *args):
plan = Plan(description="Test", sleep_time=0)
previous_stack = None
for i in range(5):
overrides = {}
if previous_stack:
overrides["requires"] = [previous_stack.fqn]
stack = Stack(
definition=generate_definition("vpc", i, **overrides),
context=self.context,
)
previous_stack = stack
plan.add(
stack=stack,
run_func=self._run_func,
requires=stack.requires,
)
plan.dump("test")
self.assertEqual(len(plan.list_pending()), len(plan))
| true
| true
|
f719f96e68fd7b17d73ed6b9460ebade8987ebf6
| 4,908
|
py
|
Python
|
parseepo/serialize.py
|
cverluise/parseEPO
|
be1171a0f8e6fcafa711fa291aebb1fc2260d5e6
|
[
"MIT"
] | null | null | null |
parseepo/serialize.py
|
cverluise/parseEPO
|
be1171a0f8e6fcafa711fa291aebb1fc2260d5e6
|
[
"MIT"
] | 3
|
2021-02-02T22:38:50.000Z
|
2021-08-23T20:41:10.000Z
|
parseepo/serialize.py
|
cverluise/parseEPO
|
be1171a0f8e6fcafa711fa291aebb1fc2260d5e6
|
[
"MIT"
] | null | null | null |
import html2text
import pandas as pd
from wasabi import Printer
from parseepo import validate
from parseepo.exception import SingleAttrException
from parseepo.utils import prepare_name
h = html2text.HTML2Text()
msg = Printer()
NAMES = ["EP", "Num", "Ext", "publication_date", "language", "attr", "text"]
NESTED_ATTR = ["TITLE", "CLAIM", "AMEND", "title", "claims", "amendment"]
def format_patent_df(
data: list, prepare_names: bool = False, handle_html: bool = False
):
"""
Return data as a prepared DataFrame from a list of rows
Nb: Input is [publication_number[Row]].
E.g. [['EP','0700059 A1','1996-03-06','de','TITLE',' Elektroma...'],
['EP','0700059 A1','1996-03-06','en','TITLE',' Electroma...'],
...
:param data: List[List]
:param prepare_names: bool, True if you want to prepare names for BQ compatibility
:param handle_html: bool, True if you want to handle html
:return: pd.DataFrame
publication_date language attr text publication_number
0 1996-03-06 ... ... ... EP-0700059-A1
1 1996-03-06 ... ... ... EP-0700059-A1
2 1996-03-06 ... ... ... EP-0700059-A1
3 1996-03-06 ... ... ... EP-0700059-A1
4 1996-03-06 ... ... ... EP-0700059-A1
5 1996-03-06 ... ... ... EP-0700059-A1
6 1996-03-06 ... ... ... EP-0700059-A1
"""
df_ = pd.DataFrame(data, columns=NAMES)
df_["publication_number"] = df_["EP"] + "-" + df_["Num"] + "-" + df_["Ext"]
df_ = df_.drop(["EP", "Num", "Ext"], axis=1)
if prepare_names:
df_["attr"] = df_["attr"].apply(lambda x: prepare_name(x, True))
if handle_html:
df_["text"] = df_["text"].apply(lambda x: h.handle(x))
return df_
def unnest_attr(patent_dict: dict, publication_number: str):
"""
Unnest flat attributes returned as nested by the batch aggregation operation in
serialize_patent.
Raises a warning if an expected flat attribute has multiple values.
:param patent_dict: dict, returned by serialize_patent
:param publication_number: str, e.g. 'EP-0600083-A1'
:return: dict
In:
{ ...,
'PDFEP': {'language': ['en'],
'text': ['https://data.epo.org/publication-server/...']},
}
Out:
{...,
'PDFEP': 'https://data.epo.org/publication-server/...',}
"""
attrs = list(filter(lambda x: x not in NESTED_ATTR, patent_dict.keys()))
for attr in attrs:
val = patent_dict[attr]["text"]
try:
validate.single_attr(val, attr, publication_number)
except SingleAttrException:
msg.warn(
f"{publication_number}: {attr} has more than 1 value. Only the first value "
f"was kept. Add {attr} to the list NESTED_ATTR to fix this behavior."
)
patent_dict.update(
{
attr: {
"text": patent_dict[attr]["text"][0],
"language": patent_dict[attr]["language"][0],
}
}
)
def serialize_patent_df(patent_df: pd.DataFrame):
"""
Return the serialized patent
:param patent_df: pd.DataFrame, returned by format_patent_df
:return: dict
{'ABSTR': '<p id="pa01" num="0001">A device ...',
'CLAIM': {'language': ['en'],
'text': ['<claim id="c-en-0001" ...']},
'DESCR': '<heading id="h0001">Field of ...',
'PDFEP': 'https://data.epo.org/publication-server/...',
'TITLE': {'language': ['de', 'en', 'fr'],
'text': ['VORRICHTUNG ZUM ...',
'DEVICE FOR CONVEYING ...',
"DISPOSITIF D'ACHEMINEMENT ...']},
'publication_date': '1994-06-08',
'publication_number': 'EP-0600083-A1'}
"""
publication_number = patent_df["publication_number"].values[0]
publication_date = patent_df["publication_date"].values[0]
out = (
patent_df.drop(["publication_number", "publication_date"], axis=1)
.groupby("attr")
.aggregate(list)
.T.to_dict()
)
unnest_attr(out, publication_number)
out.update({"publication_number": publication_number})
out.update({"publication_date": publication_date})
return out
def serialize_patent(
data: list, prepare_names: bool = False, handle_html: bool = False
):
"""
Return the serialized patent
:param data: List[List[str]], E.g.
[['EP','0700059 A1','1996-03-06','de','TITLE',' Elektroma...'],
['EP','0700059 A1','1996-03-06','en','TITLE',' Electroma...'],
:param prepare_names: bool, True if you want to prepare names for BQ compatibility
:param handle_html: bool, True if you want to handle html
:return: dict
"""
out = format_patent_df(data, prepare_names, handle_html)
out = serialize_patent_df(out)
return out
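# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Rows follow the NAMES layout above (EP, Num, Ext, publication_date, language,
# attr, text); the publication number and texts are made-up placeholders.
def _example_serialize():
    rows = [
        ["EP", "0700059", "A1", "1996-03-06", "de", "TITLE", "Elektromagnetische Vorrichtung ..."],
        ["EP", "0700059", "A1", "1996-03-06", "en", "TITLE", "Electromagnetic device ..."],
        ["EP", "0700059", "A1", "1996-03-06", "en", "PDFEP", "https://data.epo.org/publication-server/..."],
    ]
    # TITLE is listed in NESTED_ATTR and stays nested; PDFEP is flattened by unnest_attr.
    return serialize_patent(rows, prepare_names=False, handle_html=False)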
| 36.355556
| 92
| 0.581296
|
import html2text
import pandas as pd
from wasabi import Printer
from parseepo import validate
from parseepo.exception import SingleAttrException
from parseepo.utils import prepare_name
h = html2text.HTML2Text()
msg = Printer()
NAMES = ["EP", "Num", "Ext", "publication_date", "language", "attr", "text"]
NESTED_ATTR = ["TITLE", "CLAIM", "AMEND", "title", "claims", "amendment"]
def format_patent_df(
data: list, prepare_names: bool = False, handle_html: bool = False
):
df_ = pd.DataFrame(data, columns=NAMES)
df_["publication_number"] = df_["EP"] + "-" + df_["Num"] + "-" + df_["Ext"]
df_ = df_.drop(["EP", "Num", "Ext"], axis=1)
if prepare_names:
df_["attr"] = df_["attr"].apply(lambda x: prepare_name(x, True))
if handle_html:
df_["text"] = df_["text"].apply(lambda x: h.handle(x))
return df_
def unnest_attr(patent_dict: dict, publication_number: str):
attrs = list(filter(lambda x: x not in NESTED_ATTR, patent_dict.keys()))
for attr in attrs:
val = patent_dict[attr]["text"]
try:
validate.single_attr(val, attr, publication_number)
except SingleAttrException:
msg.warn(
f"{publication_number}: {attr} has more than 1 value. Only the first value "
f"was kept. Add {attr} to the list NESTED_ATTR to fix this behavior."
)
patent_dict.update(
{
attr: {
"text": patent_dict[attr]["text"][0],
"language": patent_dict[attr]["language"][0],
}
}
)
def serialize_patent_df(patent_df: pd.DataFrame):
publication_number = patent_df["publication_number"].values[0]
publication_date = patent_df["publication_date"].values[0]
out = (
patent_df.drop(["publication_number", "publication_date"], axis=1)
.groupby("attr")
.aggregate(list)
.T.to_dict()
)
unnest_attr(out, publication_number)
out.update({"publication_number": publication_number})
out.update({"publication_date": publication_date})
return out
def serialize_patent(
data: list, prepare_names: bool = False, handle_html: bool = False
):
out = format_patent_df(data, prepare_names, handle_html)
out = serialize_patent_df(out)
return out
| true
| true
|
f719fa1fa4ebcfaebccce4b33060c2940a53ad43
| 1,159
|
py
|
Python
|
src/ursa/scripts/clean_imu_data.py
|
BillYJT/RR1-IP
|
06946f9c79ae7c5e128d83bded3dafd848d49f58
|
[
"MIT"
] | null | null | null |
src/ursa/scripts/clean_imu_data.py
|
BillYJT/RR1-IP
|
06946f9c79ae7c5e128d83bded3dafd848d49f58
|
[
"MIT"
] | null | null | null |
src/ursa/scripts/clean_imu_data.py
|
BillYJT/RR1-IP
|
06946f9c79ae7c5e128d83bded3dafd848d49f58
|
[
"MIT"
] | 1
|
2020-06-07T00:38:19.000Z
|
2020-06-07T00:38:19.000Z
|
#!/usr/bin/env python
import rospy
import math
from sensor_msgs.msg import Imu
import tf
import tf2_ros
import tf2_geometry_msgs
import geometry_msgs.msg
lastPub = 0
lastClean = 0
def callbackRaw(imu_in):
global lastPub, lastClean
if (lastClean != 0 and lastClean > rospy.Time.now() - rospy.Duration(1)):
return #Don't publish raw data if clean data is available from estimator
if (lastPub == 0 or imu_in.header.stamp > lastPub):
lastPub = imu_in.header.stamp
pub.publish(imu_in)
def callbackData(imu_in):
global lastPub, lastClean
if (lastPub == 0 or imu_in.header.stamp > lastPub):
lastPub = imu_in.header.stamp
lastClean = imu_in.header.stamp
pub.publish(imu_in)
def filter_imu():
rospy.init_node('clean_imu_data', anonymous=True)
subRaw = rospy.Subscriber('/mavros/imu/data_raw', Imu, callbackRaw)
subData = rospy.Subscriber('/mavros/imu/data', Imu, callbackData)
rospy.spin()
if __name__ == '__main__':
rospy.sleep(1)
pub = rospy.Publisher('filtered_imu', Imu, queue_size=10)
filter_imu()
tf_buffer = tf2_ros.Buffer(rospy.Duration(10.0)) #tf buffer length
tf_listener = tf2_ros.TransformListener(tf_buffer)
| 27.595238
| 74
| 0.744607
|
import rospy
import math
from sensor_msgs.msg import Imu
import tf
import tf2_ros
import tf2_geometry_msgs
import geometry_msgs.msg
lastPub = 0
lastClean = 0
def callbackRaw(imu_in):
global lastPub, lastClean
if (lastClean != 0 and lastClean > rospy.Time.now() - rospy.Duration(1)):
return
if (lastPub == 0 or imu_in.header.stamp > lastPub):
lastPub = imu_in.header.stamp
pub.publish(imu_in)
def callbackData(imu_in):
global lastPub, lastClean
if (lastPub == 0 or imu_in.header.stamp > lastPub):
lastPub = imu_in.header.stamp
lastClean = imu_in.header.stamp
pub.publish(imu_in)
def filter_imu():
rospy.init_node('clean_imu_data', anonymous=True)
subRaw = rospy.Subscriber('/mavros/imu/data_raw', Imu, callbackRaw)
subData = rospy.Subscriber('/mavros/imu/data', Imu, callbackData)
rospy.spin()
if __name__ == '__main__':
rospy.sleep(1)
pub = rospy.Publisher('filtered_imu', Imu, queue_size=10)
filter_imu()
tf_buffer = tf2_ros.Buffer(rospy.Duration(10.0)) #tf buffer length
tf_listener = tf2_ros.TransformListener(tf_buffer)
| false
| true
|
f719fb0a5fa90c220d27a523e8d540e39d655557
| 5,183
|
py
|
Python
|
pyampd/ampd.py
|
luigiluz/pyampd
|
cd247030f5a4ccd971da837b9b873cacbd7adfb3
|
[
"MIT"
] | 25
|
2019-04-13T06:39:33.000Z
|
2022-03-11T22:38:46.000Z
|
pyampd/ampd.py
|
luigiluz/pyampd
|
cd247030f5a4ccd971da837b9b873cacbd7adfb3
|
[
"MIT"
] | 5
|
2018-12-05T10:07:20.000Z
|
2021-02-17T09:08:10.000Z
|
pyampd/ampd.py
|
luigiluz/pyampd
|
cd247030f5a4ccd971da837b9b873cacbd7adfb3
|
[
"MIT"
] | 5
|
2020-10-18T12:42:14.000Z
|
2021-07-01T05:32:50.000Z
|
import numpy as np
from scipy.ndimage import uniform_filter1d
from scipy.signal import detrend
def find_peaks_original(x, scale=None, debug=False):
"""Find peaks in quasi-periodic noisy signals using AMPD algorithm.
Automatic Multi-Scale Peak Detection originally proposed in
"An Efficient Algorithm for Automatic Peak Detection in
Noisy Periodic and Quasi-Periodic Signals", Algorithms 2012, 5, 588-603
https://doi.org/10.1109/ICRERA.2016.7884365
Optimized implementation by Igor Gotlibovych, 2018
Parameters
----------
x : ndarray
1-D array on which to find peaks
scale : int, optional
specify maximum scale window size of (2 * scale + 1)
debug : bool, optional
if set to True, return the Local Scalogram Matrix, `LSM`,
and scale with most local maxima, `l`,
together with peak locations
Returns
-------
pks: ndarray
The ordered array of peak indices found in `x`
"""
x = detrend(x)
N = len(x)
L = N // 2
if scale:
L = min(scale, L)
# create LSM matrix
LSM = np.zeros((L, N), dtype=bool)
for k in np.arange(1, L):
LSM[k - 1, k:N - k] = (
(x[0:N - 2 * k] < x[k:N - k]) & (x[k:N - k] > x[2 * k:N])
)
# Find scale with most maxima
G = LSM.sum(axis=1)
l_scale = np.argmax(G)
# find peaks that persist on all scales up to l
pks_logical = np.min(LSM[0:l_scale, :], axis=0)
pks = np.flatnonzero(pks_logical)
if debug:
return pks, LSM, l_scale
return pks
def find_peaks(x, scale=None, debug=False):
"""Find peaks in quasi-periodic noisy signals using AMPD algorithm.
Extended implementation handles peaks near start/end of the signal.
Optimized implementation by Igor Gotlibovych, 2018
Parameters
----------
x : ndarray
1-D array on which to find peaks
scale : int, optional
specify maximum scale window size of (2 * scale + 1)
debug : bool, optional
if set to True, return the Local Scalogram Matrix, `LSM`,
weighted number of maxima, 'G',
and scale at which G is maximized, `l`,
together with peak locations
Returns
-------
pks: ndarray
The ordered array of peak indices found in `x`
"""
x = detrend(x)
N = len(x)
L = N // 2
if scale:
L = min(scale, L)
# create LSM matrix
LSM = np.ones((L, N), dtype=bool)
for k in np.arange(1, L + 1):
LSM[k - 1, 0:N - k] &= (x[0:N - k] > x[k:N]
) # compare to right neighbours
LSM[k - 1, k:N] &= (x[k:N] > x[0:N - k]) # compare to left neighbours
# Find scale with most maxima
G = LSM.sum(axis=1)
G = G * np.arange(
N // 2, N // 2 - L, -1
) # normalize to adjust for new edge regions
l_scale = np.argmax(G)
# find peaks that persist on all scales up to l
pks_logical = np.min(LSM[0:l_scale, :], axis=0)
pks = np.flatnonzero(pks_logical)
if debug:
return pks, LSM, G, l_scale
return pks
def find_peaks_adaptive(x, window=None, debug=False):
"""Find peaks in quasi-periodic noisy signals using ASS-AMPD algorithm.
Adaptive Scale Selection Automatic Multi-Scale Peak Detection,
an extension of AMPD -
"An Efficient Algorithm for Automatic Peak Detection in
Noisy Periodic and Quasi-Periodic Signals", Algorithms 2012, 5, 588-603
https://doi.org/10.1109/ICRERA.2016.7884365
Optimized implementation by Igor Gotlibovych, 2018
Parameters
----------
x : ndarray
1-D array on which to find peaks
window : int, optional
sliding window size for adaptive scale selection
debug : bool, optional
if set to True, return the Local Scalogram Matrix, `LSM`,
and `adaptive_scale`,
together with peak locations
Returns
-------
pks: ndarray
The ordered array of peak indices found in `x`
"""
x = detrend(x)
N = len(x)
if not window:
window = N
if window > N:
window = N
L = window // 2
# create LSM matrix
LSM = np.ones((L, N), dtype=bool)
for k in np.arange(1, L + 1):
LSM[k - 1, 0:N - k] &= (x[0:N - k] > x[k:N]
) # compare to right neighbours
LSM[k - 1, k:N] &= (x[k:N] > x[0:N - k]) # compare to left neighbours
# Create continuous adaptive LSM
ass_LSM = uniform_filter1d(LSM * window, window, axis=1, mode='nearest')
normalization = np.arange(L, 0, -1) # scale normalization weight
ass_LSM = ass_LSM * normalization.reshape(-1, 1)
# Find adaptive scale at each point
adaptive_scale = ass_LSM.argmax(axis=0)
# construct reduced LSM
LSM_reduced = LSM[:adaptive_scale.max(), :]
mask = (np.indices(LSM_reduced.shape)[0] > adaptive_scale
) # these elements are outside scale of interest
LSM_reduced[mask] = 1
# find peaks that persist on all scales up to l
pks_logical = np.min(LSM_reduced, axis=0)
pks = np.flatnonzero(pks_logical)
if debug:
return pks, ass_LSM, adaptive_scale
return pks
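# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A synthetic quasi-periodic signal with additive noise; find_peaks should
# return indices near the sine maxima (exact values depend on the noise).
def _example_find_peaks():
    t = np.linspace(0, 10 * np.pi, 2000)
    x = np.sin(t) + 0.2 * np.random.randn(t.size)
    return find_peaks(x)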
| 29.282486
| 78
| 0.605248
|
import numpy as np
from scipy.ndimage import uniform_filter1d
from scipy.signal import detrend
def find_peaks_original(x, scale=None, debug=False):
x = detrend(x)
N = len(x)
L = N // 2
if scale:
L = min(scale, L)
LSM = np.zeros((L, N), dtype=bool)
for k in np.arange(1, L):
LSM[k - 1, k:N - k] = (
(x[0:N - 2 * k] < x[k:N - k]) & (x[k:N - k] > x[2 * k:N])
)
G = LSM.sum(axis=1)
l_scale = np.argmax(G)
pks_logical = np.min(LSM[0:l_scale, :], axis=0)
pks = np.flatnonzero(pks_logical)
if debug:
return pks, LSM, l_scale
return pks
def find_peaks(x, scale=None, debug=False):
x = detrend(x)
N = len(x)
L = N // 2
if scale:
L = min(scale, L)
LSM = np.ones((L, N), dtype=bool)
for k in np.arange(1, L + 1):
LSM[k - 1, 0:N - k] &= (x[0:N - k] > x[k:N]
)
LSM[k - 1, k:N] &= (x[k:N] > x[0:N - k])
G = LSM.sum(axis=1)
G = G * np.arange(
N // 2, N // 2 - L, -1
)
l_scale = np.argmax(G)
pks_logical = np.min(LSM[0:l_scale, :], axis=0)
pks = np.flatnonzero(pks_logical)
if debug:
return pks, LSM, G, l_scale
return pks
def find_peaks_adaptive(x, window=None, debug=False):
x = detrend(x)
N = len(x)
if not window:
window = N
if window > N:
window = N
L = window // 2
LSM = np.ones((L, N), dtype=bool)
for k in np.arange(1, L + 1):
LSM[k - 1, 0:N - k] &= (x[0:N - k] > x[k:N]
)
LSM[k - 1, k:N] &= (x[k:N] > x[0:N - k])
ass_LSM = uniform_filter1d(LSM * window, window, axis=1, mode='nearest')
normalization = np.arange(L, 0, -1)
ass_LSM = ass_LSM * normalization.reshape(-1, 1)
adaptive_scale = ass_LSM.argmax(axis=0)
LSM_reduced = LSM[:adaptive_scale.max(), :]
mask = (np.indices(LSM_reduced.shape)[0] > adaptive_scale
)
LSM_reduced[mask] = 1
pks_logical = np.min(LSM_reduced, axis=0)
pks = np.flatnonzero(pks_logical)
if debug:
return pks, ass_LSM, adaptive_scale
return pks
| true
| true
|
f719fb9a1924b2e4695d476c0d4c308d07b01506
| 9,413
|
py
|
Python
|
Radiosonde_Data/weekly_cross_section.py
|
peterwilletts24/Python-Scripts
|
975d6b2e2923cbde40d2760eb9574acee2e10388
|
[
"MIT"
] | 4
|
2017-05-24T09:14:14.000Z
|
2019-01-02T19:20:38.000Z
|
Radiosonde_Data/weekly_cross_section.py
|
peterwilletts24/Python-Scripts
|
975d6b2e2923cbde40d2760eb9574acee2e10388
|
[
"MIT"
] | null | null | null |
Radiosonde_Data/weekly_cross_section.py
|
peterwilletts24/Python-Scripts
|
975d6b2e2923cbde40d2760eb9574acee2e10388
|
[
"MIT"
] | 3
|
2017-05-24T09:14:15.000Z
|
2020-09-28T08:32:02.000Z
|
#Weekly
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.mlab as ml
import datetime
from dateutil.relativedelta import relativedelta
import re
import numpy as np
from math import sin, cos, atan2, radians, sqrt
import scipy.interpolate
import gc
import pdb
import imp
imp.load_source('GenMeteoFuncs', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/GeneralMeteoFunctions.py')
from GenMeteoFuncs import *
#imp.load_source('SoundingRoutines', '/nfs/see-fs-01_users/eepdw/python_scripts/Tephigram/Sounding_Routines.py')
#from SoundingRoutines import *
imp.load_source('GeogFuncs', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/GeogFunctions.py')
from GeogFuncs import *
Cross_Section_Title = 'Vizag_to_Afghanistan'
station_list_cs=[43150, 42867, 43014, 42339, 40990, 40948]
first_station=43150
date_min=datetime.datetime(2011,5,1,0,0,0)
date_max=datetime.datetime(2011,10,1,0,0,0)
delta = relativedelta(weeks=+1)
def variable_name_index_match(variable, variable_list):
for key, value in variable_list.iteritems(): # iter on both keys and values
if key.startswith('%s' % variable):
arr_index_var=value
return arr_index_var
def variable_cat(var_index, station_list_cs):
var_cat=[]
distances=[]
date_min_max=[]
for stat in station_list_cs:
load_file = np.load('/nfs/a90/eepdw/Data/Observations/Radiosonde_Numpy/Radiosonde_Cross_Section_'
'IND_SOUNDING_INTERP_MEAN_%s_%s_%s_%s_%s.npz'
% (Cross_Section_Title, date_min.strftime('%Y%m%d'), date_max.strftime('%Y%m%d'), delta, stat))
print load_file['date_bin_mean_all_dates_one_station'].shape
if date_min_max ==[]:
date_min_max=np.empty(load_file['min_max_date_bin'].shape)
station_title, station_lon, station_lat = StationInfoSearch(stat)
dist_from_first_station = CalculateDistanceFromFirstStation(stat, first_station_lon, first_station_lat, station_lat, station_lon)
print dist_from_first_station
#print load_file['date_bin_mean_all_dates_one_station'][:,var_index,:].shape
var_cat.append(load_file['date_bin_mean_all_dates_one_station'][:,var_index,:])
distances.append(dist_from_first_station)
#pdb.set_trace()
#if load_file['min_max_date_bin'].any() != np.NAN:
#date_min_max=np.ma.masked_outside(load_file['min_max_date_bin'], date_min, date_max ).data
date_min_max = np.where((load_file['min_max_date_bin']>date_min) & (load_file['min_max_date_bin']<date_max), load_file['min_max_date_bin'], date_min_max )
print np.array(var_cat).shape
print date_min_max
return np.array(var_cat), np.array(distances, dtype=float), date_min_max
def station_name_plot(station_list_cs, first_station, yi):
y_offset_text=0
first_station_title, first_station_lon, first_station_lat = StationInfoSearch(first_station)
for stat in station_list_cs:
station_title, station_lon, station_lat = StationInfoSearch(stat)
dist_from_first_station = CalculateDistanceFromFirstStation(stat, first_station_lon, first_station_lat, station_lat, station_lon)
plt.axvline(x=dist_from_first_station, ymin=0, ymax=1, label=station_title, color='k')
plt.text(dist_from_first_station+0.1,max(yi)/100+20,station_title,rotation=-45)
y_offset_text=+1
def grid_data_cs(pressure, distance, param):
xi=np.linspace(0, max(distance), 200)
#yi=np.linspace(np.nanmin(pressure), np.nanmax(pressure), 500)
yi=np.linspace(5000, 100000, 50) # Points for pressure interpolation
#yi=np.array([1000, 925, 850, 700, 500, 400, 300, 250, 200, 150, 100, 70, 50, 30, 20,10], dtype=float)
#yi=np.array([10, 20, 30, 50, 70, 100, 150, 200, 250, 300, 400, 500, 700, 850, 925, 1000]*100, dtype=float)
try:
zi = ml.griddata(distance, pressure,param,xi, yi, interp='nn')
#zi = scipy.interpolate.griddata((distance, pressure), param, (xi[None,:],yi[:,None]), method='linear')
except Exception, e:
print e
return xi,yi,zi
#return xi,yi
# def plot_rad_cs(xi,yi,zi, min_contour, max_contour):
# clevs = np.linspace(min_contour, max_contour,256)
# ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
# plt.figure(figsize=(14,8))
# cmap=plt.cm.jet
# cont = plt.contourf(xi,yi/100, zi, clevs, cmap=cmap, extend='both')
# cbar = plt.colorbar(cont, orientation='vertical', pad=0.05, extend='both', format = '$%d$')
# #cbar.set_label('$W m^{-2}$')
# cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
# cbar.set_ticklabels(['${%d}$' % i for i in ticks])
# plt.gca().invert_yaxis()
# plt.ylabel('Pressure (hPa)')
# plt.xlabel('km from first station')
# return cont,cbar
def plot_rad_cs_winds(xi,yi,zi, min_contour, max_contour, wind_gridded):
clevs = np.linspace(min_contour, max_contour,256)
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
plt.figure(figsize=(14,8))
cmap=plt.cm.jet
cont = plt.contourf(xi,yi/100, zi, clevs, cmap=cmap, extend='both')
plt.contour(xi,yi/100, zi, clevs, cmap=cmap, extend='both')
cbar = plt.colorbar(cont, orientation='vertical', pad=0.05, extend='both', format = '$%d$')
#cbar.set_label('$W m^{-2}$')
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['${%d}$' % i for i in ticks])
plt.gca().invert_yaxis()
plt.ylabel('Pressure (hPa)')
plt.xlabel('km from first station')
return cont,cbar
# def date_bin_plot(i, date_bin, concat_plot_variable, pressures, distances, min_contour, max_contour):
# nan_mask = np.ma.masked_array(np.array(concat_plot_variable[:,i,:], dtype=float).flatten(), np.isnan(np.array(concat_plot_variable[:,i,:], dtype=float).flatten()))
# #print nan_mask
# print concat_plot_variable.shape
# try:
# if nan_mask.mask.all() == False:
# print nan_mask
# xi,yi, zi = grid_data_cs(np.array(pressures[:,i,:], dtype=float).flatten(), np.repeat(distances, concat_plot_variable[:,i,:].shape[1]).flatten(), nan_mask)
# cont,cbar = plot_rad_cs(xi, yi, zi, min_contour, max_contour)
# station_name_plot(station_list_cs, first_station, yi)
# except Exception, e:
# print e
# return cont,cbar
def date_bin_plot_winds(i, date_bin, concat_plot_variable, pressures, distances, min_contour, max_contour, wind_to_plot):
nan_mask = np.ma.masked_array(np.array(concat_plot_variable[:,i,:], dtype=float).flatten(), np.isnan(np.array(concat_plot_variable[:,i,:], dtype=float).flatten()))
#print nan_mask
print concat_plot_variable.shape
try:
if nan_mask.mask.all() == False:
print nan_mask
xi,yi, zi = grid_data_cs(np.array(pressures[:,i,:], dtype=float).flatten(), np.repeat(distances, concat_plot_variable[:,i,:].shape[1]).flatten(), nan_mask)
xiw,yiw, ziw = grid_data_cs(np.array(pressures[:,i,:], dtype=float).flatten(), np.repeat(distances, concat_plot_variable[:,i,:].shape[1]).flatten(), wind_to_plot[nan_mask.mask])
cont,cbar = plot_rad_cs_winds(xi, yi, zi, min_contour, max_contour, ziw)
station_name_plot(station_list_cs, first_station, yi)
except Exception, e:
print e
return cont,cbar
station_list_search='/nfs/a90/eepdw/Data/Observations/Radiosonde_downloaded_from_NOAA_GUAN/igra-stations.txt'
station_metadata=[]
f = open(station_list_search,'r')
for line in f:
line = line.strip()
line=re.sub(r'([A-Z])\s([A-Z])', r'\1_\2',line)
line=re.sub(r'([A-Z])\s\s([A-Z])', r'\1_\2',line)
station_metadata.append(line.split())
f.close()
first_station_title, first_station_lon, first_station_lat = StationInfoSearch(first_station)
variable_list={'pressures': 0, 'temps':1, 'dewpoints':2, 'winddirs':3, 'windspeeds':4, 'pot_temp':5,
'sat_vap_pres':6, 'vap_press':7, 'rel_hum':8, 'wvmr':9, 'sp_hum':10, 'sat_temp':11, 'theta_e':12, 'theta_e_sat':13}
variable='pressures'
var_index = variable_name_index_match(variable, variable_list)
pressures, distances, date_min_max = variable_cat(var_index, station_list_cs)
variable='rel_hum'
var_index = variable_name_index_match(variable, variable_list)
concat_plot_variable, distances, date_min_max = variable_cat(var_index, station_list_cs)
variable='winddirs'
var_index = variable_name_index_match(variable, variable_list)
wind_direction, distances, date_min_max = variable_cat(var_index, station_list_cs)
variable='windspeeds'
var_index = variable_name_index_match(variable, variable_list)
wind_speed, distances, date_min_max = variable_cat(var_index, station_list_cs)
u_wind,v_wind = UVWinds(wind_direction, wind_speed)
max_contour=100
min_contour=0
tick_interval=10
for i, date_bin in enumerate(date_min_max[:,0]):
try:
cont,cbar = date_bin_plot_winds(i, date_bin, concat_plot_variable, pressures, distances, min_contour, max_contour, v_wind)
cbar.set_label('\%', rotation=90)
print date_bin
plt.title('%s %s Cross-Section of Relative Humidity from Radiosonde Soundings' % (date_bin.strftime("%d %B"), Cross_Section_Title.replace('_',' ') ))
plt.show()
#plt.savefig('/nfs/a90/eepdw/Figures/Radiosonde/Cross_Sections/%s_%s_%s_Relative_Humidity.png' % (Cross_Section_Title, date_bin.strftime("%y"), date_bin.strftime("%d_%B")), format='png', bbox_inches='tight')
plt.close()
plt.clf()
gc.collect()
except Exception, e:
print e
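# --- Note added for illustration (not part of the original script) ---
# matplotlib.mlab.griddata was removed in Matplotlib 3.1, so on a modern install
# grid_data_cs would need the SciPy route already sketched in the commented-out
# line above, along the lines of:
#   zi = scipy.interpolate.griddata((distance, pressure), param,
#                                   (xi[None, :], yi[:, None]), method='linear')
# This is an assumption about porting the script, not its original behaviour.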
| 37.652
| 210
| 0.725274
|
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.mlab as ml
import datetime
from dateutil.relativedelta import relativedelta
import re
import numpy as np
from math import sin, cos, atan2, radians, sqrt
import scipy.interpolate
import gc
import pdb
import imp
imp.load_source('GenMeteoFuncs', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/GeneralMeteoFunctions.py')
from GenMeteoFuncs import *
imp.load_source('GeogFuncs', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/GeogFunctions.py')
from GeogFuncs import *
Cross_Section_Title = 'Vizag_to_Afghanistan'
station_list_cs=[43150, 42867, 43014, 42339, 40990, 40948]
first_station=43150
date_min=datetime.datetime(2011,5,1,0,0,0)
date_max=datetime.datetime(2011,10,1,0,0,0)
delta = relativedelta(weeks=+1)
def variable_name_index_match(variable, variable_list):
for key, value in variable_list.iteritems():
if key.startswith('%s' % variable):
arr_index_var=value
return arr_index_var
def variable_cat(var_index, station_list_cs):
var_cat=[]
distances=[]
date_min_max=[]
for stat in station_list_cs:
load_file = np.load('/nfs/a90/eepdw/Data/Observations/Radiosonde_Numpy/Radiosonde_Cross_Section_'
'IND_SOUNDING_INTERP_MEAN_%s_%s_%s_%s_%s.npz'
% (Cross_Section_Title, date_min.strftime('%Y%m%d'), date_max.strftime('%Y%m%d'), delta, stat))
print load_file['date_bin_mean_all_dates_one_station'].shape
if date_min_max ==[]:
date_min_max=np.empty(load_file['min_max_date_bin'].shape)
station_title, station_lon, station_lat = StationInfoSearch(stat)
dist_from_first_station = CalculateDistanceFromFirstStation(stat, first_station_lon, first_station_lat, station_lat, station_lon)
print dist_from_first_station
var_cat.append(load_file['date_bin_mean_all_dates_one_station'][:,var_index,:])
distances.append(dist_from_first_station)
date_min_max = np.where((load_file['min_max_date_bin']>date_min) & (load_file['min_max_date_bin']<date_max), load_file['min_max_date_bin'], date_min_max )
print np.array(var_cat).shape
print date_min_max
return np.array(var_cat), np.array(distances, dtype=float), date_min_max
def station_name_plot(station_list_cs, first_station, yi):
y_offset_text=0
first_station_title, first_station_lon, first_station_lat = StationInfoSearch(first_station)
for stat in station_list_cs:
station_title, station_lon, station_lat = StationInfoSearch(stat)
dist_from_first_station = CalculateDistanceFromFirstStation(stat, first_station_lon, first_station_lat, station_lat, station_lon)
plt.axvline(x=dist_from_first_station, ymin=0, ymax=1, label=station_title, color='k')
plt.text(dist_from_first_station+0.1,max(yi)/100+20,station_title,rotation=-45)
y_offset_text=+1
def grid_data_cs(pressure, distance, param):
xi=np.linspace(0, max(distance), 200)
yi=np.linspace(5000, 100000, 50)
try:
zi = ml.griddata(distance, pressure,param,xi, yi, interp='nn')
except Exception, e:
print e
return xi,yi,zi
def plot_rad_cs_winds(xi,yi,zi, min_contour, max_contour, wind_gridded):
clevs = np.linspace(min_contour, max_contour,256)
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
plt.figure(figsize=(14,8))
cmap=plt.cm.jet
cont = plt.contourf(xi,yi/100, zi, clevs, cmap=cmap, extend='both')
plt.contour(xi,yi/100, zi, clevs, cmap=cmap, extend='both')
cbar = plt.colorbar(cont, orientation='vertical', pad=0.05, extend='both', format = '$%d$')
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['${%d}$' % i for i in ticks])
plt.gca().invert_yaxis()
plt.ylabel('Pressure (hPa)')
plt.xlabel('km from first station')
return cont,cbar
def date_bin_plot_winds(i, date_bin, concat_plot_variable, pressures, distances, min_contour, max_contour, wind_to_plot):
nan_mask = np.ma.masked_array(np.array(concat_plot_variable[:,i,:], dtype=float).flatten(), np.isnan(np.array(concat_plot_variable[:,i,:], dtype=float).flatten()))
print concat_plot_variable.shape
try:
if nan_mask.mask.all() == False:
print nan_mask
xi,yi, zi = grid_data_cs(np.array(pressures[:,i,:], dtype=float).flatten(), np.repeat(distances, concat_plot_variable[:,i,:].shape[1]).flatten(), nan_mask)
xiw,yiw, ziw = grid_data_cs(np.array(pressures[:,i,:], dtype=float).flatten(), np.repeat(distances, concat_plot_variable[:,i,:].shape[1]).flatten(), wind_to_plot[nan_mask.mask])
cont,cbar = plot_rad_cs_winds(xi, yi, zi, min_contour, max_contour, ziw)
station_name_plot(station_list_cs, first_station, yi)
except Exception, e:
print e
return cont,cbar
station_list_search='/nfs/a90/eepdw/Data/Observations/Radiosonde_downloaded_from_NOAA_GUAN/igra-stations.txt'
station_metadata=[]
f = open(station_list_search,'r')
for line in f:
line = line.strip()
line=re.sub(r'([A-Z])\s([A-Z])', r'\1_\2',line)
line=re.sub(r'([A-Z])\s\s([A-Z])', r'\1_\2',line)
station_metadata.append(line.split())
f.close()
first_station_title, first_station_lon, first_station_lat = StationInfoSearch(first_station)
variable_list={'pressures': 0, 'temps':1, 'dewpoints':2, 'winddirs':3, 'windspeeds':4, 'pot_temp':5,
'sat_vap_pres':6, 'vap_press':7, 'rel_hum':8, 'wvmr':9, 'sp_hum':10, 'sat_temp':11, 'theta_e':12, 'theta_e_sat':13}
variable='pressures'
var_index = variable_name_index_match(variable, variable_list)
pressures, distances, date_min_max = variable_cat(var_index, station_list_cs)
variable='rel_hum'
var_index = variable_name_index_match(variable, variable_list)
concat_plot_variable, distances, date_min_max = variable_cat(var_index, station_list_cs)
variable='winddirs'
var_index = variable_name_index_match(variable, variable_list)
wind_direction, distances, date_min_max = variable_cat(var_index, station_list_cs)
variable='windspeeds'
var_index = variable_name_index_match(variable, variable_list)
wind_speed, distances, date_min_max = variable_cat(var_index, station_list_cs)
u_wind,v_wind = UVWinds(wind_direction, wind_speed)
max_contour=100
min_contour=0
tick_interval=10
for i, date_bin in enumerate(date_min_max[:,0]):
try:
cont,cbar = date_bin_plot_winds(i, date_bin, concat_plot_variable, pressures, distances, min_contour, max_contour, v_wind)
cbar.set_label('\%', rotation=90)
print date_bin
plt.title('%s %s Cross-Section of Relative Humidity from Radiosonde Soundings' % (date_bin.strftime("%d %B"), Cross_Section_Title.replace('_',' ') ))
plt.show()
plt.close()
plt.clf()
gc.collect()
except Exception, e:
print e
| false
| true
|
f719fc3ddd0729538402b3d0087f650db4bf9a87
| 3,272
|
py
|
Python
|
extract_features.py
|
bionlplab/heart_failure_mortality
|
f3bbfe65fe6f2c2a076acb38697133b472bf2231
|
[
"BSD-3-Clause"
] | 4
|
2021-06-06T17:50:44.000Z
|
2021-12-27T11:45:34.000Z
|
extract_features.py
|
bionlplab/heart_failure_mortality
|
f3bbfe65fe6f2c2a076acb38697133b472bf2231
|
[
"BSD-3-Clause"
] | 1
|
2021-11-28T00:39:50.000Z
|
2021-12-08T13:58:56.000Z
|
extract_features.py
|
bionlplab/heart_failure_mortality
|
f3bbfe65fe6f2c2a076acb38697133b472bf2231
|
[
"BSD-3-Clause"
] | null | null | null |
import pandas as pd
import numpy as np
from utils import *
from sklearn.preprocessing import StandardScaler
from collections import defaultdict
import re
import itertools
from functools import reduce  # reduce/itertools are used below; they may also come via `from utils import *`
def format_labels(file_path, timelines, mapping):
most_recent = mapping.sort_values(["subject_id", "ordering_date"], ascending=False).drop_duplicates("subject_id", keep="first")
label_features = pd.read_csv(file_path)
formatted_features = reformat4pycox(["report_id"], label_features)
#Connect subject to report
data_frames = [timelines, most_recent]
data_df = reduce(lambda left,right: pd.merge(left,right,on="subject_id"), data_frames)
#Connect report to labels
data_frames = [data_df, formatted_features]
data_df = reduce(lambda left,right: pd.merge(left,right,on="report_id"), data_frames)
for i in ["ordering_date", "report_id"]:
del data_df[i]
return data_df
def format_hidden_features(file_path, timelines, mapping):
loaded = np.load(file_path)
most_recent = mapping.sort_values(["subject_id", "ordering_date"], ascending=False).drop_duplicates("subject_id", keep="first")
report_ids = list(most_recent['report_id'])
mutable_file = {}
for id in report_ids:
mutable_file[id] = loaded[id].flatten()
loaded = mutable_file
label_features = pd.DataFrame(loaded.values(), index=loaded)
cols = list(label_features.columns)
xcols = ["x" + str(i) for i in cols]
rename_dict = dict(zip(cols,xcols))
rename_dict["index"] = "report_id"
label_features = label_features.reset_index().rename(columns=rename_dict)
#Connect subject to report
data_frames = [timelines, most_recent]
data_df = reduce(lambda left,right: pd.merge(left,right,on="subject_id"), data_frames)
#Connect report to labels
data_frames = [data_df, label_features]
data_df = reduce(lambda left,right: pd.merge(left,right,on="report_id"), data_frames)
for i in ["ordering_date", "report_id"]:
del data_df[i]
return data_df
def format_hf_sequence(file_path, timelines, mapping):
loaded = np.load(file_path)
top3_reports = mapping.sort_values(["subject_id", "ordering_date"], ascending=True).groupby("subject_id").tail(3)
#Create a list of report ids
report_dict = top3_reports.groupby("subject_id")["report_id"].apply(list).to_dict()
#Create a dict of report arrays. Format: key: array of report embeddings
embedding_dict = defaultdict(list)
for k,v in report_dict.items():
for vi in v:
embedding_dict[k].append(loaded[vi])
embedding_dict[k] = np.vstack(embedding_dict[k])
#Converting embedding dict into dataframe
label_features = pd.DataFrame(embedding_dict.values(), index=embedding_dict)
label_features[0] = label_features[0].apply(lambda x: add_paddings(x))
list2d = label_features[0]
merged = list(itertools.chain(*list2d))
scaler = StandardScaler()
scaler.fit(merged)
label_features[0] = label_features[0].apply(lambda x: scaler.transform(x))
cols = list(label_features.columns)
xcols = ["x" + str(i) for i in cols]
rename_dict = dict(zip(cols,xcols))
label_features = label_features.rename(columns=rename_dict)
label_features = label_features.reset_index().rename(columns={"index": "subject_id"})
data_frames = [timelines, label_features]
data_df = reduce(lambda left,right: pd.merge(left,right,on="subject_id"), data_frames)
return data_df
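# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The reduce/pd.merge chaining used throughout this file, shown on tiny
# placeholder frames; column names are hypothetical.
def _example_merge_chain():
    timelines = pd.DataFrame({"subject_id": [1, 2], "duration": [30, 45]})
    mapping = pd.DataFrame({"subject_id": [1, 2], "report_id": ["r1", "r2"]})
    features = pd.DataFrame({"report_id": ["r1", "r2"], "x0": [0.1, 0.7]})
    merged = reduce(lambda left, right: pd.merge(left, right, on="subject_id"),
                    [timelines, mapping])
    return reduce(lambda left, right: pd.merge(left, right, on="report_id"),
                  [merged, features])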
| 32.078431
| 128
| 0.755501
|
import pandas as pd
import numpy as np
from utils import *
from sklearn.preprocessing import StandardScaler
from collections import defaultdict
import re
import itertools
from functools import reduce
def format_labels(file_path, timelines, mapping):
most_recent = mapping.sort_values(["subject_id", "ordering_date"], ascending=False).drop_duplicates("subject_id", keep="first")
label_features = pd.read_csv(file_path)
formatted_features = reformat4pycox(["report_id"], label_features)
data_frames = [timelines, most_recent]
data_df = reduce(lambda left,right: pd.merge(left,right,on="subject_id"), data_frames)
data_frames = [data_df, formatted_features]
data_df = reduce(lambda left,right: pd.merge(left,right,on="report_id"), data_frames)
for i in ["ordering_date", "report_id"]:
del data_df[i]
return data_df
def format_hidden_features(file_path, timelines, mapping):
loaded = np.load(file_path)
most_recent = mapping.sort_values(["subject_id", "ordering_date"], ascending=False).drop_duplicates("subject_id", keep="first")
report_ids = list(most_recent['report_id'])
mutable_file = {}
for id in report_ids:
mutable_file[id] = loaded[id].flatten()
loaded = mutable_file
label_features = pd.DataFrame(loaded.values(), index=loaded)
cols = list(label_features.columns)
xcols = ["x" + str(i) for i in cols]
rename_dict = dict(zip(cols,xcols))
rename_dict["index"] = "report_id"
label_features = label_features.reset_index().rename(columns=rename_dict)
data_frames = [timelines, most_recent]
data_df = reduce(lambda left,right: pd.merge(left,right,on="subject_id"), data_frames)
data_frames = [data_df, label_features]
data_df = reduce(lambda left,right: pd.merge(left,right,on="report_id"), data_frames)
for i in ["ordering_date", "report_id"]:
del data_df[i]
return data_df
def format_hf_sequence(file_path, timelines, mapping):
loaded = np.load(file_path)
top3_reports = mapping.sort_values(["subject_id", "ordering_date"], ascending=True).groupby("subject_id").tail(3)
report_dict = top3_reports.groupby("subject_id")["report_id"].apply(list).to_dict()
embedding_dict = defaultdict(list)
for k,v in report_dict.items():
for vi in v:
embedding_dict[k].append(loaded[vi])
embedding_dict[k] = np.vstack(embedding_dict[k])
label_features = pd.DataFrame(embedding_dict.values(), index=embedding_dict)
label_features[0] = label_features[0].apply(lambda x: add_paddings(x))
list2d = label_features[0]
merged = list(itertools.chain(*list2d))
scaler = StandardScaler()
scaler.fit(merged)
label_features[0] = label_features[0].apply(lambda x: scaler.transform(x))
cols = list(label_features.columns)
xcols = ["x" + str(i) for i in cols]
rename_dict = dict(zip(cols,xcols))
label_features = label_features.rename(columns=rename_dict)
label_features = label_features.reset_index().rename(columns={"index": "subject_id"})
data_frames = [timelines, label_features]
data_df = reduce(lambda left,right: pd.merge(left,right,on="subject_id"), data_frames)
return data_df
| true
| true
|
f719fd14389a9547c6251cef99f54bae3af19a6e
| 221
|
py
|
Python
|
output/models/ms_data/datatypes/facets/non_negative_integer/non_negative_integer_min_exclusive004_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/ms_data/datatypes/facets/non_negative_integer/non_negative_integer_min_exclusive004_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/ms_data/datatypes/facets/non_negative_integer/non_negative_integer_min_exclusive004_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.ms_data.datatypes.facets.non_negative_integer.non_negative_integer_min_exclusive004_xsd.non_negative_integer_min_exclusive004 import (
FooType,
Test,
)
__all__ = [
"FooType",
"Test",
]
| 22.1
| 153
| 0.773756
|
from output.models.ms_data.datatypes.facets.non_negative_integer.non_negative_integer_min_exclusive004_xsd.non_negative_integer_min_exclusive004 import (
FooType,
Test,
)
__all__ = [
"FooType",
"Test",
]
| true
| true
|
f719fd37c128d9a9db10d9a47902af2a5eb5d61e
| 3,283
|
py
|
Python
|
lektor/markdown/__init__.py
|
uk0/lektor
|
21bdf99aa1183b4398043f87ba8ed137fad529ce
|
[
"BSD-3-Clause"
] | null | null | null |
lektor/markdown/__init__.py
|
uk0/lektor
|
21bdf99aa1183b4398043f87ba8ed137fad529ce
|
[
"BSD-3-Clause"
] | null | null | null |
lektor/markdown/__init__.py
|
uk0/lektor
|
21bdf99aa1183b4398043f87ba8ed137fad529ce
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
from typing import Any
from typing import Dict
from typing import Hashable
from typing import Type
from typing import TYPE_CHECKING
from weakref import ref as weakref
from deprecated import deprecated
from markupsafe import Markup
from lektor.markdown.controller import ControllerCache
from lektor.markdown.controller import FieldOptions
from lektor.markdown.controller import MarkdownController
from lektor.markdown.controller import Meta
from lektor.markdown.controller import RenderResult
from lektor.sourceobj import SourceObject
if sys.version_info >= (3, 8):
from importlib.metadata import version
else:
from importlib_metadata import version
if TYPE_CHECKING: # pragma: no cover
from lektor.environment import Environment
controller_class: Type[MarkdownController]
MISTUNE_VERSION = version("mistune")
if MISTUNE_VERSION.startswith("0."):
from lektor.markdown.mistune0 import MarkdownController0 as controller_class
elif MISTUNE_VERSION.startswith("2."):
from lektor.markdown.mistune2 import MarkdownController2 as controller_class
else: # pragma: no cover
raise ImportError("Unsupported version of mistune")
get_controller = ControllerCache(controller_class)
@deprecated
def make_markdown(env: "Environment") -> Any: # (Environment) -> mistune.Markdown
return get_controller(env).make_parser()
@deprecated
def markdown_to_html(
text: str, record: SourceObject, field_options: FieldOptions
) -> RenderResult:
return get_controller().render(text, record, field_options)
class Markdown:
def __init__(
self, source: str, record: SourceObject, field_options: FieldOptions
) -> None:
self.source = source
self.__record = weakref(record)
self.__field_options = field_options
self.__cache: Dict[Hashable, RenderResult] = {}
def __bool__(self) -> bool:
return bool(self.source)
__nonzero__ = __bool__
@property
def record(self) -> SourceObject:
record = self.__record()
if record is None:
raise RuntimeError("Record has gone away")
return record
def __render(self) -> RenderResult:
# When the markdown instance is attached to a cached object, we
# can end up in a situation where, e.g., the base_url has changed
# between the time we were put into the cache and the time we are
# referenced again from somewhere else. Since this affects the
# processing of relative links, we need to re-process our markdown
# in that case.
controller = get_controller()
key = controller.get_cache_key()
result = self.__cache.get(key) if key is not None else None
if result is None:
result = controller.render(self.source, self.record, self.__field_options)
if key is not None:
self.__cache[key] = result
return result
@property
def meta(self) -> Meta:
return self.__render().meta
@property
def html(self) -> Markup:
return Markup(self.__render().html)
def __getitem__(self, name: str) -> Any:
return self.meta[name]
def __str__(self) -> str:
return self.__render().html
def __html__(self) -> Markup:
return self.html
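# Illustrative sketch (not part of Lektor): how the per-key render cache in
# Markdown.__render behaves. `record` and `field_options` are hypothetical
# stand-ins that Lektor would normally supply at runtime.
def _render_twice(source, record, field_options):
    md = Markdown(source, record, field_options)
    first = md.html   # renders and caches under the controller's cache key
    second = md.html  # served from the cache unless the cache key changed
    return first, second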
| 30.682243
| 86
| 0.709108
|
import sys
from typing import Any
from typing import Dict
from typing import Hashable
from typing import Type
from typing import TYPE_CHECKING
from weakref import ref as weakref
from deprecated import deprecated
from markupsafe import Markup
from lektor.markdown.controller import ControllerCache
from lektor.markdown.controller import FieldOptions
from lektor.markdown.controller import MarkdownController
from lektor.markdown.controller import Meta
from lektor.markdown.controller import RenderResult
from lektor.sourceobj import SourceObject
if sys.version_info >= (3, 8):
from importlib.metadata import version
else:
from importlib_metadata import version
if TYPE_CHECKING:
from lektor.environment import Environment
controller_class: Type[MarkdownController]
MISTUNE_VERSION = version("mistune")
if MISTUNE_VERSION.startswith("0."):
from lektor.markdown.mistune0 import MarkdownController0 as controller_class
elif MISTUNE_VERSION.startswith("2."):
from lektor.markdown.mistune2 import MarkdownController2 as controller_class
else:
raise ImportError("Unsupported version of mistune")
get_controller = ControllerCache(controller_class)
@deprecated
def make_markdown(env: "Environment") -> Any:
return get_controller(env).make_parser()
@deprecated
def markdown_to_html(
text: str, record: SourceObject, field_options: FieldOptions
) -> RenderResult:
return get_controller().render(text, record, field_options)
class Markdown:
def __init__(
self, source: str, record: SourceObject, field_options: FieldOptions
) -> None:
self.source = source
self.__record = weakref(record)
self.__field_options = field_options
self.__cache: Dict[Hashable, RenderResult] = {}
def __bool__(self) -> bool:
return bool(self.source)
__nonzero__ = __bool__
@property
def record(self) -> SourceObject:
record = self.__record()
if record is None:
raise RuntimeError("Record has gone away")
return record
def __render(self) -> RenderResult:
controller = get_controller()
key = controller.get_cache_key()
result = self.__cache.get(key) if key is not None else None
if result is None:
result = controller.render(self.source, self.record, self.__field_options)
if key is not None:
self.__cache[key] = result
return result
@property
def meta(self) -> Meta:
return self.__render().meta
@property
def html(self) -> Markup:
return Markup(self.__render().html)
def __getitem__(self, name: str) -> Any:
return self.meta[name]
def __str__(self) -> str:
return self.__render().html
def __html__(self) -> Markup:
return self.html
| true
| true
|
f719fec77a658c0d0bd1fb9dff8594c94cc357ad
| 59,113
|
py
|
Python
|
venv/lib/python3.6/site-packages/bioblend/galaxy/objects/wrappers.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/bioblend/galaxy/objects/wrappers.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/bioblend/galaxy/objects/wrappers.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
# pylint: disable=W0622,E1101
"""
A basic object-oriented interface for Galaxy entities.
"""
import abc
import json
from collections.abc import (
Iterable,
Mapping,
Sequence,
)
from typing import Tuple
import bioblend
from bioblend.util import abstractclass
__all__ = (
'Wrapper',
'Step',
'Workflow',
'LibraryContentInfo',
'HistoryContentInfo',
'DatasetContainer',
'History',
'Library',
'Folder',
'Dataset',
'HistoryDatasetAssociation',
'DatasetCollection',
'HistoryDatasetCollectionAssociation',
'LibraryDatasetDatasetAssociation',
'LibraryDataset',
'Tool',
'Job',
'LibraryPreview',
'HistoryPreview',
'WorkflowPreview',
)
@abstractclass
class Wrapper:
"""
Abstract base class for Galaxy entity wrappers.
Wrapper instances wrap deserialized JSON dictionaries such as the
ones obtained by the Galaxy web API, converting key-based access to
attribute-based access (e.g., ``library['name'] -> library.name``).
Dict keys that are converted to attributes are listed in the
``BASE_ATTRS`` class variable: this is the 'stable' interface.
Note that the wrapped dictionary is accessible via the ``wrapped``
attribute.
"""
BASE_ATTRS: Tuple[str, ...] = ('id', )
def __init__(self, wrapped, parent=None, gi=None):
"""
:type wrapped: dict
:param wrapped: JSON-serializable dictionary
:type parent: :class:`Wrapper`
:param parent: the parent of this wrapper
:type gi: :class:`GalaxyInstance`
:param gi: the GalaxyInstance through which we can access this wrapper
"""
if not isinstance(wrapped, Mapping):
raise TypeError('wrapped object must be a mapping type')
# loads(dumps(x)) is a bit faster than deepcopy and allows type checks
try:
dumped = json.dumps(wrapped)
except (TypeError, ValueError):
raise ValueError('wrapped object must be JSON-serializable')
object.__setattr__(self, 'wrapped', json.loads(dumped))
for k in self.BASE_ATTRS:
object.__setattr__(self, k, self.wrapped.get(k))
object.__setattr__(self, '_cached_parent', parent)
object.__setattr__(self, 'is_modified', False)
object.__setattr__(self, 'gi', gi)
@property
def parent(self):
"""
The parent of this wrapper.
"""
return self._cached_parent
@property
def is_mapped(self):
"""
``True`` if this wrapper is mapped to an actual Galaxy entity.
"""
return self.id is not None
def unmap(self):
"""
Disconnect this wrapper from Galaxy.
"""
object.__setattr__(self, 'id', None)
def clone(self):
"""
Return an independent copy of this wrapper.
"""
return self.__class__(self.wrapped)
def touch(self):
"""
Mark this wrapper as having been modified since its creation.
"""
object.__setattr__(self, 'is_modified', True)
if self.parent:
self.parent.touch()
def to_json(self):
"""
Return a JSON dump of this wrapper.
"""
return json.dumps(self.wrapped)
@classmethod
def from_json(cls, jdef):
"""
Build a new wrapper from a JSON dump.
"""
return cls(json.loads(jdef))
# FIXME: things like self.x[0] = 'y' do NOT call self.__setattr__
def __setattr__(self, name, value):
if name not in self.wrapped:
raise AttributeError("can't set attribute")
else:
self.wrapped[name] = value
object.__setattr__(self, name, value)
self.touch()
def __repr__(self):
return f"{self.__class__.__name__}({self.wrapped!r})"
class Step(Wrapper):
"""
Workflow step.
Steps are the main building blocks of a Galaxy workflow. A step can be: an
input (type ``data_collection_input``, ``data_input`` or
``parameter_input``), a computational tool (type ``tool``), a subworkflow
(type ``subworkflow``) or a pause (type ``pause``).
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'input_steps',
'name',
'tool_id',
'tool_inputs',
'tool_version',
'type',
)
def __init__(self, step_dict, parent):
super().__init__(step_dict, parent=parent, gi=parent.gi)
try:
stype = step_dict['type']
except KeyError:
raise ValueError('not a step dict')
if stype not in {'data_collection_input', 'data_input', 'parameter_input', 'pause', 'subworkflow', 'tool'}:
raise ValueError(f"Unknown step type: {stype!r}")
class InvocationStep(Wrapper):
"""
Invocation step.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'action',
'job_id',
'order_index',
'state',
'update_time',
'workflow_step_id',
'workflow_step_label',
'workflow_step_uuid',
)
class Workflow(Wrapper):
"""
Workflows represent ordered sequences of computations on Galaxy.
A workflow defines a sequence of steps that produce one or more
results from an input dataset.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'inputs',
'latest_workflow_uuid',
'name',
'owner',
'published',
'steps',
'tags',
)
POLLING_INTERVAL = 10 # for output state monitoring
def __init__(self, wf_dict, gi=None):
super().__init__(wf_dict, gi=gi)
missing_ids = []
if gi:
tools_list_by_id = [t.id for t in gi.tools.get_previews()]
else:
tools_list_by_id = []
tool_labels_to_ids = {}
for k, v in self.steps.items():
# convert step ids to str for consistency with outer keys
v['id'] = str(v['id'])
for i in v['input_steps'].values():
i['source_step'] = str(i['source_step'])
step = Step(v, self)
self.steps[k] = step
if step.type == 'tool':
if not step.tool_inputs or step.tool_id not in tools_list_by_id:
missing_ids.append(k)
tool_labels_to_ids.setdefault(step.tool_id, set()).add(step.id)
input_labels_to_ids = {}
for id_, d in self.inputs.items():
input_labels_to_ids.setdefault(d['label'], set()).add(id_)
object.__setattr__(self, 'input_labels_to_ids', input_labels_to_ids)
object.__setattr__(self, 'tool_labels_to_ids', tool_labels_to_ids)
dag, inv_dag = self._get_dag()
heads, tails = set(dag), set(inv_dag)
object.__setattr__(self, 'dag', dag)
object.__setattr__(self, 'inv_dag', inv_dag)
object.__setattr__(self, 'source_ids', heads - tails)
assert set(self.inputs) == self.data_collection_input_ids | self.data_input_ids | self.parameter_input_ids, \
f"inputs is {self.inputs!r}, while data_collection_input_ids is {self.data_collection_input_ids!r}, data_input_ids is {self.data_input_ids!r} and parameter_input_ids is {self.parameter_input_ids!r}"
object.__setattr__(self, 'sink_ids', tails - heads)
object.__setattr__(self, 'missing_ids', missing_ids)
def _get_dag(self):
"""
Return the workflow's DAG.
For convenience, this method computes a 'direct' (step =>
successors) and an 'inverse' (step => predecessors)
representation of the same DAG.
For instance, a workflow with a single tool *c*, two inputs
*a, b* and three outputs *d, e, f* is represented by (direct)::
{'a': {'c'}, 'b': {'c'}, 'c': {'d', 'e', 'f'}}
and by (inverse)::
{'c': {'a', 'b'}, 'd': {'c'}, 'e': {'c'}, 'f': {'c'}}
"""
dag, inv_dag = {}, {}
for s in self.steps.values():
for i in s.input_steps.values():
head, tail = i['source_step'], s.id
dag.setdefault(head, set()).add(tail)
inv_dag.setdefault(tail, set()).add(head)
return dag, inv_dag
def sorted_step_ids(self):
"""
Return a topological sort of the workflow's DAG.
"""
ids = []
source_ids = self.source_ids.copy()
inv_dag = {k: v.copy() for k, v in self.inv_dag.items()}
while source_ids:
head = source_ids.pop()
ids.append(head)
for tail in self.dag.get(head, []):
incoming = inv_dag[tail]
incoming.remove(head)
if not incoming:
source_ids.add(tail)
return ids
@property
def data_input_ids(self):
"""
Return the ids of data input steps for this workflow.
"""
return {id_ for id_, s in self.steps.items() if s.type == 'data_input'}
@property
def data_collection_input_ids(self):
"""
Return the ids of data collection input steps for this workflow.
"""
return {id_ for id_, s in self.steps.items() if s.type == 'data_collection_input'}
@property
def parameter_input_ids(self):
"""
Return the ids of parameter input steps for this workflow.
"""
return {id_ for id_, s in self.steps.items() if s.type == 'parameter_input'}
@property
def tool_ids(self):
"""
Return the ids of tool steps for this workflow.
"""
return {id_ for id_, s in self.steps.items() if s.type == 'tool'}
@property
def input_labels(self):
"""
Return the labels of this workflow's input steps.
"""
return set(self.input_labels_to_ids)
@property
def is_runnable(self):
"""
Return True if the workflow can be run on Galaxy.
A workflow is considered runnable on a Galaxy instance if all
of the tools it uses are installed in that instance.
"""
return not self.missing_ids
def convert_input_map(self, input_map):
"""
Convert ``input_map`` to the format required by the Galaxy web API.
:type input_map: dict
:param input_map: a mapping from input labels to datasets
:rtype: dict
:return: a mapping from input slot ids to dataset ids in the
format required by the Galaxy web API.
"""
m = {}
for label, slot_ids in self.input_labels_to_ids.items():
datasets = input_map.get(label, [])
if not isinstance(datasets, Iterable):
datasets = [datasets]
if len(datasets) < len(slot_ids):
raise RuntimeError(f'not enough datasets for "{label}"')
for id_, ds in zip(slot_ids, datasets):
m[id_] = {'id': ds.id, 'src': ds.SRC}
return m
def preview(self):
getf = self.gi.workflows.get_previews
try:
p = [_ for _ in getf(published=True) if _.id == self.id][0]
except IndexError:
raise ValueError(f"no object for id {self.id}")
return p
def run(self, input_map=None, history='', params=None, import_inputs=False,
replacement_params=None, wait=False,
polling_interval=POLLING_INTERVAL, break_on_error=True):
"""
Run the workflow in the current Galaxy instance.
.. deprecated:: 0.16.0
Use :meth:`invoke` instead.
:type input_map: dict
:param input_map: a mapping from workflow input labels to
datasets, e.g.: ``dict(zip(workflow.input_labels,
library.get_datasets()))``
:type history: :class:`History` or str
:param history: either a valid history object (results will be
stored there) or a string (a new history will be created with
the given name).
:type params: dict
:param params: a mapping of non-dataset tool parameters (see below)
:type import_inputs: bool
:param import_inputs: If ``True``, workflow inputs will be imported into
the history; if ``False``, only workflow outputs will be visible in
the history.
:type replacement_params: dict
:param replacement_params: pattern-based replacements for
post-job actions (see the docs for
:meth:`~bioblend.galaxy.workflows.WorkflowClient.invoke_workflow`)
:type wait: bool
:param wait: whether to wait while the returned datasets are
in a pending state
:type polling_interval: float
:param polling_interval: polling interval in seconds
:type break_on_error: bool
:param break_on_error: whether to break as soon as at least one
of the returned datasets is in the 'error' state
:rtype: tuple
:return: list of output datasets, output history
The ``params`` dict should be specified as follows::
{STEP_ID: PARAM_DICT, ...}
where PARAM_DICT is::
{PARAM_NAME: VALUE, ...}
For backwards compatibility, the following (deprecated) format is
also supported for ``params``::
{TOOL_ID: PARAM_DICT, ...}
in which case PARAM_DICT affects all steps with the given tool id.
If both by-tool-id and by-step-id specifications are used, the
latter takes precedence.
Finally (again, for backwards compatibility), PARAM_DICT can also
be specified as::
{'param': PARAM_NAME, 'value': VALUE}
Note that this format allows only one parameter to be set per step.
Example: set 'a' to 1 for the third workflow step::
params = {workflow.steps[2].id: {'a': 1}}
.. warning::
This is a blocking operation that can take a very long time. If
``wait`` is set to ``False``, the method will return as soon as the
workflow has been *scheduled*, otherwise it will wait until the
workflow has been *run*. With a large number of steps, however, the
delay may not be negligible even in the former case (e.g. minutes for
100 steps).
"""
if not self.is_mapped:
raise RuntimeError('workflow is not mapped to a Galaxy object')
if not self.is_runnable:
missing_tools_str = ', '.join(f"{self.steps[step_id].tool_id}[{step_id}]" for step_id in self.missing_ids)
raise RuntimeError(f"workflow has missing tools: {missing_tools_str}")
kwargs = {
'dataset_map': self.convert_input_map(input_map or {}),
'params': params,
'import_inputs_to_history': import_inputs,
'replacement_params': replacement_params,
}
if isinstance(history, History):
try:
kwargs['history_id'] = history.id
except AttributeError:
raise RuntimeError('history does not have an id')
elif isinstance(history, str):
kwargs['history_name'] = history
else:
raise TypeError(
'history must be either a history wrapper or a string')
res = self.gi.gi.workflows.run_workflow(self.id, **kwargs)
# res structure: {'history': HIST_ID, 'outputs': [CI_ID, CI_ID, ...]}
out_hist = self.gi.histories.get(res['history'])
content_infos_dict = {ci.id: ci for ci in out_hist.content_infos}
outputs = []
for output_id in res['outputs']:
if content_infos_dict[output_id].type == 'file':
outputs.append(out_hist.get_dataset(output_id))
elif content_infos_dict[output_id].type == 'collection':
outputs.append(out_hist.get_dataset_collection(output_id))
if wait:
self.gi._wait_datasets(outputs, polling_interval=polling_interval,
break_on_error=break_on_error)
return outputs, out_hist
def export(self):
"""
Export a re-importable representation of the workflow.
:rtype: dict
:return: a JSON-serializable dump of the workflow
"""
return self.gi.gi.workflows.export_workflow_dict(self.id)
def delete(self):
"""
Delete this workflow.
.. warning::
Deleting a workflow is irreversible - all of the data from
the workflow will be permanently deleted.
"""
self.gi.workflows.delete(id_=self.id)
self.unmap()
def invoke(self, inputs=None, params=None, history=None,
import_inputs_to_history=None, replacement_params=None,
allow_tool_state_corrections=True, inputs_by=None,
parameters_normalized=False):
"""
Invoke the workflow. This will cause a workflow to be scheduled
and return an object describing the workflow invocation.
:type inputs: dict
:param inputs: A mapping of workflow inputs to datasets and dataset collections.
The datasets source can be a LibraryDatasetDatasetAssociation (``ldda``),
LibraryDataset (``ld``), HistoryDatasetAssociation (``hda``), or
HistoryDatasetCollectionAssociation (``hdca``).
The map must be in the following format:
``{'<input_index>': {'id': <encoded dataset ID>, 'src': '[ldda, ld, hda, hdca]'}}``
(e.g. ``{'2': {'id': '29beef4fadeed09f', 'src': 'hda'}}``)
This map may also be indexed by the UUIDs of the workflow steps,
as indicated by the ``uuid`` property of steps returned from the
Galaxy API. Alternatively workflow steps may be addressed by
the label that can be set in the workflow editor. If using
uuid or label you need to also set the ``inputs_by`` parameter
to ``step_uuid`` or ``name``.
:type params: dict
:param params: A mapping of non-dataset tool parameters (see below)
:type history: str
:param history: The history in which to store the workflow
output.
:type import_inputs_to_history: bool
:param import_inputs_to_history: If ``True``, used workflow inputs will
be imported into the history. If ``False``, only workflow outputs will
be visible in the given history.
:type allow_tool_state_corrections: bool
:param allow_tool_state_corrections: If True, allow Galaxy to fill in
missing tool state when running workflows. This may be useful for
workflows using tools that have changed over time or for workflows
built outside of Galaxy with only a subset of inputs defined.
:type replacement_params: dict
:param replacement_params: pattern-based replacements for post-job
actions (see below)
:type inputs_by: str
:param inputs_by: Determines how inputs are referenced. Can be
"step_index|step_uuid" (default), "step_index", "step_id", "step_uuid", or "name".
:type parameters_normalized: bool
:param parameters_normalized: Whether Galaxy should normalize ``params``
to ensure everything is referenced by a numeric step ID. Default is
``False``, but when setting ``params`` for a subworkflow, ``True`` is
required.
:rtype: Invocation
:return: the workflow invocation
The ``params`` dict should be specified as follows::
{STEP_ID: PARAM_DICT, ...}
where PARAM_DICT is::
{PARAM_NAME: VALUE, ...}
For backwards compatibility, the following (deprecated) format is
also supported for ``params``::
{TOOL_ID: PARAM_DICT, ...}
in which case PARAM_DICT affects all steps with the given tool id.
If both by-tool-id and by-step-id specifications are used, the
latter takes precedence.
Finally (again, for backwards compatibility), PARAM_DICT can also
be specified as::
{'param': PARAM_NAME, 'value': VALUE}
Note that this format allows only one parameter to be set per step.
For a ``repeat`` parameter, the names of the contained parameters need
to be specified as ``<repeat name>_<repeat index>|<param name>``, with
the repeat index starting at 0. For example, if the tool XML contains::
<repeat name="cutoff" title="Parameters used to filter cells" min="1">
<param name="name" type="text" value="n_genes" label="Name of param...">
<option value="n_genes">n_genes</option>
<option value="n_counts">n_counts</option>
</param>
<param name="min" type="float" min="0" value="0" label="Min value"/>
</repeat>
then the PARAM_DICT should be something like::
{...
"cutoff_0|name": "n_genes",
"cutoff_0|min": "2",
"cutoff_1|name": "n_counts",
"cutoff_1|min": "4",
...}
At the time of this writing, it is not possible to change the number of
times the contained parameters are repeated. Therefore, the parameter
indexes can go from 0 to n-1, where n is the number of times the
repeated element was added when the workflow was saved in the Galaxy UI.
The ``replacement_params`` dict should map parameter names in
post-job actions (PJAs) to their runtime values. For
instance, if the final step has a PJA like the following::
{'RenameDatasetActionout_file1': {'action_arguments': {'newname': '${output}'},
'action_type': 'RenameDatasetAction',
'output_name': 'out_file1'}}
then the following renames the output dataset to 'foo'::
replacement_params = {'output': 'foo'}
See also `this email thread
<http://lists.bx.psu.edu/pipermail/galaxy-dev/2011-September/006875.html>`_.
.. warning::
Historically, the ``run_workflow`` method consumed a ``dataset_map``
data structure that was indexed by unencoded workflow step IDs. These
IDs would not be stable across Galaxy instances. The new ``inputs``
property is instead indexed by either the ``order_index`` property
(which is stable across workflow imports) or the step UUID which is
also stable.
"""
inv_dict = self.gi.gi.workflows.invoke_workflow(
workflow_id=self.id,
inputs=inputs,
params=params,
history_id=history.id,
import_inputs_to_history=import_inputs_to_history,
replacement_params=replacement_params,
allow_tool_state_corrections=allow_tool_state_corrections,
inputs_by=inputs_by,
parameters_normalized=parameters_normalized
)
return self.gi.invocations.get(inv_dict['id'])
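# Illustrative sketch (not part of bioblend): the Kahn-style topological
# sort performed by Workflow.sorted_step_ids(), applied to the DAG from the
# _get_dag() docstring above.
def _toposort_example():
    dag = {'a': {'c'}, 'b': {'c'}, 'c': {'d', 'e', 'f'}}
    inv_dag = {'c': {'a', 'b'}, 'd': {'c'}, 'e': {'c'}, 'f': {'c'}}
    source_ids = set(dag) - set(inv_dag)  # steps with no predecessors: {'a', 'b'}
    inv = {k: v.copy() for k, v in inv_dag.items()}
    order = []
    while source_ids:
        head = source_ids.pop()
        order.append(head)
        for tail in dag.get(head, ()):
            inv[tail].remove(head)
            if not inv[tail]:
                source_ids.add(tail)
    return order  # e.g. ['a', 'b', 'c', 'd', 'e', 'f'] (ties resolved by set order)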
class Invocation(Wrapper):
"""
Invocation of a workflow.
This causes the steps of a workflow to be executed in sequential order.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'history_id',
'inputs',
'state',
'steps',
'update_time',
'uuid',
'workflow_id',
)
def __init__(self, inv_dict, gi=None):
super().__init__(inv_dict, gi=gi)
self.steps = [InvocationStep(step, self) for step in self.steps]
self.inputs = [{**v, 'label': k} for k, v in self.inputs.items()]
def sorted_step_ids(self):
"""
Get the step IDs sorted by their order index.
:rtype: list of str
:return: sorted step IDs
"""
return [step.id for step in sorted(self.steps, key=lambda step: step.order_index)]
def step_states(self):
"""
Get the set of step states for this invocation.
:rtype: set
:return: step states
"""
return {step.state for step in self.steps}
def number_of_steps(self):
"""
Get the number of steps for this invocation.
:rtype: int
:return: number of steps
"""
return len(self.steps)
def sorted_steps_by(self, indices=None, states=None, step_ids=None):
"""
Get steps for this invocation, or get a subset by specifying
optional parameters for filtering.
:type indices: list of int
:param indices: return steps that have matching order_index
:type states: list of str
:param states: return steps that have matching states
:type step_ids: list of str
:param step_ids: return steps that have matching step_ids
:rtype: list of InvocationStep
:return: invocation steps
"""
steps = self.steps
if indices is not None:
steps = filter(lambda step: step.order_index in indices, steps)
if states is not None:
steps = filter(lambda step: step.state in states, steps)
if step_ids is not None:
steps = filter(lambda step: step.id in step_ids, steps)
return sorted(steps, key=lambda step: step.order_index)
def cancel(self):
"""
Cancel this invocation.
.. note::
On success, this method updates the Invocation object's internal variables.
"""
inv_dict = self.gi.gi.invocations.cancel_invocation(self.id)
self.__init__(inv_dict, gi=self.gi)
def refresh(self):
"""
Update this invocation with the latest information from the server.
.. note::
On success, this method updates the Invocation object's internal variables.
"""
inv_dict = self.gi.gi.invocations.show_invocation(self.id)
self.__init__(inv_dict, gi=self.gi)
def run_step_actions(self, steps, actions):
"""
Run actions for active steps of this invocation.
:type steps: list of InvocationStep
:param steps: list of steps to run actions on
:type actions: list of str
:param actions: list of actions to run
.. note::
On success, this method updates the Invocation object's internal step variables.
"""
if not len(steps) == len(actions):
raise RuntimeError(f'Different number of ``steps`` ({len(steps)}) and ``actions`` ({len(actions)}) in ``{self}.run_step_actions()``')
step_dict_list = [self.gi.gi.invocations.run_invocation_step_action(self.id, step.id, action) for step, action in zip(steps, actions)]
for step, step_dict in zip(steps, step_dict_list):
step.__init__(step_dict, parent=self)
def summary(self):
"""
Get a summary for this invocation.
:rtype: dict
:return: invocation summary
"""
return self.gi.gi.invocations.get_invocation_summary(self.id)
def step_jobs_summary(self):
"""
Get a summary for this invocation's step jobs.
:rtype: list of dicts
:return: step job summaries
"""
return self.gi.gi.invocations.get_invocation_step_jobs_summary(self.id)
def report(self):
"""
Get a dictionary containing a Markdown report for this invocation.
:rtype: dict
:return: invocation report
"""
return self.gi.gi.invocations.get_invocation_report(self.id)
def save_report_pdf(self, file_path, chunk_size=bioblend.CHUNK_SIZE):
"""
Download a PDF report for this invocation.
:type file_path: str
:param file_path: path to save the report
:type chunk_size: int
:param chunk_size: chunk size in bytes for reading remote data
"""
self.gi.gi.invocations.get_invocation_report_pdf(self.id, file_path, chunk_size)
def biocompute_object(self):
"""
Get a BioCompute object for this invocation.
:rtype: dict
:return: BioCompute object
"""
return self.gi.gi.invocations.get_invocation_biocompute_object(self.id)
def wait(self, maxwait=12000, interval=3, check=True):
"""
Wait for this invocation to reach a terminal state.
:type maxwait: float
:param maxwait: upper limit on waiting time
:type interval: float
:param interval: polling interval in seconds
:type check: bool
:param check: if ``True``, raise an error if the terminal state is not 'scheduled'
.. note::
On success, this method updates the Invocation object's internal variables.
"""
inv_dict = self.gi.gi.invocations.wait_for_invocation(self.id, maxwait=maxwait, interval=interval, check=check)
self.__init__(inv_dict, gi=self.gi)
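# Illustrative sketch (not part of bioblend): polling an invocation until it
# reaches a terminal state, then collecting any failed steps. `inv` is a
# hypothetical Invocation, e.g. the return value of Workflow.invoke().
def _watch_invocation(inv):
    inv.wait(maxwait=600, interval=5, check=False)  # refreshes inv in place
    if inv.state != 'scheduled':
        raise RuntimeError(f"invocation ended in state {inv.state!r}")
    return inv.sorted_steps_by(states=['error'])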
class Dataset(Wrapper, metaclass=abc.ABCMeta):
"""
Abstract base class for Galaxy datasets.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'data_type',
'file_ext',
'file_name',
'file_size',
'genome_build',
'misc_info',
'name',
'state',
)
POLLING_INTERVAL = 1 # for state monitoring
def __init__(self, ds_dict, container, gi=None):
super().__init__(ds_dict, gi=gi)
object.__setattr__(self, 'container', container)
@property
@abc.abstractmethod
def _stream_url(self):
"""
Return the URL to stream this dataset.
"""
pass
def get_stream(self, chunk_size=bioblend.CHUNK_SIZE):
"""
Open dataset for reading and return an iterator over its contents.
:type chunk_size: int
:param chunk_size: read this amount of bytes at a time
"""
kwargs = {'stream': True}
if isinstance(self, LibraryDataset):
kwargs['params'] = {'ld_ids%5B%5D': self.id}
r = self.gi.gi.make_get_request(self._stream_url, **kwargs)
if isinstance(self, LibraryDataset) and r.status_code == 500:
# compatibility with older Galaxy releases
kwargs['params'] = {'ldda_ids%5B%5D': self.id}
r = self.gi.gi.make_get_request(self._stream_url, **kwargs)
r.raise_for_status()
return r.iter_content(chunk_size) # FIXME: client can't close r
def peek(self, chunk_size=bioblend.CHUNK_SIZE):
"""
Open dataset for reading and return the first chunk.
See :meth:`.get_stream` for param info.
"""
try:
return next(self.get_stream(chunk_size=chunk_size))
except StopIteration:
return b''
def download(self, file_object, chunk_size=bioblend.CHUNK_SIZE):
"""
Open dataset for reading and save its contents to ``file_object``.
:type file_object: file
:param file_object: output file object
See :meth:`.get_stream` for info on other params.
"""
for chunk in self.get_stream(chunk_size=chunk_size):
file_object.write(chunk)
def get_contents(self, chunk_size=bioblend.CHUNK_SIZE):
"""
Open dataset for reading and return its **full** contents.
See :meth:`.get_stream` for param info.
"""
return b''.join(self.get_stream(chunk_size=chunk_size))
def refresh(self):
"""
Re-fetch the attributes pertaining to this object.
Returns: self
"""
gi_client = getattr(self.gi.gi, self.container.API_MODULE)
ds_dict = gi_client.show_dataset(self.container.id, self.id)
self.__init__(ds_dict, self.container, self.gi)
return self
def wait(self, polling_interval=POLLING_INTERVAL, break_on_error=True):
"""
Wait for this dataset to come out of the pending states.
:type polling_interval: float
:param polling_interval: polling interval in seconds
:type break_on_error: bool
:param break_on_error: if ``True``, raise a RuntimeError exception if
the dataset ends in the 'error' state.
.. warning::
This is a blocking operation that can take a very long time. Also,
note that this method does not return anything; however, this dataset
is refreshed (possibly multiple times) during the execution.
"""
self.gi._wait_datasets([self], polling_interval=polling_interval,
break_on_error=break_on_error)
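# Illustrative sketch (not part of bioblend): downloading a dataset's
# contents to a local file once it is ready. `ds` is a hypothetical mapped
# dataset (e.g. a HistoryDatasetAssociation) and `path` a local file path.
def _save_dataset(ds, path):
    ds.wait()                 # block until the dataset leaves its pending states
    with open(path, 'wb') as f:
        ds.download(f)        # streams the contents chunk by chunk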
class HistoryDatasetAssociation(Dataset):
"""
Maps to a Galaxy ``HistoryDatasetAssociation``.
"""
BASE_ATTRS = Dataset.BASE_ATTRS + ('annotation', 'deleted', 'purged', 'tags', 'visible')
SRC = 'hda'
@property
def _stream_url(self):
base_url = self.gi.gi.histories._make_url(module_id=self.container.id, contents=True)
return f"{base_url}/{self.id}/display"
def update(self, **kwds):
"""
Update this history dataset metadata. Some of the attributes that can be
modified are documented below.
:type name: str
:param name: Replace history dataset name with the given string
:type genome_build: str
:param genome_build: Replace history dataset genome build (dbkey)
:type annotation: str
:param annotation: Replace history dataset annotation with given string
:type deleted: bool
:param deleted: Mark or unmark history dataset as deleted
:type visible: bool
:param visible: Mark or unmark history dataset as visible
"""
res = self.gi.gi.histories.update_dataset(self.container.id, self.id, **kwds)
# Refresh also the history because the dataset may have been (un)deleted
self.container.refresh()
self.__init__(res, self.container, gi=self.gi)
return self
def delete(self, purge=False):
"""
Delete this history dataset.
:type purge: bool
:param purge: if ``True``, also purge (permanently delete) the dataset
.. note::
For the purge option to work, the Galaxy instance must have the
``allow_user_dataset_purge`` option set to ``true`` in the
``config/galaxy.yml`` configuration file.
"""
self.gi.gi.histories.delete_dataset(self.container.id, self.id, purge=purge)
self.container.refresh()
self.refresh()
class DatasetCollection(Wrapper, metaclass=abc.ABCMeta):
"""
Abstract base class for Galaxy dataset collections.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'collection_type',
'deleted',
'name',
'state',
)
def __init__(self, dsc_dict, container, gi=None):
super().__init__(dsc_dict, gi=gi)
object.__setattr__(self, 'container', container)
def refresh(self):
"""
Re-fetch the attributes pertaining to this object.
Returns: self
"""
gi_client = getattr(self.gi.gi, self.container.API_MODULE)
dsc_dict = gi_client.show_dataset_collection(self.container.id, self.id)
self.__init__(dsc_dict, self.container, self.gi)
return self
@abc.abstractmethod
def delete(self):
pass
class HistoryDatasetCollectionAssociation(DatasetCollection):
"""
Maps to a Galaxy ``HistoryDatasetCollectionAssociation``.
"""
BASE_ATTRS = DatasetCollection.BASE_ATTRS + ('tags', 'visible', 'elements')
SRC = 'hdca'
def delete(self):
"""
Delete this dataset collection.
"""
self.gi.gi.histories.delete_dataset_collection(self.container.id, self.id)
self.container.refresh()
self.refresh()
@abstractclass
class LibRelatedDataset(Dataset):
"""
Base class for LibraryDatasetDatasetAssociation and LibraryDataset classes.
"""
@property
def _stream_url(self):
base_url = self.gi.gi.libraries._make_url()
return f"{base_url}/datasets/download/uncompressed"
class LibraryDatasetDatasetAssociation(LibRelatedDataset):
"""
Maps to a Galaxy ``LibraryDatasetDatasetAssociation``.
"""
BASE_ATTRS = LibRelatedDataset.BASE_ATTRS + ('deleted',)
SRC = 'ldda'
class LibraryDataset(LibRelatedDataset):
"""
Maps to a Galaxy ``LibraryDataset``.
"""
SRC = 'ld'
def delete(self, purged=False):
"""
Delete this library dataset.
:type purged: bool
:param purged: if ``True``, also purge (permanently delete) the dataset
"""
self.gi.gi.libraries.delete_library_dataset(
self.container.id, self.id, purged=purged)
self.container.refresh()
self.refresh()
def update(self, **kwds):
"""
Update this library dataset metadata. Some of the attributes that can be
modified are documented below.
:type name: str
:param name: Replace history dataset name with the given string
:type genome_build: str
:param genome_build: Replace history dataset genome build (dbkey)
"""
res = self.gi.gi.libraries.update_library_dataset(self.id, **kwds)
self.container.refresh()
self.__init__(res, self.container, gi=self.gi)
return self
@abstractclass
class ContentInfo(Wrapper):
"""
Instances of this class wrap dictionaries obtained by getting
``/api/{histories,libraries}/<ID>/contents`` from Galaxy.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'name',
'type',
)
class LibraryContentInfo(ContentInfo):
"""
Instances of this class wrap dictionaries obtained by getting
``/api/libraries/<ID>/contents`` from Galaxy.
"""
class HistoryContentInfo(ContentInfo):
"""
Instances of this class wrap dictionaries obtained by getting
``/api/histories/<ID>/contents`` from Galaxy.
"""
BASE_ATTRS = ContentInfo.BASE_ATTRS + ('deleted', 'state', 'visible')
class DatasetContainer(Wrapper, metaclass=abc.ABCMeta):
"""
Abstract base class for dataset containers (histories and libraries).
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'name',
)
def __init__(self, c_dict, content_infos=None, gi=None):
"""
:type content_infos: list of :class:`ContentInfo`
:param content_infos: info objects for the container's contents
"""
super().__init__(c_dict, gi=gi)
if content_infos is None:
content_infos = []
object.__setattr__(self, 'content_infos', content_infos)
object.__setattr__(self, 'obj_gi_client', getattr(self.gi, self.API_MODULE))
@property
@abc.abstractmethod
def API_MODULE(self):
pass
@property
def dataset_ids(self):
"""
Return the ids of the contained datasets.
"""
return [_.id for _ in self.content_infos if _.type == 'file']
def preview(self):
getf = self.obj_gi_client.get_previews
# self.state could be stale: check both regular and deleted containers
try:
p = [_ for _ in getf() if _.id == self.id][0]
except IndexError:
try:
p = [_ for _ in getf(deleted=True) if _.id == self.id][0]
except IndexError:
raise ValueError(f"no object for id {self.id}")
return p
def refresh(self):
"""
Re-fetch the attributes pertaining to this object.
Returns: self
"""
fresh = self.obj_gi_client.get(self.id)
self.__init__(
fresh.wrapped, content_infos=fresh.content_infos, gi=self.gi)
return self
def get_dataset(self, ds_id):
"""
Retrieve the dataset corresponding to the given id.
:type ds_id: str
:param ds_id: dataset id
:rtype: :class:`~.HistoryDatasetAssociation` or
:class:`~.LibraryDataset`
:return: the dataset corresponding to ``ds_id``
"""
gi_client = getattr(self.gi.gi, self.API_MODULE)
ds_dict = gi_client.show_dataset(self.id, ds_id)
return self.DS_TYPE(ds_dict, self, gi=self.gi)
def get_datasets(self, name=None):
"""
Get all datasets contained inside this dataset container.
:type name: str
:param name: return only datasets with this name
:rtype: list of :class:`~.HistoryDatasetAssociation` or list of
:class:`~.LibraryDataset`
:return: datasets with the given name contained inside this
container
.. note::
when filtering library datasets by name, specify their full
paths starting from the library's root folder, e.g.,
``/seqdata/reads.fastq``. Full paths are available through
the ``content_infos`` attribute of
:class:`~.Library` objects.
"""
if name is None:
ds_ids = self.dataset_ids
else:
ds_ids = [_.id for _ in self.content_infos if _.name == name]
return [self.get_dataset(_) for _ in ds_ids]
class History(DatasetContainer):
"""
Maps to a Galaxy history.
"""
BASE_ATTRS = DatasetContainer.BASE_ATTRS + ('annotation', 'published', 'state', 'state_ids', 'state_details', 'tags')
DS_TYPE = HistoryDatasetAssociation
DSC_TYPE = HistoryDatasetCollectionAssociation
CONTENT_INFO_TYPE = HistoryContentInfo
API_MODULE = 'histories'
def update(self, **kwds):
"""
Update history metadata information. Some of the attributes that can be
modified are documented below.
:type name: str
:param name: Replace history name with the given string
:type annotation: str
:param annotation: Replace history annotation with the given string
:type deleted: bool
:param deleted: Mark or unmark history as deleted
:type purged: bool
:param purged: If True, mark history as purged (permanently deleted).
:type published: bool
:param published: Mark or unmark history as published
:type importable: bool
:param importable: Mark or unmark history as importable
:type tags: list
:param tags: Replace history tags with the given list
"""
# TODO: wouldn't it be better if name and annotation were attributes?
self.gi.gi.histories.update_history(self.id, **kwds)
self.refresh()
return self
def delete(self, purge=False):
"""
Delete this history.
:type purge: bool
:param purge: if ``True``, also purge (permanently delete) the history
.. note::
For the purge option to work, the Galaxy instance must have the
``allow_user_dataset_purge`` option set to ``true`` in the
``config/galaxy.yml`` configuration file.
"""
self.gi.histories.delete(id_=self.id, purge=purge)
self.refresh()
self.unmap()
def import_dataset(self, lds):
"""
Import a dataset into the history from a library.
:type lds: :class:`~.LibraryDataset`
:param lds: the library dataset to import
:rtype: :class:`~.HistoryDatasetAssociation`
:return: the imported history dataset
"""
if not self.is_mapped:
raise RuntimeError('history is not mapped to a Galaxy object')
if not isinstance(lds, LibraryDataset):
raise TypeError('lds is not a LibraryDataset')
res = self.gi.gi.histories.upload_dataset_from_library(self.id, lds.id)
if not isinstance(res, Mapping):
raise RuntimeError(
f"upload_dataset_from_library: unexpected reply: {res!r}"
)
self.refresh()
return self.get_dataset(res['id'])
def upload_file(self, path, **kwargs):
"""
Upload the file specified by ``path`` to this history.
:type path: str
:param path: path of the file to upload
See :meth:`~bioblend.galaxy.tools.ToolClient.upload_file` for
the optional parameters.
:rtype: :class:`~.HistoryDatasetAssociation`
:return: the uploaded dataset
"""
out_dict = self.gi.gi.tools.upload_file(path, self.id, **kwargs)
self.refresh()
return self.get_dataset(out_dict['outputs'][0]['id'])
upload_dataset = upload_file
def upload_from_ftp(self, path, **kwargs):
"""
Upload the file specified by ``path`` from the user's FTP directory to
this history.
:type path: str
:param path: path of the file in the user's FTP directory
See :meth:`~bioblend.galaxy.tools.ToolClient.upload_file` for
the optional parameters.
:rtype: :class:`~.HistoryDatasetAssociation`
:return: the uploaded dataset
"""
out_dict = self.gi.gi.tools.upload_from_ftp(path, self.id, **kwargs)
self.refresh()
return self.get_dataset(out_dict['outputs'][0]['id'])
def paste_content(self, content, **kwargs):
"""
Upload a string to a new dataset in this history.
:type content: str
:param content: content of the new dataset to upload
See :meth:`~bioblend.galaxy.tools.ToolClient.upload_file` for
the optional parameters (except file_name).
:rtype: :class:`~.HistoryDatasetAssociation`
:return: the uploaded dataset
"""
out_dict = self.gi.gi.tools.paste_content(content, self.id, **kwargs)
self.refresh()
return self.get_dataset(out_dict['outputs'][0]['id'])
def export(self, gzip=True, include_hidden=False, include_deleted=False,
wait=False, maxwait=None):
"""
Start a job to create an export archive for this history. See
:meth:`~bioblend.galaxy.histories.HistoryClient.export_history`
for parameter and return value info.
"""
return self.gi.gi.histories.export_history(
self.id, gzip=gzip, include_hidden=include_hidden,
include_deleted=include_deleted, wait=wait, maxwait=maxwait)
def download(self, jeha_id, outf, chunk_size=bioblend.CHUNK_SIZE):
"""
Download an export archive for this history. Use :meth:`export`
to create an export and get the required ``jeha_id``. See
:meth:`~bioblend.galaxy.histories.HistoryClient.download_history`
for parameter and return value info.
"""
return self.gi.gi.histories.download_history(
self.id, jeha_id, outf, chunk_size=chunk_size)
def create_dataset_collection(self, collection_description):
"""
Create a new dataset collection in the history by providing a collection description.
:type collection_description: bioblend.galaxy.dataset_collections.CollectionDescription
:param collection_description: a description of the dataset collection
:rtype: :class:`~.HistoryDatasetCollectionAssociation`
:return: the new dataset collection
"""
dataset_collection = self.gi.gi.histories.create_dataset_collection(self.id, collection_description)
self.refresh()
return self.get_dataset_collection(dataset_collection['id'])
def get_dataset_collection(self, dsc_id):
"""
Retrieve the dataset collection corresponding to the given id.
:type dsc_id: str
:param dsc_id: dataset collection id
:rtype: :class:`~.HistoryDatasetCollectionAssociation`
:return: the dataset collection corresponding to ``dsc_id``
"""
dsc_dict = self.gi.gi.histories.show_dataset_collection(self.id, dsc_id)
return self.DSC_TYPE(dsc_dict, self, gi=self.gi)
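# Illustrative sketch (not part of bioblend): uploading a local file into a
# history and peeking at the stored bytes. `hist` is a hypothetical mapped
# History object.
def _upload_and_peek(hist, path):
    hda = hist.upload_file(path)  # returns a HistoryDatasetAssociation
    hda.wait()
    return hda.peek()             # first chunk of the dataset's contents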
class Library(DatasetContainer):
"""
Maps to a Galaxy library.
"""
BASE_ATTRS = DatasetContainer.BASE_ATTRS + ('description', 'synopsis')
DS_TYPE = LibraryDataset
CONTENT_INFO_TYPE = LibraryContentInfo
API_MODULE = 'libraries'
@property
def folder_ids(self):
"""
Return the ids of the contained folders.
"""
return [_.id for _ in self.content_infos if _.type == 'folder']
def delete(self):
"""
Delete this library.
"""
self.gi.libraries.delete(id_=self.id)
self.refresh()
self.unmap()
def _pre_upload(self, folder):
"""
Return the id of the given folder, after sanity checking.
"""
if not self.is_mapped:
raise RuntimeError('library is not mapped to a Galaxy object')
return None if folder is None else folder.id
def upload_data(self, data, folder=None, **kwargs):
"""
Upload data to this library.
:type data: str
:param data: dataset contents
:type folder: :class:`~.Folder`
:param folder: a folder object, or ``None`` to upload to the root folder
:rtype: :class:`~.LibraryDataset`
:return: the dataset object that represents the uploaded content
Optional keyword arguments: ``file_type``, ``dbkey``.
"""
fid = self._pre_upload(folder)
res = self.gi.gi.libraries.upload_file_contents(
self.id, data, folder_id=fid, **kwargs)
self.refresh()
return self.get_dataset(res[0]['id'])
def upload_from_url(self, url, folder=None, **kwargs):
"""
Upload data to this library from the given URL.
:type url: str
:param url: URL from which data should be read
See :meth:`.upload_data` for info on other params.
"""
fid = self._pre_upload(folder)
res = self.gi.gi.libraries.upload_file_from_url(
self.id, url, folder_id=fid, **kwargs)
self.refresh()
return self.get_dataset(res[0]['id'])
def upload_from_local(self, path, folder=None, **kwargs):
"""
Upload data to this library from a local file.
:type path: str
:param path: local file path from which data should be read
See :meth:`.upload_data` for info on other params.
"""
fid = self._pre_upload(folder)
res = self.gi.gi.libraries.upload_file_from_local_path(
self.id, path, folder_id=fid, **kwargs)
self.refresh()
return self.get_dataset(res[0]['id'])
def upload_from_galaxy_fs(self, paths, folder=None, link_data_only=None, **kwargs):
"""
Upload data to this library from filesystem paths on the server.
.. note::
For this method to work, the Galaxy instance must have the
``allow_path_paste`` option set to ``true`` in the
``config/galaxy.yml`` configuration file.
:type paths: str or :class:`~collections.abc.Iterable` of str
:param paths: server-side file paths from which data should be read
:type link_data_only: str
:param link_data_only: either 'copy_files' (default) or
'link_to_files'. Setting to 'link_to_files' symlinks instead of
copying the files
:rtype: list of :class:`~.LibraryDataset`
:return: the dataset objects that represent the uploaded content
See :meth:`.upload_data` for info on other params.
"""
fid = self._pre_upload(folder)
if isinstance(paths, str):
paths = (paths,)
paths = '\n'.join(paths)
res = self.gi.gi.libraries.upload_from_galaxy_filesystem(
self.id, paths, folder_id=fid, link_data_only=link_data_only,
**kwargs)
if res is None:
raise RuntimeError('upload_from_galaxy_filesystem: no reply')
if not isinstance(res, Sequence):
raise RuntimeError(
f"upload_from_galaxy_filesystem: unexpected reply: {res!r}"
)
new_datasets = [
self.get_dataset(ds_info['id']) for ds_info in res
]
self.refresh()
return new_datasets
def copy_from_dataset(self, hda, folder=None, message=''):
"""
Copy a history dataset into this library.
:type hda: :class:`~.HistoryDatasetAssociation`
:param hda: history dataset to copy into the library
See :meth:`.upload_data` for info on other params.
"""
fid = self._pre_upload(folder)
res = self.gi.gi.libraries.copy_from_dataset(
self.id, hda.id, folder_id=fid, message=message)
self.refresh()
return self.get_dataset(res['library_dataset_id'])
def create_folder(self, name, description=None, base_folder=None):
"""
Create a folder in this library.
:type name: str
:param name: folder name
:type description: str
:param description: optional folder description
:type base_folder: :class:`~.Folder`
:param base_folder: parent folder, or ``None`` to create in the root
folder
:rtype: :class:`~.Folder`
:return: the folder just created
"""
bfid = None if base_folder is None else base_folder.id
res = self.gi.gi.libraries.create_folder(
self.id, name, description=description, base_folder_id=bfid)
self.refresh()
return self.get_folder(res[0]['id'])
def get_folder(self, f_id):
"""
Retrieve the folder corresponding to the given id.
:rtype: :class:`~.Folder`
:return: the folder corresponding to ``f_id``
"""
f_dict = self.gi.gi.libraries.show_folder(self.id, f_id)
return Folder(f_dict, self, gi=self.gi)
@property
def root_folder(self):
"""
The root folder of this library.
:rtype: :class:`~.Folder`
:return: the root folder of this library
"""
return self.get_folder(self.gi.gi.libraries._get_root_folder_id(self.id))
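# Illustrative sketch (not part of bioblend): creating a folder inside a
# library and uploading remote data into it. `lib` is a hypothetical mapped
# Library object and `url` a hypothetical data URL.
def _populate_library(lib, url):
    folder = lib.create_folder('raw', description='unprocessed inputs')
    return lib.upload_from_url(url, folder=folder)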
class Folder(Wrapper):
"""
Maps to a folder in a Galaxy library.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'description',
'item_count',
'name',
)
def __init__(self, f_dict, container, gi=None):
super().__init__(f_dict, gi=gi)
object.__setattr__(self, 'container', container)
@property
def parent(self):
"""
The parent folder of this folder. The parent of the root folder is
``None``.
:rtype: :class:`~.Folder`
:return: the parent of this folder
"""
if self._cached_parent is None:
object.__setattr__(self,
'_cached_parent',
self._get_parent())
return self._cached_parent
def _get_parent(self):
"""
Return the parent folder of this folder.
"""
parent_id = self.wrapped['parent_id']
if parent_id is None:
return None
return self.container.get_folder(parent_id)
def refresh(self):
"""
Re-fetch the attributes pertaining to this object.
Returns: self
"""
f_dict = self.gi.gi.libraries.show_folder(self.container.id, self.id)
self.__init__(f_dict, self.container, gi=self.gi)
return self
class Tool(Wrapper):
"""
Maps to a Galaxy tool.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'name',
'version',
)
POLLING_INTERVAL = 10 # for output state monitoring
def run(self, inputs, history, wait=False,
polling_interval=POLLING_INTERVAL):
"""
Execute this tool in the given history with inputs from dict
``inputs``.
:type inputs: dict
:param inputs: dictionary of input datasets and parameters for
the tool (see below)
:type history: :class:`History`
:param history: the history where to execute the tool
:type wait: bool
:param wait: whether to wait while the returned datasets are
in a pending state
:type polling_interval: float
:param polling_interval: polling interval in seconds
:rtype: list of :class:`HistoryDatasetAssociation`
:return: list of output datasets
The ``inputs`` dict should contain input datasets and parameters
in the (largely undocumented) format used by the Galaxy API.
Some examples can be found in `Galaxy's API test suite
<https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy_test/api/test_tools.py>`_.
The value of an input dataset can also be a :class:`Dataset`
object, which will be automatically converted to the needed
format.
"""
for k, v in inputs.items():
if isinstance(v, Dataset):
inputs[k] = {'src': v.SRC, 'id': v.id}
out_dict = self.gi.gi.tools.run_tool(history.id, self.id, inputs)
outputs = [history.get_dataset(_['id']) for _ in out_dict['outputs']]
if wait:
self.gi._wait_datasets(outputs, polling_interval=polling_interval)
return outputs
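# Illustrative sketch (not part of bioblend): running a tool on a single
# history dataset. `tool` and `hist` are hypothetical mapped objects and
# 'input1' is a hypothetical input name taken from the tool's definition.
def _run_tool_once(tool, hist, hda):
    outputs = tool.run({'input1': hda}, hist, wait=True)  # Dataset values auto-converted
    return outputs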
class Job(Wrapper):
"""
Maps to a Galaxy job.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + ('state',)
@abstractclass
class DatasetContainerPreview(Wrapper):
"""
Abstract base class for dataset container (history and library) 'previews'.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'name',
)
class LibraryPreview(DatasetContainerPreview):
"""
Models Galaxy library 'previews'.
Instances of this class wrap dictionaries obtained by getting
``/api/libraries`` from Galaxy.
"""
class HistoryPreview(DatasetContainerPreview):
"""
Models Galaxy history 'previews'.
Instances of this class wrap dictionaries obtained by getting
``/api/histories`` from Galaxy.
"""
BASE_ATTRS = DatasetContainerPreview.BASE_ATTRS + (
'annotation',
'published',
'purged',
'tags',
)
class WorkflowPreview(Wrapper):
"""
Models Galaxy workflow 'previews'.
Instances of this class wrap dictionaries obtained by getting
``/api/workflows`` from Galaxy.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'latest_workflow_uuid',
'name',
'number_of_steps',
'owner',
'published',
'show_in_tool_panel',
'tags',
)
class InvocationPreview(Wrapper):
"""
Models Galaxy invocation 'previews'.
Instances of this class wrap dictionaries obtained by getting
``/api/invocations`` from Galaxy.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'history_id',
'id',
'state',
'update_time',
'uuid',
'workflow_id',
)
class JobPreview(Wrapper):
"""
Models Galaxy job 'previews'.
Instances of this class wrap dictionaries obtained by getting
``/api/jobs`` from Galaxy.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + ('state',)
| 33.434955
| 210
| 0.612319
|
import abc
import json
from collections.abc import (
Iterable,
Mapping,
Sequence,
)
from typing import Tuple
import bioblend
from bioblend.util import abstractclass
__all__ = (
'Wrapper',
'Step',
'Workflow',
'LibraryContentInfo',
'HistoryContentInfo',
'DatasetContainer',
'History',
'Library',
'Folder',
'Dataset',
'HistoryDatasetAssociation',
'DatasetCollection',
'HistoryDatasetCollectionAssociation',
'LibraryDatasetDatasetAssociation',
'LibraryDataset',
'Tool',
'Job',
'LibraryPreview',
'HistoryPreview',
'WorkflowPreview',
)
@abstractclass
class Wrapper:
BASE_ATTRS: Tuple[str, ...] = ('id', )
def __init__(self, wrapped, parent=None, gi=None):
if not isinstance(wrapped, Mapping):
raise TypeError('wrapped object must be a mapping type')
try:
dumped = json.dumps(wrapped)
except (TypeError, ValueError):
raise ValueError('wrapped object must be JSON-serializable')
object.__setattr__(self, 'wrapped', json.loads(dumped))
for k in self.BASE_ATTRS:
object.__setattr__(self, k, self.wrapped.get(k))
object.__setattr__(self, '_cached_parent', parent)
object.__setattr__(self, 'is_modified', False)
object.__setattr__(self, 'gi', gi)
@property
def parent(self):
return self._cached_parent
@property
def is_mapped(self):
return self.id is not None
def unmap(self):
object.__setattr__(self, 'id', None)
def clone(self):
return self.__class__(self.wrapped)
def touch(self):
object.__setattr__(self, 'is_modified', True)
if self.parent:
self.parent.touch()
def to_json(self):
return json.dumps(self.wrapped)
@classmethod
def from_json(cls, jdef):
return cls(json.loads(jdef))
def __setattr__(self, name, value):
if name not in self.wrapped:
raise AttributeError("can't set attribute")
else:
self.wrapped[name] = value
object.__setattr__(self, name, value)
self.touch()
def __repr__(self):
return f"{self.__class__.__name__}({self.wrapped!r})"
class Step(Wrapper):
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'input_steps',
'name',
'tool_id',
'tool_inputs',
'tool_version',
'type',
)
def __init__(self, step_dict, parent):
super().__init__(step_dict, parent=parent, gi=parent.gi)
try:
stype = step_dict['type']
except KeyError:
raise ValueError('not a step dict')
if stype not in {'data_collection_input', 'data_input', 'parameter_input', 'pause', 'subworkflow', 'tool'}:
raise ValueError(f"Unknown step type: {stype!r}")
class InvocationStep(Wrapper):
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'action',
'job_id',
'order_index',
'state',
'update_time',
'workflow_step_id',
'workflow_step_label',
'workflow_step_uuid',
)
class Workflow(Wrapper):
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'inputs',
'latest_workflow_uuid',
'name',
'owner',
'published',
'steps',
'tags',
)
POLLING_INTERVAL = 10 # for output state monitoring
def __init__(self, wf_dict, gi=None):
super().__init__(wf_dict, gi=gi)
missing_ids = []
if gi:
tools_list_by_id = [t.id for t in gi.tools.get_previews()]
else:
tools_list_by_id = []
tool_labels_to_ids = {}
for k, v in self.steps.items():
# convert step ids to str for consistency with outer keys
v['id'] = str(v['id'])
for i in v['input_steps'].values():
i['source_step'] = str(i['source_step'])
step = Step(v, self)
self.steps[k] = step
if step.type == 'tool':
if not step.tool_inputs or step.tool_id not in tools_list_by_id:
missing_ids.append(k)
tool_labels_to_ids.setdefault(step.tool_id, set()).add(step.id)
input_labels_to_ids = {}
for id_, d in self.inputs.items():
input_labels_to_ids.setdefault(d['label'], set()).add(id_)
object.__setattr__(self, 'input_labels_to_ids', input_labels_to_ids)
object.__setattr__(self, 'tool_labels_to_ids', tool_labels_to_ids)
dag, inv_dag = self._get_dag()
heads, tails = set(dag), set(inv_dag)
object.__setattr__(self, 'dag', dag)
object.__setattr__(self, 'inv_dag', inv_dag)
object.__setattr__(self, 'source_ids', heads - tails)
assert set(self.inputs) == self.data_collection_input_ids | self.data_input_ids | self.parameter_input_ids, \
f"inputs is {self.inputs!r}, while data_collection_input_ids is {self.data_collection_input_ids!r}, data_input_ids is {self.data_input_ids!r} and parameter_input_ids is {self.parameter_input_ids!r}"
object.__setattr__(self, 'sink_ids', tails - heads)
object.__setattr__(self, 'missing_ids', missing_ids)
def _get_dag(self):
dag, inv_dag = {}, {}
for s in self.steps.values():
for i in s.input_steps.values():
head, tail = i['source_step'], s.id
dag.setdefault(head, set()).add(tail)
inv_dag.setdefault(tail, set()).add(head)
return dag, inv_dag
def sorted_step_ids(self):
ids = []
source_ids = self.source_ids.copy()
inv_dag = {k: v.copy() for k, v in self.inv_dag.items()}
while source_ids:
head = source_ids.pop()
ids.append(head)
for tail in self.dag.get(head, []):
incoming = inv_dag[tail]
incoming.remove(head)
if not incoming:
source_ids.add(tail)
return ids
@property
def data_input_ids(self):
return {id_ for id_, s in self.steps.items() if s.type == 'data_input'}
@property
def data_collection_input_ids(self):
return {id_ for id_, s in self.steps.items() if s.type == 'data_collection_input'}
@property
def parameter_input_ids(self):
return {id_ for id_, s in self.steps.items() if s.type == 'parameter_input'}
@property
def tool_ids(self):
return {id_ for id_, s in self.steps.items() if s.type == 'tool'}
@property
def input_labels(self):
return set(self.input_labels_to_ids)
@property
def is_runnable(self):
return not self.missing_ids
def convert_input_map(self, input_map):
m = {}
for label, slot_ids in self.input_labels_to_ids.items():
datasets = input_map.get(label, [])
if not isinstance(datasets, Iterable):
datasets = [datasets]
if len(datasets) < len(slot_ids):
raise RuntimeError(f'not enough datasets for "{label}"')
for id_, ds in zip(slot_ids, datasets):
m[id_] = {'id': ds.id, 'src': ds.SRC}
return m
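    # Shape of the conversion (a sketch; `ds` stands for any Dataset wrapper,
    # whose SRC is e.g. 'hda' for history datasets, and the slot id '0' is
    # illustrative):
    #   convert_input_map({'input file': ds})
    #   # -> {'0': {'id': ds.id, 'src': 'hda'}}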
def preview(self):
getf = self.gi.workflows.get_previews
try:
p = [_ for _ in getf(published=True) if _.id == self.id][0]
except IndexError:
raise ValueError(f"no object for id {self.id}")
return p
def run(self, input_map=None, history='', params=None, import_inputs=False,
replacement_params=None, wait=False,
polling_interval=POLLING_INTERVAL, break_on_error=True):
if not self.is_mapped:
raise RuntimeError('workflow is not mapped to a Galaxy object')
if not self.is_runnable:
missing_tools_str = ', '.join(f"{self.steps[step_id].tool_id}[{step_id}]" for step_id in self.missing_ids)
raise RuntimeError(f"workflow has missing tools: {missing_tools_str}")
kwargs = {
'dataset_map': self.convert_input_map(input_map or {}),
'params': params,
'import_inputs_to_history': import_inputs,
'replacement_params': replacement_params,
}
if isinstance(history, History):
try:
kwargs['history_id'] = history.id
except AttributeError:
raise RuntimeError('history does not have an id')
elif isinstance(history, str):
kwargs['history_name'] = history
else:
raise TypeError(
'history must be either a history wrapper or a string')
res = self.gi.gi.workflows.run_workflow(self.id, **kwargs)
# res structure: {'history': HIST_ID, 'outputs': [CI_ID, CI_ID, ...]}
out_hist = self.gi.histories.get(res['history'])
content_infos_dict = {ci.id: ci for ci in out_hist.content_infos}
outputs = []
for output_id in res['outputs']:
if content_infos_dict[output_id].type == 'file':
outputs.append(out_hist.get_dataset(output_id))
elif content_infos_dict[output_id].type == 'collection':
outputs.append(out_hist.get_dataset_collection(output_id))
if wait:
self.gi._wait_datasets(outputs, polling_interval=polling_interval,
break_on_error=break_on_error)
return outputs, out_hist
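    # Typical call pattern (a sketch; assumes `gi` is a mapped
    # bioblend.galaxy.objects.GalaxyInstance, `ds` is a dataset wrapper, and
    # the workflow has an input labeled 'input file'):
    #   wf = gi.workflows.list()[0]
    #   hist = gi.histories.create('run history')
    #   outputs, out_hist = wf.run({'input file': ds}, hist, wait=True)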
def export(self):
return self.gi.gi.workflows.export_workflow_dict(self.id)
def delete(self):
self.gi.workflows.delete(id_=self.id)
self.unmap()
def invoke(self, inputs=None, params=None, history=None,
import_inputs_to_history=None, replacement_params=None,
allow_tool_state_corrections=True, inputs_by=None,
parameters_normalized=False):
inv_dict = self.gi.gi.workflows.invoke_workflow(
workflow_id=self.id,
inputs=inputs,
params=params,
history_id=history.id,
import_inputs_to_history=import_inputs_to_history,
replacement_params=replacement_params,
allow_tool_state_corrections=allow_tool_state_corrections,
inputs_by=inputs_by,
parameters_normalized=parameters_normalized
)
return self.gi.invocations.get(inv_dict['id'])
class Invocation(Wrapper):
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'history_id',
'inputs',
'state',
'steps',
'update_time',
'uuid',
'workflow_id',
)
def __init__(self, inv_dict, gi=None):
super().__init__(inv_dict, gi=gi)
self.steps = [InvocationStep(step, self) for step in self.steps]
self.inputs = [{**v, 'label': k} for k, v in self.inputs.items()]
def sorted_step_ids(self):
return [step.id for step in sorted(self.steps, key=lambda step: step.order_index)]
def step_states(self):
return {step.state for step in self.steps}
def number_of_steps(self):
return len(self.steps)
def sorted_steps_by(self, indices=None, states=None, step_ids=None):
steps = self.steps
if indices is not None:
steps = filter(lambda step: step.order_index in indices, steps)
if states is not None:
steps = filter(lambda step: step.state in states, steps)
if step_ids is not None:
steps = filter(lambda step: step.id in step_ids, steps)
return sorted(steps, key=lambda step: step.order_index)
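    # e.g. the steps of this invocation that are still pending, in workflow
    # order (the state names here are illustrative Galaxy states):
    #   inv.sorted_steps_by(states={'new', 'running'})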
def cancel(self):
inv_dict = self.gi.gi.invocations.cancel_invocation(self.id)
self.__init__(inv_dict, gi=self.gi)
def refresh(self):
inv_dict = self.gi.gi.invocations.show_invocation(self.id)
self.__init__(inv_dict, gi=self.gi)
def run_step_actions(self, steps, actions):
        if len(steps) != len(actions):
            raise RuntimeError(f'Different number of ``steps`` ({len(steps)}) and ``actions`` ({len(actions)}) in ``{self}.run_step_actions()``')
step_dict_list = [self.gi.gi.invocations.run_invocation_step_action(self.id, step.id, action) for step, action in zip(steps, actions)]
for step, step_dict in zip(steps, step_dict_list):
step.__init__(step_dict, parent=self)
def summary(self):
return self.gi.gi.invocations.get_invocation_summary(self.id)
def step_jobs_summary(self):
return self.gi.gi.invocations.get_invocation_step_jobs_summary(self.id)
def report(self):
return self.gi.gi.invocations.get_invocation_report(self.id)
def save_report_pdf(self, file_path, chunk_size=bioblend.CHUNK_SIZE):
self.gi.gi.invocations.get_invocation_report_pdf(self.id, file_path, chunk_size)
def biocompute_object(self):
return self.gi.gi.invocations.get_invocation_biocompute_object(self.id)
def wait(self, maxwait=12000, interval=3, check=True):
inv_dict = self.gi.gi.invocations.wait_for_invocation(self.id, maxwait=maxwait, interval=interval, check=check)
self.__init__(inv_dict, gi=self.gi)
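    # A sketch of the invoke-then-wait flow (`wf` and `hist` are hypothetical
    # Workflow and History wrappers):
    #   inv = wf.invoke(inputs={...}, history=hist)
    #   inv.wait()  # polls and refreshes until the invocation reaches a terminal state
    #   print(inv.state, inv.step_states())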
class Dataset(Wrapper, metaclass=abc.ABCMeta):
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'data_type',
'file_ext',
'file_name',
'file_size',
'genome_build',
'misc_info',
'name',
'state',
)
POLLING_INTERVAL = 1 # for state monitoring
def __init__(self, ds_dict, container, gi=None):
super().__init__(ds_dict, gi=gi)
object.__setattr__(self, 'container', container)
@property
@abc.abstractmethod
def _stream_url(self):
pass
def get_stream(self, chunk_size=bioblend.CHUNK_SIZE):
kwargs = {'stream': True}
if isinstance(self, LibraryDataset):
kwargs['params'] = {'ld_ids%5B%5D': self.id}
r = self.gi.gi.make_get_request(self._stream_url, **kwargs)
if isinstance(self, LibraryDataset) and r.status_code == 500:
# compatibility with older Galaxy releases
kwargs['params'] = {'ldda_ids%5B%5D': self.id}
r = self.gi.gi.make_get_request(self._stream_url, **kwargs)
r.raise_for_status()
return r.iter_content(chunk_size) # FIXME: client can't close r
def peek(self, chunk_size=bioblend.CHUNK_SIZE):
try:
return next(self.get_stream(chunk_size=chunk_size))
except StopIteration:
return b''
def download(self, file_object, chunk_size=bioblend.CHUNK_SIZE):
for chunk in self.get_stream(chunk_size=chunk_size):
file_object.write(chunk)
def get_contents(self, chunk_size=bioblend.CHUNK_SIZE):
return b''.join(self.get_stream(chunk_size=chunk_size))
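    # The streaming helpers in practice (a sketch):
    #   with open('out.bin', 'wb') as f:
    #       ds.download(f)      # writes the dataset in CHUNK_SIZE pieces
    #   header = ds.peek()      # only the first chunk, as bytes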
def refresh(self):
gi_client = getattr(self.gi.gi, self.container.API_MODULE)
ds_dict = gi_client.show_dataset(self.container.id, self.id)
self.__init__(ds_dict, self.container, self.gi)
return self
def wait(self, polling_interval=POLLING_INTERVAL, break_on_error=True):
self.gi._wait_datasets([self], polling_interval=polling_interval,
break_on_error=break_on_error)
class HistoryDatasetAssociation(Dataset):
BASE_ATTRS = Dataset.BASE_ATTRS + ('annotation', 'deleted', 'purged', 'tags', 'visible')
SRC = 'hda'
@property
def _stream_url(self):
base_url = self.gi.gi.histories._make_url(module_id=self.container.id, contents=True)
return f"{base_url}/{self.id}/display"
def update(self, **kwds):
res = self.gi.gi.histories.update_dataset(self.container.id, self.id, **kwds)
self.container.refresh()
self.__init__(res, self.container, gi=self.gi)
return self
def delete(self, purge=False):
self.gi.gi.histories.delete_dataset(self.container.id, self.id, purge=purge)
self.container.refresh()
self.refresh()
class DatasetCollection(Wrapper, metaclass=abc.ABCMeta):
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'collection_type',
'deleted',
'name',
'state',
)
def __init__(self, dsc_dict, container, gi=None):
super().__init__(dsc_dict, gi=gi)
object.__setattr__(self, 'container', container)
def refresh(self):
gi_client = getattr(self.gi.gi, self.container.API_MODULE)
dsc_dict = gi_client.show_dataset_collection(self.container.id, self.id)
self.__init__(dsc_dict, self.container, self.gi)
return self
@abc.abstractmethod
def delete(self):
pass
class HistoryDatasetCollectionAssociation(DatasetCollection):
BASE_ATTRS = DatasetCollection.BASE_ATTRS + ('tags', 'visible', 'elements')
SRC = 'hdca'
def delete(self):
self.gi.gi.histories.delete_dataset_collection(self.container.id, self.id)
self.container.refresh()
self.refresh()
@abstractclass
class LibRelatedDataset(Dataset):
@property
def _stream_url(self):
base_url = self.gi.gi.libraries._make_url()
return f"{base_url}/datasets/download/uncompressed"
class LibraryDatasetDatasetAssociation(LibRelatedDataset):
BASE_ATTRS = LibRelatedDataset.BASE_ATTRS + ('deleted',)
SRC = 'ldda'
class LibraryDataset(LibRelatedDataset):
SRC = 'ld'
def delete(self, purged=False):
self.gi.gi.libraries.delete_library_dataset(
self.container.id, self.id, purged=purged)
self.container.refresh()
self.refresh()
def update(self, **kwds):
res = self.gi.gi.libraries.update_library_dataset(self.id, **kwds)
self.container.refresh()
self.__init__(res, self.container, gi=self.gi)
return self
@abstractclass
class ContentInfo(Wrapper):
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'name',
'type',
)
class LibraryContentInfo(ContentInfo):
    pass
class HistoryContentInfo(ContentInfo):
BASE_ATTRS = ContentInfo.BASE_ATTRS + ('deleted', 'state', 'visible')
class DatasetContainer(Wrapper, metaclass=abc.ABCMeta):
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'name',
)
def __init__(self, c_dict, content_infos=None, gi=None):
super().__init__(c_dict, gi=gi)
if content_infos is None:
content_infos = []
object.__setattr__(self, 'content_infos', content_infos)
object.__setattr__(self, 'obj_gi_client', getattr(self.gi, self.API_MODULE))
@property
@abc.abstractmethod
def API_MODULE(self):
pass
@property
def dataset_ids(self):
return [_.id for _ in self.content_infos if _.type == 'file']
def preview(self):
getf = self.obj_gi_client.get_previews
try:
p = [_ for _ in getf() if _.id == self.id][0]
except IndexError:
try:
p = [_ for _ in getf(deleted=True) if _.id == self.id][0]
except IndexError:
raise ValueError(f"no object for id {self.id}")
return p
def refresh(self):
fresh = self.obj_gi_client.get(self.id)
self.__init__(
fresh.wrapped, content_infos=fresh.content_infos, gi=self.gi)
return self
def get_dataset(self, ds_id):
gi_client = getattr(self.gi.gi, self.API_MODULE)
ds_dict = gi_client.show_dataset(self.id, ds_id)
return self.DS_TYPE(ds_dict, self, gi=self.gi)
def get_datasets(self, name=None):
if name is None:
ds_ids = self.dataset_ids
else:
ds_ids = [_.id for _ in self.content_infos if _.name == name]
return [self.get_dataset(_) for _ in ds_ids]
class History(DatasetContainer):
BASE_ATTRS = DatasetContainer.BASE_ATTRS + ('annotation', 'published', 'state', 'state_ids', 'state_details', 'tags')
DS_TYPE = HistoryDatasetAssociation
DSC_TYPE = HistoryDatasetCollectionAssociation
CONTENT_INFO_TYPE = HistoryContentInfo
API_MODULE = 'histories'
def update(self, **kwds):
self.gi.gi.histories.update_history(self.id, **kwds)
self.refresh()
return self
def delete(self, purge=False):
self.gi.histories.delete(id_=self.id, purge=purge)
self.refresh()
self.unmap()
def import_dataset(self, lds):
if not self.is_mapped:
raise RuntimeError('history is not mapped to a Galaxy object')
if not isinstance(lds, LibraryDataset):
raise TypeError('lds is not a LibraryDataset')
res = self.gi.gi.histories.upload_dataset_from_library(self.id, lds.id)
if not isinstance(res, Mapping):
raise RuntimeError(
f"upload_dataset_from_library: unexpected reply: {res!r}"
)
self.refresh()
return self.get_dataset(res['id'])
def upload_file(self, path, **kwargs):
out_dict = self.gi.gi.tools.upload_file(path, self.id, **kwargs)
self.refresh()
return self.get_dataset(out_dict['outputs'][0]['id'])
upload_dataset = upload_file
def upload_from_ftp(self, path, **kwargs):
out_dict = self.gi.gi.tools.upload_from_ftp(path, self.id, **kwargs)
self.refresh()
return self.get_dataset(out_dict['outputs'][0]['id'])
def paste_content(self, content, **kwargs):
out_dict = self.gi.gi.tools.paste_content(content, self.id, **kwargs)
self.refresh()
return self.get_dataset(out_dict['outputs'][0]['id'])
def export(self, gzip=True, include_hidden=False, include_deleted=False,
wait=False, maxwait=None):
return self.gi.gi.histories.export_history(
self.id, gzip=gzip, include_hidden=include_hidden,
include_deleted=include_deleted, wait=wait, maxwait=maxwait)
def download(self, jeha_id, outf, chunk_size=bioblend.CHUNK_SIZE):
return self.gi.gi.histories.download_history(
self.id, jeha_id, outf, chunk_size=chunk_size)
def create_dataset_collection(self, collection_description):
dataset_collection = self.gi.gi.histories.create_dataset_collection(self.id, collection_description)
self.refresh()
return self.get_dataset_collection(dataset_collection['id'])
def get_dataset_collection(self, dsc_id):
dsc_dict = self.gi.gi.histories.show_dataset_collection(self.id, dsc_id)
return self.DSC_TYPE(dsc_dict, self, gi=self.gi)
class Library(DatasetContainer):
BASE_ATTRS = DatasetContainer.BASE_ATTRS + ('description', 'synopsis')
DS_TYPE = LibraryDataset
CONTENT_INFO_TYPE = LibraryContentInfo
API_MODULE = 'libraries'
@property
def folder_ids(self):
return [_.id for _ in self.content_infos if _.type == 'folder']
def delete(self):
self.gi.libraries.delete(id_=self.id)
self.refresh()
self.unmap()
def _pre_upload(self, folder):
if not self.is_mapped:
raise RuntimeError('library is not mapped to a Galaxy object')
return None if folder is None else folder.id
def upload_data(self, data, folder=None, **kwargs):
fid = self._pre_upload(folder)
res = self.gi.gi.libraries.upload_file_contents(
self.id, data, folder_id=fid, **kwargs)
self.refresh()
return self.get_dataset(res[0]['id'])
def upload_from_url(self, url, folder=None, **kwargs):
fid = self._pre_upload(folder)
res = self.gi.gi.libraries.upload_file_from_url(
self.id, url, folder_id=fid, **kwargs)
self.refresh()
return self.get_dataset(res[0]['id'])
def upload_from_local(self, path, folder=None, **kwargs):
fid = self._pre_upload(folder)
res = self.gi.gi.libraries.upload_file_from_local_path(
self.id, path, folder_id=fid, **kwargs)
self.refresh()
return self.get_dataset(res[0]['id'])
def upload_from_galaxy_fs(self, paths, folder=None, link_data_only=None, **kwargs):
fid = self._pre_upload(folder)
if isinstance(paths, str):
paths = (paths,)
paths = '\n'.join(paths)
res = self.gi.gi.libraries.upload_from_galaxy_filesystem(
self.id, paths, folder_id=fid, link_data_only=link_data_only,
**kwargs)
if res is None:
raise RuntimeError('upload_from_galaxy_filesystem: no reply')
if not isinstance(res, Sequence):
raise RuntimeError(
f"upload_from_galaxy_filesystem: unexpected reply: {res!r}"
)
new_datasets = [
self.get_dataset(ds_info['id']) for ds_info in res
]
self.refresh()
return new_datasets
def copy_from_dataset(self, hda, folder=None, message=''):
fid = self._pre_upload(folder)
res = self.gi.gi.libraries.copy_from_dataset(
self.id, hda.id, folder_id=fid, message=message)
self.refresh()
return self.get_dataset(res['library_dataset_id'])
def create_folder(self, name, description=None, base_folder=None):
bfid = None if base_folder is None else base_folder.id
res = self.gi.gi.libraries.create_folder(
self.id, name, description=description, base_folder_id=bfid)
self.refresh()
return self.get_folder(res[0]['id'])
def get_folder(self, f_id):
f_dict = self.gi.gi.libraries.show_folder(self.id, f_id)
return Folder(f_dict, self, gi=self.gi)
@property
def root_folder(self):
return self.get_folder(self.gi.gi.libraries._get_root_folder_id(self.id))
class Folder(Wrapper):
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'description',
'item_count',
'name',
)
def __init__(self, f_dict, container, gi=None):
super().__init__(f_dict, gi=gi)
object.__setattr__(self, 'container', container)
@property
def parent(self):
if self._cached_parent is None:
object.__setattr__(self,
'_cached_parent',
self._get_parent())
return self._cached_parent
def _get_parent(self):
parent_id = self.wrapped['parent_id']
if parent_id is None:
return None
return self.container.get_folder(parent_id)
def refresh(self):
f_dict = self.gi.gi.libraries.show_folder(self.container.id, self.id)
self.__init__(f_dict, self.container, gi=self.gi)
return self
class Tool(Wrapper):
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'name',
'version',
)
POLLING_INTERVAL = 10 # for output state monitoring
def run(self, inputs, history, wait=False,
polling_interval=POLLING_INTERVAL):
for k, v in inputs.items():
if isinstance(v, Dataset):
inputs[k] = {'src': v.SRC, 'id': v.id}
out_dict = self.gi.gi.tools.run_tool(history.id, self.id, inputs)
outputs = [history.get_dataset(_['id']) for _ in out_dict['outputs']]
if wait:
self.gi._wait_datasets(outputs, polling_interval=polling_interval)
return outputs
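    # e.g. (a sketch; the input keys depend entirely on the tool's own
    # parameters, and 'Cut1' is just an example tool id):
    #   tool = gi.tools.get('Cut1')
    #   out = tool.run({'input': ds, 'columnList': 'c1,c2'}, hist, wait=True)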
class Job(Wrapper):
BASE_ATTRS = Wrapper.BASE_ATTRS + ('state',)
@abstractclass
class DatasetContainerPreview(Wrapper):
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'name',
)
class LibraryPreview(DatasetContainerPreview):
    pass
class HistoryPreview(DatasetContainerPreview):
BASE_ATTRS = DatasetContainerPreview.BASE_ATTRS + (
'annotation',
'published',
'purged',
'tags',
)
class WorkflowPreview(Wrapper):
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'latest_workflow_uuid',
'name',
'number_of_steps',
'owner',
'published',
'show_in_tool_panel',
'tags',
)
class InvocationPreview(Wrapper):
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'history_id',
'id',
'state',
'update_time',
'uuid',
'workflow_id',
)
class JobPreview(Wrapper):
BASE_ATTRS = Wrapper.BASE_ATTRS + ('state',)
| true | true |
f719fecd156687882e482eac8d27cf8aaffcf379 | 177 | py | Python | python/positive.py | scienceacademy/apcsp_2021 | 11efd0216d3042e556e726268c622d8f0d568c18 | ["MIT"] | null | null | null | python/positive.py | scienceacademy/apcsp_2021 | 11efd0216d3042e556e726268c622d8f0d568c18 | ["MIT"] | null | null | null | python/positive.py | scienceacademy/apcsp_2021 | 11efd0216d3042e556e726268c622d8f0d568c18 | ["MIT"] | null | null | null |
def main():
n = get_positive_int()
def get_positive_int():
while True:
n = int(input("Enter a positive number: "))
if n > 0:
return n
main()
| 19.666667 | 51 | 0.542373 |
| true | true |
f719fee29c71e4ea44c3434fb019c8f5e47ff986 | 16096 | py | Python | tests/test_brew_views.py | zgoda/brewlog | 13a930b328f81d01a2be9aca07d3b14703b80faa | ["BSD-3-Clause"] | 3 | 2019-03-11T04:30:06.000Z | 2020-01-26T03:21:52.000Z | tests/test_brew_views.py | zgoda/brewlog | 13a930b328f81d01a2be9aca07d3b14703b80faa | ["BSD-3-Clause"] | 23 | 2019-02-06T20:37:37.000Z | 2020-06-01T07:08:35.000Z | tests/test_brew_views.py | zgoda/brewlog | 13a930b328f81d01a2be9aca07d3b14703b80faa | ["BSD-3-Clause"] | null | null | null |
import datetime
import pytest
from flask import url_for
from brewlog.ext import db
from brewlog.models import Brew
from . import BrewlogTests
class BrewViewTests(BrewlogTests):
@pytest.fixture(autouse=True)
def set_up(self, user_factory, brewery_factory):
self.public_user = user_factory(
first_name='John', last_name='Public'
)
self.public_brewery = brewery_factory(
name='public brewery', brewer=self.public_user
)
self.hidden_user = user_factory(
is_public=False, first_name='Rebecca', last_name='Hidden'
)
self.hidden_brewery = brewery_factory(
name='hidden brewery', brewer=self.hidden_user
)
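# Note: several of the view-test classes below add their own autouse `set_up2`
# fixture; pytest applies it on top of the shared `set_up` above, so each test
# gets the common users/breweries plus its class-specific URL or brew object.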
@pytest.mark.usefixtures('client_class')
class TestBrewDetailsView(BrewViewTests):
def url(self, brew):
return url_for('brew.details', brew_id=brew.id)
def test_get_404(self):
rv = self.client.get(url_for('brew.details', brew_id=666))
assert rv.status_code == 404
def test_get_no_access_hidden_brewery(self, brew_factory):
brew = brew_factory(brewery=self.hidden_brewery, name='hb1')
self.login(self.public_user.email)
rv = self.client.get(self.url(brew))
assert rv.status_code == 404
def test_get_no_access_hidden_brew(self, brew_factory):
brew = brew_factory(
brewery=self.public_brewery, is_public=False, name='hb1'
)
self.login(self.hidden_user.email)
rv = self.client.get(self.url(brew))
assert rv.status_code == 404
def test_post_anon(self, brew_factory):
brew = brew_factory(
brewery=self.public_brewery, name='pb1', code='xxx'
)
data = {
'name': brew.name,
'brewery': brew.brewery.id,
'code': '001',
'carbonation_level': 'low',
'carbonation_type': 'bottles with priming',
}
rv = self.client.post(self.url(brew), data=data)
assert rv.status_code == 403
def test_post_non_brewer(self, brew_factory):
brew = brew_factory(
brewery=self.public_brewery, name='pb1', code='xxx'
)
self.login(self.hidden_user.email)
data = {
'name': brew.name,
'brewery': brew.brewery.id,
'code': '001',
'carbonation_level': 'low',
'carbonation_type': 'bottles with priming',
}
rv = self.client.post(self.url(brew), data=data, follow_redirects=True)
assert rv.status_code == 403
def test_post_data_ok(self, brew_factory):
brew = brew_factory(
brewery=self.public_brewery, name='pb1', code='xxx'
)
self.login(self.public_user.email)
data = {
'name': brew.name,
'brewery': brew.brewery.id,
'code': '001',
'carbonation_level': 'low',
'carbonation_type': 'bottles with priming',
}
rv = self.client.post(self.url(brew), data=data, follow_redirects=True)
assert rv.status_code == 200
assert 'data updated' in rv.text
assert Brew.query.get(brew.id).code == data['code']
def test_post_data_missing(self, brew_factory):
brew = brew_factory(brewery=self.public_brewery, name='pb1', code='xxx')
self.login(self.public_user.email)
data = {
'name': None,
'brewery': brew.brewery.id,
'code': '001',
'carbonation_level': 'low',
'carbonation_type': 'bottles with priming',
}
rv = self.client.post(self.url(brew), data=data, follow_redirects=True)
assert rv.status_code == 200
assert 'field is required' in rv.text
assert 'data updated' not in rv.text
def test_state_form_present(self, brew_factory):
brewed = datetime.date(1992, 12, 4)
bottled = datetime.date(1993, 1, 12)
        tapped = datetime.date(1993, 3, 8)
        brew = brew_factory(
            brewery=self.public_brewery, name='pb1', date_brewed=brewed,
            bottling_date=bottled, tapped=tapped
        )
)
self.login(self.public_user.email)
rv = self.client.get(self.url(brew))
assert url_for('brew.chgstate', brew_id=brew.id) in rv.text
def test_attenuation_display_none(self, brew_factory):
brew = brew_factory(brewery=self.public_brewery, name='pb1')
self.login(self.public_user.email)
rv = self.client.get(self.url(brew))
assert 'apparent' not in rv.text
@pytest.mark.usefixtures('client_class')
class TestBrewDetailsNavigation(BrewViewTests):
def url(self, brew):
return url_for('brew.details', brew_id=brew.id)
@pytest.mark.parametrize('anon', [
False, True,
], ids=['authenticated', 'anonymous'])
def test_brew_navigation_non_owner(self, anon, brew_factory):
p2_brew = brew_factory(brewery=self.public_brewery)
p1_brew = brew_factory(brewery=self.public_brewery, is_public=False)
brew = brew_factory(brewery=self.public_brewery)
n1_brew = brew_factory(brewery=self.public_brewery, is_public=False)
n2_brew = brew_factory(brewery=self.public_brewery)
if not anon:
self.login(self.hidden_user.email)
rv = self.client.get(self.url(brew))
assert f'href="{self.url(p2_brew)}"' in rv.text
assert f'href="{self.url(p1_brew)}"' not in rv.text
assert f'href="{self.url(n1_brew)}"' not in rv.text
assert f'href="{self.url(n2_brew)}"' in rv.text
def test_brew_navigation_owner(self, brew_factory):
p1_brew = brew_factory(brewery=self.public_brewery, is_public=False)
brew = brew_factory(brewery=self.public_brewery)
n1_brew = brew_factory(brewery=self.public_brewery, is_public=False)
self.login(self.public_user.email)
rv = self.client.get(self.url(brew))
assert f'href="{self.url(p1_brew)}"' in rv.text
assert f'href="{self.url(n1_brew)}"' in rv.text
@pytest.mark.usefixtures('client_class')
class TestBrewListView(BrewViewTests):
@pytest.fixture(autouse=True)
def set_up2(self):
self.url = url_for('brew.all')
def details_url(self, brew):
return url_for('brew.details', brew_id=brew.id)
def delete_url(self, brew):
return url_for('brew.delete', brew_id=brew.id)
def test_anon(self, brew_factory):
hb_hb = brew_factory(brewery=self.hidden_brewery, is_public=False)
pb_hb = brew_factory(brewery=self.hidden_brewery, is_public=True)
pb_pb = brew_factory(brewery=self.public_brewery, is_public=True)
hb_pb = brew_factory(brewery=self.public_brewery, is_public=False)
rv = self.client.get(self.url)
assert url_for('brew.details', brew_id=pb_pb.id) in rv.text
assert url_for('brew.delete', brew_id=pb_pb.id) not in rv.text
assert url_for('brew.details', brew_id=hb_hb.id) not in rv.text
assert url_for('brew.details', brew_id=pb_hb.id) not in rv.text
assert url_for('brew.details', brew_id=hb_pb.id) not in rv.text
def test_authenticated(self, user_factory, brewery_factory, brew_factory):
user2 = user_factory(first_name='Ivory', last_name='Tower')
brewery2 = brewery_factory(brewer=user2, name='brewery2')
pb1 = brew_factory(brewery=self.public_brewery)
pb2 = brew_factory(brewery=brewery2)
hb1 = brew_factory(name='hidden1', brewery=self.public_brewery, is_public=False)
hb2 = brew_factory(name='hidden2', brewery=brewery2, is_public=False)
hb3 = brew_factory(name='hidden3', brewery=self.hidden_brewery)
hb4 = brew_factory(name='hidden4', brewery=self.hidden_brewery, is_public=False)
self.login(email=self.public_user.email)
rv = self.client.get(self.url)
assert f'href="{self.details_url(pb1)}"' in rv.text
assert f'href="{self.delete_url(pb1)}"' in rv.text
assert f'href="{self.details_url(pb2)}"' in rv.text
assert f'href="{self.delete_url(pb2)}"' not in rv.text
assert f'href="{self.details_url(hb1)}"' in rv.text
assert f'href="{self.details_url(hb2)}"' not in rv.text
assert f'href="{self.details_url(hb3)}"' not in rv.text
assert f'href="{self.details_url(hb4)}"' not in rv.text
@pytest.mark.usefixtures('client_class')
class TestJsonViews(BrewViewTests):
def test_prefetch_anon(self, brew_factory):
brew1 = brew_factory(brewery=self.public_brewery, name='pb1')
brew_factory(brewery=self.hidden_brewery, name='hb2')
rv = self.client.get(url_for('brew.search'))
data = rv.get_json()
assert len(data) == 1
assert data[0]['name'] == brew1.name
def test_prefetch_auth(self, brew_factory):
brew_factory(brewery=self.public_brewery, name='pb1')
brew_h = brew_factory(brewery=self.public_brewery, name='hb2', is_public=False)
self.login(self.public_user.email)
rv = self.client.get(url_for('brew.search'))
data = rv.get_json()
assert len(data) == 2
names = [x['name'] for x in data]
assert brew_h.name in names
def test_search_anon(self, brew_factory):
brew_p = brew_factory(brewery=self.public_brewery, name='pb1')
brew_h = brew_factory(brewery=self.public_brewery, name='hb2', is_public=False)
rv = self.client.get(url_for('brew.search', q=brew_p.name))
data = rv.get_json()
assert len(data) == 1
assert data[0]['name'] == brew_p.name
rv = self.client.get(url_for('brew.search', q=brew_h.name))
data = rv.get_json()
assert len(data) == 0
def test_search_auth(self, brew_factory):
brew_p = brew_factory(brewery=self.public_brewery, name='pb1')
brew_h = brew_factory(brewery=self.public_brewery, name='hb2', is_public=False)
self.login(self.public_user.email)
rv = self.client.get(url_for('brew.search', q=brew_p.name))
data = rv.get_json()
assert len(data) == 1
assert data[0]['name'] == brew_p.name
rv = self.client.get(url_for('brew.search', q=brew_h.name))
data = rv.get_json()
assert len(data) == 1
assert data[0]['name'] == brew_h.name
@pytest.mark.usefixtures('client_class')
class TestStateChangeView(BrewViewTests):
@pytest.fixture(autouse=True)
def set_up2(self, brew_factory):
self.brew = brew_factory(
brewery=self.public_brewery,
name='pale ale',
date_brewed=datetime.date.today() - datetime.timedelta(days=30),
bottling_date=datetime.date.today() - datetime.timedelta(days=10),
)
self.url = url_for('brew.chgstate', brew_id=self.brew.id)
def test_brew_tap_anon(self):
rv = self.client.post(self.url, data={'action': 'tap'})
assert url_for('auth.select') in rv.headers['Location']
def test_brew_tap_nonbrewer(self):
self.login(self.hidden_user.email)
rv = self.client.post(self.url, data={'action': 'tap'}, follow_redirects=True)
assert rv.status_code == 403
assert "You don't have permission to access this page" in rv.text
def test_brew_tap_brewer(self):
self.login(self.public_user.email)
rv = self.client.post(self.url, data={'action': 'tap'}, follow_redirects=True)
assert f'</strong>: {Brew.STATE_TAPPED}' in rv.text
assert 'state changed' in rv.text
def test_brew_untap_brewer(self):
self.brew.tapped = datetime.datetime.today() - datetime.timedelta(days=2)
db.session.add(self.brew)
db.session.commit()
self.login(self.public_user.email)
rv = self.client.post(
self.url, data={'action': 'untap'}, follow_redirects=True
)
assert f'</strong>: {Brew.STATE_MATURING}' in rv.text
assert 'state changed' in rv.text
def test_brew_finish_brewer(self):
self.login(self.public_user.email)
rv = self.client.post(
self.url, data={'action': 'finish'}, follow_redirects=True
)
assert f'</strong>: {Brew.STATE_FINISHED}' in rv.text
assert 'state changed' in rv.text
assert self.brew.tapped is None
def test_invalid_state(self):
self.login(self.public_user.email)
rv = self.client.post(
self.url, data={'action': 'dummy'}, follow_redirects=True
)
assert 'invalid state' in rv.text
@pytest.mark.usefixtures('client_class')
class TestBrewAddView(BrewViewTests):
@pytest.fixture(autouse=True)
def set_up2(self):
self.url = url_for('brew.add')
def test_get_anon(self):
rv = self.client.get(self.url)
assert rv.status_code == 302
assert url_for('auth.select') in rv.headers['location']
def test_get_authenticated(self):
self.login(email=self.public_user.email)
rv = self.client.get(self.url)
assert f'action="{self.url}"' in rv.text
def test_post_anon(self):
data = {
'name': 'pale ale',
'brewery': self.public_brewery.id,
'carbonation_type': 'keg with priming',
'carbonation_level': 'low',
}
rv = self.client.post(self.url, data=data)
assert rv.status_code == 302
assert url_for('auth.select') in rv.headers['location']
def test_post_authenticated_own_brewery(self):
name = 'pale ale'
data = {
'name': name,
'brewery': self.public_brewery.id,
'carbonation_type': 'keg with priming',
'carbonation_level': 'low',
}
self.login(email=self.public_user.email)
rv = self.client.post(self.url, data=data, follow_redirects=True)
assert f'{name} created' in rv.text
def test_post_authenticated_other_brewery(self):
data = {
'name': 'pale ale',
'brewery': self.public_brewery.id,
'carbonation_type': 'keg with priming',
'carbonation_level': 'low',
}
self.login(email=self.hidden_user.email)
rv = self.client.post(self.url, data=data)
assert rv.status_code == 200
assert 'Not a valid choice' in rv.text
assert Brew.query.filter_by(name=data['name']).first() is None
@pytest.mark.usefixtures('client_class')
class TestBrewDeleteView(BrewViewTests):
@pytest.fixture(autouse=True)
def set_up2(self, brew_factory):
self.brew = brew_factory(
brewery=self.public_brewery,
name='pale ale',
date_brewed=datetime.date.today() - datetime.timedelta(days=30),
bottling_date=datetime.date.today() - datetime.timedelta(days=10),
)
self.url = url_for('brew.delete', brew_id=self.brew.id)
def test_get_anon(self):
rv = self.client.get(self.url)
assert rv.status_code == 302
assert url_for('auth.select') in rv.headers['Location']
def test_get_owner(self):
self.login(email=self.public_user.email)
rv = self.client.get(self.url)
assert f'action="{self.url}"' in rv.text
def test_get_non_owner(self):
self.login(email=self.hidden_user.email)
rv = self.client.get(self.url)
assert rv.status_code == 403
def test_post_anon(self):
rv = self.client.post(self.url, data={'delete_it': True})
assert rv.status_code == 302
assert url_for('auth.select') in rv.headers['Location']
assert Brew.query.get(self.brew.id) is not None
def test_post_owner(self):
self.login(email=self.public_user.email)
rv = self.client.post(self.url, data={'delete_it': True}, follow_redirects=True)
assert rv.status_code == 200
assert Brew.query.get(self.brew.id) is None
def test_post_non_owner(self):
self.login(email=self.hidden_user.email)
rv = self.client.post(self.url, data={'delete_it': True}, follow_redirects=True)
assert rv.status_code == 403
| 38.879227 | 88 | 0.636245 |
| true | true |
f719ffebb722b8308f0638a092a790eb9e2845a8 | 18480 | py | Python | mindmeld/converter/dialogflow.py | derekmpham/mindmeld | 18189f956e4e3eb92df61fde95ec82f73b9efa91 | ["Apache-2.0"] | null | null | null | mindmeld/converter/dialogflow.py | derekmpham/mindmeld | 18189f956e4e3eb92df61fde95ec82f73b9efa91 | ["Apache-2.0"] | null | null | null | mindmeld/converter/dialogflow.py | derekmpham/mindmeld | 18189f956e4e3eb92df61fde95ec82f73b9efa91 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the DialogflowConverter class used to convert Dialogflow projects
into Mindmeld projects"""
import json
import logging
import os
import re
from sklearn.model_selection import train_test_split
from mindmeld.converter.converter import Converter
logger = logging.getLogger(__name__)
class DialogflowConverter(Converter):
"""The class is a sub class of the abstract Converter class. This class
contains the methods required to convert a Dialogflow project into a MindMeld project
"""
sys_entity_map = {
"@sys.date-time": "sys_interval",
"@sys.date": "sys_time",
"@sys.date-period": "sys_interval",
"@sys.time": "sys_time",
"@sys.time-period": "sys_duration",
"@sys.duration": "sys_duration",
"@sys.number": "sys_number",
"@sys.cardinal": "sys_number",
"@sys.ordinal": "sys_ordinal",
"@sys.unit-currency": "sys_amount-of-money",
"@sys.unit-volume": "sys_volume",
"@sys.email": "sys_email",
"@sys.phone-number": "sys_phone-number",
"@sys.url": "sys_url",
}
# TODO: provide support for entities listed in sys_entity_map_todo
sys_entity_map_todo = [
"@sys.number-integer",
"@sys.number-sequence",
"@sys.flight-number",
"@sys.unit-area",
"@sys.unit-length",
"@sys.unit-speed",
"@sys.unit-information",
"@sys.percentage",
"@sys.temperature",
"@sys.duration",
"@sys.age",
"@sys.currency-name",
"@sys.unit-area-name",
"@sys.unit-length-name",
"@sys.unit-speed-name",
"@sys.unit-volume-name",
"@sys.unit-weight-name",
"@sys.unit-information-name",
"@sys.address",
"@sys.zip-code",
"@sys.geo-capital",
"@sys.geo-country",
"@sys.geo-country-code",
"@sys.geo-city",
"@sys.geo-state",
"@sys.geo-city",
"@sys.geo-state",
"@sys.place-attraction",
"@sys.airport",
"@sys.location",
"@sys.given-name",
"@sys.last-name",
"@sys.person",
"@sys.music-artist",
"@sys.music-genre",
"@sys.color",
"@sys.language",
"@sys.any",
]
def __init__(self, dialogflow_project_directory, mindmeld_project_directory):
if os.path.exists(os.path.dirname(dialogflow_project_directory)):
self.dialogflow_project_directory = dialogflow_project_directory
self.mindmeld_project_directory = mindmeld_project_directory
self.directory = os.path.dirname(os.path.realpath(__file__))
self.entities_list = set()
self.intents_list = set()
else:
msg = "`{dialogflow_project_directory}` does not exist. Please verify."
msg = msg.format(dialogflow_project_directory=dialogflow_project_directory)
raise FileNotFoundError(msg)
def create_mindmeld_directory(self):
self.create_directory(self.mindmeld_project_directory)
self.create_directory(os.path.join(self.mindmeld_project_directory, "data"))
self.create_directory(os.path.join(self.mindmeld_project_directory, "domains"))
self.create_directory(
os.path.join(self.mindmeld_project_directory, "domains", "general")
)
self.create_directory(os.path.join(self.mindmeld_project_directory, "entities"))
# =========================
# create training data (entities, intents)
# =========================
def _create_entities_directories(self, entities):
""" Creates directories + files for all languages/files.
Currently does not use meta data in entityName.json files (the keys in var entities).
"""
for languages in entities.values():
for sub in languages.values():
dialogflow_entity_file = os.path.join(
self.dialogflow_project_directory, "entities", sub + ".json"
)
mindmeld_entity_directory_name = self.clean_check(
sub, self.entities_list
)
mindmeld_entity_directory = os.path.join(
self.mindmeld_project_directory,
"entities",
mindmeld_entity_directory_name,
)
self.create_directory(mindmeld_entity_directory)
self._create_entity_file(
dialogflow_entity_file, mindmeld_entity_directory
)
@staticmethod
def _create_entity_file(dialogflow_entity_file, mindmeld_entity_directory):
source_en = open(dialogflow_entity_file, "r")
target_gazetteer = open(
os.path.join(mindmeld_entity_directory, "gazetteer.txt"), "w"
)
target_mapping = open(
os.path.join(mindmeld_entity_directory, "mapping.json"), "w"
)
datastore = json.load(source_en)
mapping_dict = {"entities": []}
for item in datastore:
new_dict = {}
while ("value" in item) and (item["value"] in item["synonyms"]):
item["synonyms"].remove(item["value"])
new_dict["whitelist"] = item["synonyms"]
new_dict["cname"] = item["value"]
mapping_dict["entities"].append(new_dict)
target_gazetteer.write(item["value"] + "\n")
json.dump(mapping_dict, target_mapping, ensure_ascii=False, indent=2)
source_en.close()
target_gazetteer.close()
target_mapping.close()
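    # For a Dialogflow entry like {"value": "NYC", "synonyms": ["NYC", "new york"]}
    # the method above would produce (a sketch of the generated files):
    #   gazetteer.txt  ->  NYC
    #   mapping.json   ->  {"entities": [{"whitelist": ["new york"], "cname": "NYC"}]}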
def _create_intents_directories(self, intents):
""" Creates directories + files for all languages/files."""
for languages in intents.values():
for language, sub in languages.items():
dialogflow_intent_file = os.path.join(
self.dialogflow_project_directory, "intents", sub + ".json"
)
mindmeld_intent_directory_name = self.clean_check(
sub, self.intents_list
)
mindmeld_intent_directory = os.path.join(
self.mindmeld_project_directory,
"domains",
"general",
mindmeld_intent_directory_name,
)
self.create_directory(mindmeld_intent_directory)
self._create_intent_file(
dialogflow_intent_file, mindmeld_intent_directory, language
)
def _create_intent_file(
self, dialogflow_intent_file, mindmeld_intent_directory, language
):
source_en = open(dialogflow_intent_file, "r")
target_test = open(os.path.join(mindmeld_intent_directory, "test.txt"), "w")
target_train = open(os.path.join(mindmeld_intent_directory, "train.txt"), "w")
datastore = json.load(source_en)
all_text = []
for usersay in datastore:
sentence = ""
for texts in usersay["data"]:
df_text = texts["text"]
if "meta" in texts and texts["meta"] != "@sys.ignore":
df_meta = texts["meta"]
if re.match(
"(@sys.).+", df_meta
): # if text is a dialogflow sys entity
if df_meta in DialogflowConverter.sys_entity_map:
mm_meta = DialogflowConverter.sys_entity_map[df_meta]
else:
mm_meta = "[DNE: {sysEntity}]".format(sysEntity=df_meta[1:])
logger.info(
"Unfortunately mindmeld does not currently support"
"%s as a sys entity."
"Please create an entity for this.",
df_meta[1:],
)
entity_type = self.clean_name(mm_meta) + "_entries_" + language
part = "{" + df_text + "|" + entity_type + "}"
else:
entity_type = (
self.clean_name(df_meta[1:]) + "_entries_" + language
)
part = "{" + df_text + "|" + entity_type + "}"
else:
part = df_text
sentence += part
all_text.append(sentence)
train, test = train_test_split(all_text, test_size=0.2)
target_test.write("\n".join(test))
target_train.write("\n".join(train))
source_en.close()
target_test.close()
target_train.close()
def _get_file_names(self, level):
""" Gets the names of the entities from Dialogflow as a dictionary.
levels (str): either "entities" or "intents"
ex. if we had the following files in our entities directory:
["test.json", "test_entries_en.json", "test_entries_de.json"]
it returns:
{'test': {'en': 'test_entries_en', 'de': 'test_entries_de'}} """
directory = os.path.join(self.dialogflow_project_directory, level)
files = os.listdir(directory)
w = {"entities": "entries", "intents": "usersays"}
p = r".+(?<=(_" + w[level] + "_))(.*)(?=(.json))"
info = {}
for name in files:
match = re.match(p, name)
if match:
isbase = False
base = name[: match.start(1)]
language = match.group(2)
else:
isbase = True
base = name[:-5]
if base not in info:
info[base] = {}
if not isbase:
info[base][language] = name[:-5]
return info
def create_mindmeld_training_data(self):
entities = self._get_file_names("entities")
self._create_entities_directories(entities)
intents = self._get_file_names("intents")
self._create_intents_directories(intents)
# =========================
# create init
# =========================
@staticmethod
def create_handle(params):
return "@app.handle(" + params + ")"
@staticmethod
def create_header(function_name):
return "def " + function_name + "(request, responder):"
@staticmethod
def create_function(handles, function_name, replies):
assert isinstance(handles, list)
result = ""
for handle in handles:
result += DialogflowConverter.create_handle(handle) + "\n"
result += DialogflowConverter.create_header(function_name) + "\n"
result += " " + "replies = {}".format(replies) + "\n"
result += " " + "responder.reply(replies)"
return result
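    # For example (reconstructed from the helpers above, not from MindMeld docs):
    #   create_function(handles=["intent='greet_usersays_en'"],
    #                   function_name='renameMe0_en', replies=['Hi!'])
    # returns the handler source:
    #   @app.handle(intent='greet_usersays_en')
    #   def renameMe0_en(request, responder):
    #       replies = ['Hi!']
    #       responder.reply(replies)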
@staticmethod
def clean_name(name):
""" Takes in a string and returns a valid folder name (no spaces, all lowercase)."""
name = re.sub(r"[^\w\s-]", "", name).strip().lower()
name = re.sub(r"[-\s]+", "_", name)
return name
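    # e.g. clean_name("Book Flight!") -> "book_flight"
    #      clean_name("My-Entity Name") -> "my_entity_name"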
def clean_check(self, name, lst):
""" Takes in a list of strings and a name.
Returns name cleaned if the cleaned name is not found in lst."""
cleaned = self.clean_name(name)
if cleaned not in lst:
lst.add(cleaned)
return cleaned
        else:
            # Duplicate (or colliding) cleaned names: log the error and fall
            # through, implicitly returning None.
            logger.error(
                "%s name has been created twice. Please ensure there "
                "are no duplicate names in the dialogflow files and "
                "filenames are valid (no spaces or special characters)",
                cleaned,
            )
def create_mindmeld_init(self):
with open(
os.path.join(self.mindmeld_project_directory, "__init__.py"), "w"
) as target:
begin_info = [
"# -*- coding: utf-8 -*-",
'"""This module contains the MindMeld application"""',
"from mindmeld import Application",
"app = Application(__name__)",
"__all__ = ['app']",
]
for info, spacing in zip(begin_info, [1, 2, 1, 1, 0]):
target.write(info + "\n" * spacing)
intents = self._get_file_names("intents")
for i, main in enumerate(intents.keys()):
df_main = os.path.join(
self.dialogflow_project_directory, "intents", main + ".json"
)
with open(df_main) as source:
if "usersays" in df_main:
logger.error(
"Please check if your intent file"
"names are correctly labeled."
)
datastore = json.load(source)
replies = []
for response in datastore["responses"]:
for message in response["messages"]:
language = message["lang"]
if "speech" in message:
data = message["speech"]
replies = data if isinstance(data, list) else [data]
if datastore["fallbackIntent"]:
function_name = "default" + "_" + language
if language == "en":
# TODO: support multiple defaults for languages
handles = [
"default=True",
"intent='unsupported'",
]
else:
handles = ["intent='unsupported'"]
else:
function_name = "renameMe" + str(i) + "_" + language
handles = [
"intent="
+ "'"
+ self.clean_name(datastore["name"])
+ "_usersays_"
+ language
+ "'"
]
target.write(
"\n\n\n"
+ self.create_function(
handles=handles,
function_name=function_name,
replies=replies,
)
)
target.write("\n")
# =========================
# convert project
# =========================
def convert_project(self):
""" Converts a Dialogflow project into a MindMeld project.
Dialogflow projects consist of entities and intents.
note on languages:
Dialogflow supports multiple languages and locales. They store their training
data for different languages in different files. So, the name of each training
file ends with a meta tag, two letters long for language, and an additional
two letters for dialect (if applicable). For example, a file ending in "_en-au"
indicates it's in English (Australia). Below we use "la" to represent this
meta tag.
entities folder contains:
entityName.json - Meta data about entityName for all languages.
            entityName_entries_la.json - One for each language, contains entity mappings.
        intents folder contains:
            intentName.json - Contains rules, conversation-flow information, and
                meta data, including the responses for all languages.
            intentName_usersays_la.json - One for each language,
                contains the training data used to recognize intentName.
Limitations:
- The converter is unable to create an entity when it encounters an
unrecognized entity (an entity not defined under entities folder
or system entities), and labels such entities as DNE in training data.
- The converter currently does not automatically convert features like
slot filling, contexts, and follow-up intents. Users can still implement such
features and more.
        - Information in agent.json is not copied over.
        - There is no official support for multiple languages, but users can
          still implement it themselves; the converter successfully converts
          Dialogflow bots that support multiple languages.
Mindmeld:
- Users can store data locally
- Users can build a knowledge base (currently beta in Dialogflow).
- Users can configure the machine learning models to best suit their needs.
- Users have more flexibility in defining their own features, including
ones like slot filling, contexts, and follow-up intents.
"""
logger.info("Converting project.")
# Create project directory with sub folders
self.create_mindmeld_directory()
        # Transfer the training data from the Dialogflow project and reformat it for the MindMeld project
self.create_mindmeld_training_data()
file_loc = os.path.dirname(os.path.realpath(__file__))
self.create_config(self.mindmeld_project_directory, file_loc)
self.create_main(self.mindmeld_project_directory, file_loc)
self.create_mindmeld_init()
logger.info("Project converted.")
| 39.069767
| 97
| 0.541288
|
import json
import logging
import os
import re
from sklearn.model_selection import train_test_split
from mindmeld.converter.converter import Converter
logger = logging.getLogger(__name__)
class DialogflowConverter(Converter):
sys_entity_map = {
"@sys.date-time": "sys_interval",
"@sys.date": "sys_time",
"@sys.date-period": "sys_interval",
"@sys.time": "sys_time",
"@sys.time-period": "sys_duration",
"@sys.duration": "sys_duration",
"@sys.number": "sys_number",
"@sys.cardinal": "sys_number",
"@sys.ordinal": "sys_ordinal",
"@sys.unit-currency": "sys_amount-of-money",
"@sys.unit-volume": "sys_volume",
"@sys.email": "sys_email",
"@sys.phone-number": "sys_phone-number",
"@sys.url": "sys_url",
}
sys_entity_map_todo = [
"@sys.number-integer",
"@sys.number-sequence",
"@sys.flight-number",
"@sys.unit-area",
"@sys.unit-length",
"@sys.unit-speed",
"@sys.unit-information",
"@sys.percentage",
"@sys.temperature",
"@sys.duration",
"@sys.age",
"@sys.currency-name",
"@sys.unit-area-name",
"@sys.unit-length-name",
"@sys.unit-speed-name",
"@sys.unit-volume-name",
"@sys.unit-weight-name",
"@sys.unit-information-name",
"@sys.address",
"@sys.zip-code",
"@sys.geo-capital",
"@sys.geo-country",
"@sys.geo-country-code",
"@sys.geo-city",
"@sys.geo-state",
"@sys.geo-city",
"@sys.geo-state",
"@sys.place-attraction",
"@sys.airport",
"@sys.location",
"@sys.given-name",
"@sys.last-name",
"@sys.person",
"@sys.music-artist",
"@sys.music-genre",
"@sys.color",
"@sys.language",
"@sys.any",
]
def __init__(self, dialogflow_project_directory, mindmeld_project_directory):
if os.path.exists(os.path.dirname(dialogflow_project_directory)):
self.dialogflow_project_directory = dialogflow_project_directory
self.mindmeld_project_directory = mindmeld_project_directory
self.directory = os.path.dirname(os.path.realpath(__file__))
self.entities_list = set()
self.intents_list = set()
else:
msg = "`{dialogflow_project_directory}` does not exist. Please verify."
msg = msg.format(dialogflow_project_directory=dialogflow_project_directory)
raise FileNotFoundError(msg)
def create_mindmeld_directory(self):
self.create_directory(self.mindmeld_project_directory)
self.create_directory(os.path.join(self.mindmeld_project_directory, "data"))
self.create_directory(os.path.join(self.mindmeld_project_directory, "domains"))
self.create_directory(
os.path.join(self.mindmeld_project_directory, "domains", "general")
)
self.create_directory(os.path.join(self.mindmeld_project_directory, "entities"))
def _create_entities_directories(self, entities):
for languages in entities.values():
for sub in languages.values():
dialogflow_entity_file = os.path.join(
self.dialogflow_project_directory, "entities", sub + ".json"
)
mindmeld_entity_directory_name = self.clean_check(
sub, self.entities_list
)
mindmeld_entity_directory = os.path.join(
self.mindmeld_project_directory,
"entities",
mindmeld_entity_directory_name,
)
self.create_directory(mindmeld_entity_directory)
self._create_entity_file(
dialogflow_entity_file, mindmeld_entity_directory
)
@staticmethod
def _create_entity_file(dialogflow_entity_file, mindmeld_entity_directory):
source_en = open(dialogflow_entity_file, "r")
target_gazetteer = open(
os.path.join(mindmeld_entity_directory, "gazetteer.txt"), "w"
)
target_mapping = open(
os.path.join(mindmeld_entity_directory, "mapping.json"), "w"
)
datastore = json.load(source_en)
mapping_dict = {"entities": []}
for item in datastore:
new_dict = {}
while ("value" in item) and (item["value"] in item["synonyms"]):
item["synonyms"].remove(item["value"])
new_dict["whitelist"] = item["synonyms"]
new_dict["cname"] = item["value"]
mapping_dict["entities"].append(new_dict)
target_gazetteer.write(item["value"] + "\n")
json.dump(mapping_dict, target_mapping, ensure_ascii=False, indent=2)
source_en.close()
target_gazetteer.close()
target_mapping.close()
def _create_intents_directories(self, intents):
for languages in intents.values():
for language, sub in languages.items():
dialogflow_intent_file = os.path.join(
self.dialogflow_project_directory, "intents", sub + ".json"
)
mindmeld_intent_directory_name = self.clean_check(
sub, self.intents_list
)
mindmeld_intent_directory = os.path.join(
self.mindmeld_project_directory,
"domains",
"general",
mindmeld_intent_directory_name,
)
self.create_directory(mindmeld_intent_directory)
self._create_intent_file(
dialogflow_intent_file, mindmeld_intent_directory, language
)
def _create_intent_file(
self, dialogflow_intent_file, mindmeld_intent_directory, language
):
source_en = open(dialogflow_intent_file, "r")
target_test = open(os.path.join(mindmeld_intent_directory, "test.txt"), "w")
target_train = open(os.path.join(mindmeld_intent_directory, "train.txt"), "w")
datastore = json.load(source_en)
all_text = []
for usersay in datastore:
sentence = ""
for texts in usersay["data"]:
df_text = texts["text"]
if "meta" in texts and texts["meta"] != "@sys.ignore":
df_meta = texts["meta"]
if re.match(
"(@sys.).+", df_meta
):
if df_meta in DialogflowConverter.sys_entity_map:
mm_meta = DialogflowConverter.sys_entity_map[df_meta]
else:
mm_meta = "[DNE: {sysEntity}]".format(sysEntity=df_meta[1:])
logger.info(
"Unfortunately mindmeld does not currently support"
"%s as a sys entity."
"Please create an entity for this.",
df_meta[1:],
)
entity_type = self.clean_name(mm_meta) + "_entries_" + language
part = "{" + df_text + "|" + entity_type + "}"
else:
entity_type = (
self.clean_name(df_meta[1:]) + "_entries_" + language
)
part = "{" + df_text + "|" + entity_type + "}"
else:
part = df_text
sentence += part
all_text.append(sentence)
train, test = train_test_split(all_text, test_size=0.2)
target_test.write("\n".join(test))
target_train.write("\n".join(train))
source_en.close()
target_test.close()
target_train.close()
def _get_file_names(self, level):
directory = os.path.join(self.dialogflow_project_directory, level)
files = os.listdir(directory)
w = {"entities": "entries", "intents": "usersays"}
p = r".+(?<=(_" + w[level] + "_))(.*)(?=(.json))"
info = {}
for name in files:
match = re.match(p, name)
if match:
isbase = False
base = name[: match.start(1)]
language = match.group(2)
else:
isbase = True
base = name[:-5]
if base not in info:
info[base] = {}
if not isbase:
info[base][language] = name[:-5]
return info
def create_mindmeld_training_data(self):
entities = self._get_file_names("entities")
self._create_entities_directories(entities)
intents = self._get_file_names("intents")
self._create_intents_directories(intents)
@staticmethod
def create_handle(params):
return "@app.handle(" + params + ")"
@staticmethod
def create_header(function_name):
return "def " + function_name + "(request, responder):"
@staticmethod
def create_function(handles, function_name, replies):
assert isinstance(handles, list)
result = ""
for handle in handles:
result += DialogflowConverter.create_handle(handle) + "\n"
result += DialogflowConverter.create_header(function_name) + "\n"
result += " " + "replies = {}".format(replies) + "\n"
result += " " + "responder.reply(replies)"
return result
@staticmethod
def clean_name(name):
name = re.sub(r"[^\w\s-]", "", name).strip().lower()
name = re.sub(r"[-\s]+", "_", name)
return name
def clean_check(self, name, lst):
cleaned = self.clean_name(name)
if cleaned not in lst:
lst.add(cleaned)
return cleaned
else:
logger.error(
"%s name has been created twice. Please ensure there "
"are no duplicate names in the dialogflow files and "
"filenames are valid (no spaces or special characters)",
cleaned,
)
def create_mindmeld_init(self):
with open(
os.path.join(self.mindmeld_project_directory, "__init__.py"), "w"
) as target:
begin_info = [
"# -*- coding: utf-8 -*-",
'"""This module contains the MindMeld application"""',
"from mindmeld import Application",
"app = Application(__name__)",
"__all__ = ['app']",
]
for info, spacing in zip(begin_info, [1, 2, 1, 1, 0]):
target.write(info + "\n" * spacing)
intents = self._get_file_names("intents")
for i, main in enumerate(intents.keys()):
df_main = os.path.join(
self.dialogflow_project_directory, "intents", main + ".json"
)
with open(df_main) as source:
if "usersays" in df_main:
logger.error(
"Please check if your intent file"
"names are correctly labeled."
)
datastore = json.load(source)
replies = []
for response in datastore["responses"]:
for message in response["messages"]:
language = message["lang"]
if "speech" in message:
data = message["speech"]
replies = data if isinstance(data, list) else [data]
if datastore["fallbackIntent"]:
function_name = "default" + "_" + language
if language == "en":
handles = [
"default=True",
"intent='unsupported'",
]
else:
handles = ["intent='unsupported'"]
else:
function_name = "renameMe" + str(i) + "_" + language
handles = [
"intent="
+ "'"
+ self.clean_name(datastore["name"])
+ "_usersays_"
+ language
+ "'"
]
target.write(
"\n\n\n"
+ self.create_function(
handles=handles,
function_name=function_name,
replies=replies,
)
)
target.write("\n")
def convert_project(self):
logger.info("Converting project.")
self.create_mindmeld_directory()
self.create_mindmeld_training_data()
file_loc = os.path.dirname(os.path.realpath(__file__))
self.create_config(self.mindmeld_project_directory, file_loc)
self.create_main(self.mindmeld_project_directory, file_loc)
self.create_mindmeld_init()
logger.info("Project converted.")
| true
| true
|
f71a00fd7c45368e46d3c54f89b23447c46a85a7
| 406
|
py
|
Python
|
001085StepikPythonIntrO/Stepik001085PythonIntrOсh01p03_20200410.py
|
SafonovMikhail/python_000577
|
739f764e80f1ca354386f00b8e9db1df8c96531d
|
[
"Apache-2.0"
] | null | null | null |
001085StepikPythonIntrO/Stepik001085PythonIntrOсh01p03_20200410.py
|
SafonovMikhail/python_000577
|
739f764e80f1ca354386f00b8e9db1df8c96531d
|
[
"Apache-2.0"
] | null | null | null |
001085StepikPythonIntrO/Stepik001085PythonIntrOсh01p03_20200410.py
|
SafonovMikhail/python_000577
|
739f764e80f1ca354386f00b8e9db1df8c96531d
|
[
"Apache-2.0"
] | null | null | null |
# print(int("a"))
print(int(995.23)) # отбрасывание дробной части
print(float(42)) # приведение к виду с плавающей точкой
print(2 ** 2018) # поддержка длинной арифметики
pow = str(2 ** 2018) # количество цифр
print(pow)
# for i in pow:
# print(pow(i))
print(len(pow))
print("Yin" + " " + "Yang")
print("because " * 42)
pow2=int((str(2) * 100)) ** 2
print(pow2)
print(str(2))
print(len(str(pow2)))
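# Expected-output notes (computed for this sketch, not in the original source):
# 2 ** 2018 has 608 digits, and pow2 (the square of a 100-digit number made of
# all 2s) has 199 digits.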
| 23.882353
| 56
| 0.642857
|
print(int(995.23))
print(float(42))
print(2 ** 2018)
pow = str(2 ** 2018)
print(pow)
print(len(pow))
print("Yin" + " " + "Yang")
print("because " * 42)
pow2 = int(str(2) * 100) ** 2
print(pow2)
print(str(2))
print(len(str(pow2)))
| true
| true
|
f71a011d3e1d15a38ea8652521b28f6d01d84fa7
| 23,145
|
py
|
Python
|
library.py
|
whitehead421/library
|
2d1d3ef50127560ad6da76b5763ff45bb6d25761
|
[
"MIT"
] | null | null | null |
library.py
|
whitehead421/library
|
2d1d3ef50127560ad6da76b5763ff45bb6d25761
|
[
"MIT"
] | null | null | null |
library.py
|
whitehead421/library
|
2d1d3ef50127560ad6da76b5763ff45bb6d25761
|
[
"MIT"
] | null | null | null |
import time
import string
import random
import os
from termcolor import colored
from collections import Counter
clean_the_screen = ("cls" if os.name == "nt" else "clear")
# Function for listing books with their full information.
def listBooks():
file = open("books.txt", "r")
lines = file.readlines()
file.close()
for i in lines:
splitted = i.split(",")
numberISBN = colored(f"{splitted[0]}", "blue")
nameBook = colored(f"{splitted[1]}", "magenta", "on_grey")
nameAuthor = colored(f"{splitted[2]}", "yellow")
checkOut = splitted[3]
if checkOut == "T\n":
checkOut = colored("Book is not in the library.", "red")
if checkOut == "F\n":
checkOut = colored("Book is in the library.", "green")
print("-" * 115)
print(f"Name: {nameBook} - Author: {nameAuthor} - Status: {checkOut} - ISBN: {numberISBN}\n")
# Function for showing the books that are checked out by students.
def listBooksChecked():
file = open("books.txt", "r")
lines = file.readlines()
file.close()
a = 0
for i in lines:
splitted = i.split(",")
numberISBN = colored(f"{splitted[0]}", "blue")
nameBook = colored(f"{splitted[1]}", "magenta", "on_grey")
nameAuthor = colored(f"{splitted[2]}", "yellow")
checkOut = splitted[3]
if checkOut == "T\n":
a += 1
print("-" * 115)
print(f"Name: {nameBook} - Author: {nameAuthor} - ISBN: {numberISBN}\n")
if a == 0:
print("-" * 115)
print(colored("\tUhm..- Nobody reads books these days.\n", "blue"))
print("There is no checked out book. All the books are in the library.")
# Function for adding new books to library's data.
def addBook():
file = open("books.txt", "r")
lines = file.readlines()
file.close()
isbn = input("Please enter the ISBN number: ")
nameBook = input("Please enter the name of book: ")
nameAuthor = input("Please enter the author name: ")
for i in lines:
splitted = i.split(",")
isbnBook = splitted[0]
nBook = splitted[1]
if isbn == isbnBook:
print(colored("There is already a book with this ISBN.", "red"))
print(f"\t{isbn} - {nBook}")
break
else:
print(colored("\nThe book succesfully added to the data.", "green"))
status = "F\n"
file = open("books.txt", "a+")
file.write(f"{isbn},{nameBook},{nameAuthor},{status}")
file.close()
# Function for searching books by their ISBN numbers in data.
def searchBookISBN():
file = open("books.txt", "r")
lines = file.readlines()
file.close()
searchingISBN = input("Enter the ISBN number of book which you are looking for.\n> ")
a = 0
for i in lines:
splitted = i.split(",")
numberISBN = colored(f"{splitted[0]}", "blue")
nameBook = colored(f"{splitted[1]}", "magenta", "on_grey")
nameAuthor = colored(f"{splitted[2]}", "yellow")
checkOut = splitted[3]
if checkOut == "T\n":
checkOut = colored("is not in the library.", "red")
if checkOut == "F\n":
checkOut = colored("is in the library.", "green")
if searchingISBN.upper() in numberISBN:
print("-" * 95)
print(colored(f"{numberISBN}", "blue"), "-", f"'{nameBook}' by {nameAuthor} {checkOut}")
print("-" * 95)
a += 1
if a == 0:
print("Sorry. There is no book with this ISBN number.")
# Function for searching books by their names in data.
def searchBookName():
file = open("books.txt", "r")
lines = file.readlines()
file.close()
searchingName = input("Enter the name of book which you are looking for.\n> ")
a = 0
for i in lines:
splitted = i.split(",")
numberISBN = colored(f"{splitted[0]}", "blue")
nameBook = colored(f"{splitted[1]}", "magenta", "on_grey")
nameAuthor = colored(f"{splitted[2]}", "yellow")
checkOut = splitted[3]
if checkOut == "T\n":
checkOut = colored("Book is not in the library.", "red")
if checkOut == "F\n":
checkOut = colored("Book is in the library.", "green")
if searchingName.lower() in nameBook.lower():
a += 1
print(colored("-" * 95, "cyan"))
print(f"ISBN: {numberISBN} - Name : {nameBook} - Author: {nameAuthor} - Status: {checkOut}\n")
print(colored("-" * 95, "magenta"))
if a == 0:
print("Sorry. There is no book with this name.")
# Function for searching books by their authors' name in data.
def searchBookAuthor():
file = open("books.txt", "r")
lines = file.readlines()
file.close()
searchingAuthor = input("Enter the author name which you are looking for: ")
a = 0
for i in lines:
splitted = i.split(",")
numberISBN = colored(f"{splitted[0]}", "blue")
nameBook = colored(f"{splitted[1]}", "magenta", "on_grey")
nameAuthor = colored(f"{splitted[2]}", "yellow")
checkOut = splitted[3]
if checkOut == "T\n":
checkOut = colored("Book is not in the library.", "red")
if checkOut == "F\n":
checkOut = colored("Book is in the library.", "green")
if searchingAuthor.lower() in nameAuthor.lower():
a += 1
print("-" * 95)
print(f"Author: {nameAuthor} - Name : {nameBook} - ISBN: {numberISBN} - Status: {checkOut}\n")
if a == 0:
print(colored("Sorry. There is no author with this name.", "red"))
# Function for generating tickets when checking out a book; the ticket is used
# later to check the book back in. random.sample draws 6 distinct characters
# from 36, so there are 36*35*34*33*32*31 = 1,402,410,240 possible tickets.
def ticketGenerator(student_id, book_name):
    chars = string.digits + string.ascii_uppercase
    ticket = "".join(random.sample(chars, 6))
    file = open("tickets.txt", "a+")
    file.seek(0)  # "a+" opens positioned at the end; rewind before reading
    lines = file.readlines()
    for i in lines:
        splitted = i.split("-")
        ticket2 = splitted[0]
        if ticket == ticket2:
            file.close()
            return ticketGenerator(student_id, book_name)  # collision: retry
    file.write(f"{ticket}-{book_name}-{student_id}\n")
    file.close()
    return ticket
# Function for checking out books to students' data.
def checkOutBook():
file = open("books.txt", "rt")
dataBooksLines = file.readlines()
file.close()
file = open("students.txt", "r")
dataStudentsLines = file.readlines()
file.close()
dataCheckOut = open("checkouts.txt", "a")
bookToCheckOut = input("Please enter the ISBN number of book that you want to check out: ")
isBookToCheckOut = False
isBookToStudent = False
# Controlling if there is a book with this ISBN or not.
for i in dataBooksLines:
splitted = i.split(",")
numberISBN = splitted[0]
if bookToCheckOut == splitted[0]:
isBookToCheckOut = True
break
else:
print(colored("There is no book with this ISBN number.", "red"))
pass
if isBookToCheckOut == True:
bookToStudent = input("Please enter the student ID to check out: ")
for i in dataStudentsLines:
splitted = i.split(maxsplit= 1)
studentID = splitted[0]
studentName = splitted[1]
if bookToStudent == studentID:
isBookToStudent = True
break
else:
print(colored("There is no student with this ID. Try again.", "red"))
pass
if isBookToStudent == True:
for i in dataBooksLines:
splitted = i.split(",")
numberISBN = splitted[0]
nameBook = splitted[1]
nameAuthor = splitted[2]
checkOut = splitted[3]
if bookToCheckOut == numberISBN:
if checkOut == "T\n":
print(colored("Oops! This book is already checked out.", "red"))
else:
print(colored("Are you sure to check out this book?\n", "blue", "on_grey"))
print("ISBN:", colored(numberISBN, "blue"), "-", "Name :", colored(nameBook, "magenta", "on_grey"), "-", "Author:", colored(nameAuthor, "yellow"))
print(f"\nThis book will checked out to: " + colored(studentName, "white", "on_grey", attrs=['blink']))
verify = ""
while verify != "Y" or verify != "N" or verify != "y" or verify != "n":
verify = input("\nEnter Y or N\n" + colored("> ", "grey", attrs=['blink']))
if verify == "N" or verify == "n":
break
if verify == "Y" or verify == "y":
# Generating ticket and giving it to student.
ticketnumber = ticketGenerator(student_id= bookToStudent, book_name= nameBook)
os.system(clean_the_screen)
print(f"""
____/ \ / \____
/| ------------- | ----------- |\
||| ------------- | --->{colored(ticketnumber, "red", "on_cyan", attrs=['reverse', 'blink'])} |||
||| ------------- | ------------- |||
||| ------- ----- | --Here is---- |||
||| ------------- | -your-ticket--|||
||| ------------- | ----number.---|||
||| ------------ | --Use-it------|||
||| ------------- | -when-you--- |||
||| ------------- | -checking-in--|||
||| ------------- | ---the-book.--|||
||| ------------ | ------------- |||
|||_____________ | _____________|||
/_____/--------\\_//--------\_____\
""")
dataCheckOut.write(f"{numberISBN}-{ticketnumber}-{bookToStudent}-{nameBook}-{nameAuthor}\n")
dataCheckOut.close()
print(colored("\nThe book succesfully checked out to the student.", "green"))
# TO WRITE "T" ON BOOKS FILE WHEN CHANGED
for i in dataBooksLines:
splitted = i.split(",")
numberISBN = splitted[0]
nameBook = splitted[1]
nameAuthor = splitted[2]
checkOut = splitted[3]
if bookToCheckOut == numberISBN:
file = open("books.txt", "r")
content = file.read()
content = content.replace("{},{},{},{}".format(numberISBN, nameBook, nameAuthor, checkOut), "{},{},{},T\n".format(numberISBN, nameBook, nameAuthor))
file.close()
file = open("books.txt", "w")
file.write(content)
file.close()
break
# Function for listing students by name, along with the books currently checked out under their names.
def listStudents():
file = open("checkouts.txt", "r")
checkOutsLines = file.readlines()
file.close()
file = open("students.txt", "r")
studentsLines = file.readlines()
file.close()
file = open("checkins.txt", "r")
checkInsLines = file.readlines()
file.close()
isCheckInsLines = False
if len(checkInsLines) == 0:
isCheckInsLines = True
for i in studentsLines:
splitted = i.split()
sNumber = splitted[0]
sName = splitted[1]
sLastname = splitted[2]
print(colored("-" * 80, "grey"))
print(colored(f"{sName} {sLastname}", "blue"))
for x in checkOutsLines:
splitted = x.split("-")
nameBook = splitted[3]
scNumber = splitted[2]
ticket1 = splitted[1]
if isCheckInsLines:
if sNumber == scNumber:
print(colored("-" * 80, "grey"))
print(colored(f"\t-{nameBook}", "magenta", "on_grey"))
else:
for z in checkInsLines:
splitted = z.split("-")
ticket2 = splitted[1]
if ticket1 == ticket2:
break
else:
if sNumber == scNumber and ticket1 != ticket2:
print(colored("-" * 80, "grey"))
print(colored(f"\t-{nameBook}", "magenta", "on_grey"))
# Function for printing the top three most checked out books.
def topThreeBook():
file = open("checkouts.txt", "r")
checkoutsLines = file.readlines()
file.close()
file = open("books.txt", "r")
booksLines = file.readlines()
file.close()
isbns = []
for i in checkoutsLines:
splitted = i.split("-")
isbn = splitted[0]
isbns.append(isbn)
dictionary = Counter(isbns)
val_list = list(dictionary.values())
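    # Each pass below prints every book tied for the current maximum checkout
    # count, then strips those counts from val_list so the next pass finds the
    # runner-up.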
for i in range(3):
print("_" * 105)
if i == 0:
print(colored("THE MOST CHECKED OUT BOOK(S)!", "red", "on_yellow", attrs=['blink']))
elif i == 1:
print(colored("THE SECOND MOST CHECKED OUT BOOK(S)!", "red", "on_yellow", attrs=['blink']))
elif i == 2:
print(colored("THE THIRD MOST CHECKED OUT BOOK(S)!", "red", "on_yellow", attrs=['blink']))
try:
if len(val_list) != 0:
print("_" * 105)
print(colored(f"This/these book(s) has/have checked out for [{str(max(val_list))}] time(s)!", "cyan"))
print("_" * 105)
print("\n")
if val_list.count(max(val_list)) > 1:
for key, value in dictionary.items():
if max(val_list) == value:
for z in booksLines:
splitted2 = z.split(",")
bookISBN = splitted2[0]
bookName = splitted2[1]
if key == bookISBN:
key = bookName # key = isbn
print(key)
for i in range(val_list.count(max(val_list))):
val_list.remove(max(val_list))
elif val_list.count(max(val_list)) == 1:
for key, value in dictionary.items():
if max(val_list) == value:
for z in booksLines:
splitted2 = z.split(",")
bookISBN = splitted2[0]
bookName = splitted2[1]
if key == bookISBN:
key = bookName # key = isbn
print(key)
val_list.remove(max(val_list))
break
        except ValueError:
            print("There are no other books.")
# Function for printing top three students who checked out most.
def topThreeStudents():
dataCheckOut = open("checkouts.txt", "r")
dataCheckOutsLines = dataCheckOut.readlines()
dataCheckOut.close()
dataStudents = open("students.txt", "r")
dataStudentsLines = dataStudents.readlines()
dataStudents.close()
studentNumbers = []
for i in dataCheckOutsLines:
splitted = i.split("-")
stNumber = splitted[2]
studentNumbers.append(stNumber)
studentNumbers = Counter(studentNumbers)
val_list = list(studentNumbers.values())
for i in range(3):
print("_" * 105)
if i == 0:
print(colored("THE TOP #1 STUDENT(S)!", "red", "on_yellow", attrs=['blink']))
elif i == 1:
print(colored("THE TOP #2 STUDENT(S)!", "red", "on_yellow", attrs=['blink']))
elif i == 2:
print(colored("THE TOP #3 STUDENT(S)!", "red", "on_yellow", attrs=['blink']))
try:
if len(val_list) != 0:
print("_" * 105)
print(colored(f"This/these student(s) has/have checked out for [{str(max(val_list))}] time(s)!", "cyan"))
print("_" * 105)
print("\n")
if val_list.count(max(val_list)) > 1:
for key, value in studentNumbers.items():
if max(val_list) == value:
for z in dataStudentsLines:
splitted2 = z.split(maxsplit= 1)
sNumber = splitted2[0]
sName = splitted2[1]
if key == sNumber:
key = sName
print(key)
for i in range(val_list.count(max(val_list))):
val_list.remove(max(val_list))
elif val_list.count(max(val_list)) == 1:
for key, value in studentNumbers.items():
if max(val_list) == value:
for z in dataStudentsLines:
splitted2 = z.split(maxsplit= 1)
sNumber = splitted2[0]
sName = splitted2[1]
if key == sNumber:
key = sName
print(key)
val_list.remove(max(val_list))
break
        except ValueError:
            print("There are no other students who have checked out before.")
# Function for adding new students to data.
def addStudent():
file = open("students.txt", "r")
lines = file.readlines()
file.close()
numberStudent = input("Please enter the ID of a student to add.\n> ")
nameStudent = input("\nPlease enter the name of a student to add.\n> ")
for i in lines:
splitted = i.split(maxsplit= 1)
nStudent = splitted[0]
naStudent = splitted[1]
if numberStudent == nStudent:
print("This student ID is already exist.")
print(f"\t{nStudent} - {naStudent}")
break
else:
print(colored("\nThe student succesfully added to the data.", "green"))
file = open("students.txt", "a+")
file.write(f"{numberStudent} {nameStudent}\n")
file.close()
# Function for checking in a book with the ticket given when checked out.
def checkInBook():
ticket = input("Please enter the ticket to check in book.\n> ")
dataBooks = open("books.txt", "r")
dataBooksLines = dataBooks.readlines()
dataBooks.close()
file = open("checkouts.txt", "r")
checkoutsLines = file.readlines()
file.close()
a = 0
for i in checkoutsLines:
splitted = i.split("-")
isbn = splitted[0]
tNumber = splitted[1]
studentID = splitted[2]
nameBook = splitted[3]
if ticket == tNumber:
a += 1
print(colored("Thank you for bringing back the book!", "green"))
file = open("checkins.txt", "a")
file.write(f"The book in-{ticket}-came back.\n")
file.close()
# TO WRITE "F" ON BOOKS FILE WHEN CHANGED
for i in dataBooksLines:
splitted = i.split(",")
numberISBN = splitted[0]
nameBook = splitted[1]
nameAuthor = splitted[2]
checkOut = splitted[3]
if isbn == numberISBN:
file = open("books.txt", "r")
content = file.read()
content = content.replace("{},{},{},{}".format(numberISBN, nameBook, nameAuthor, checkOut), "{},{},{},F\n".format(numberISBN, nameBook, nameAuthor))
file.close()
file = open("books.txt", "w")
file.write(content)
file.close()
break
if a == 0:
print(colored(f"Sorry. There is no ticket as '{ticket}'.", "red"))
maxims = [
"'I have always imagined that Paradise will be a kind of a Library.' - Jorge Luis Borges ",
"'Nothing is pleasanter than exploring a library.' - Walter Savage Landor ",
"'The only thing that you absolutely have to know, is the location of the library.' - Albert Einstein",
"'When in doubt go to the library.' - J.K. Rowling ",
"'I have found the most valuable thing in my wallet is my library card.' - Laura Bush",
"'Google can bring you back 100,000 answers, a librarian can bring you back the right one.' - Neil Gaiman",
"'The most important asset of any library goes home at night – the library staff.' - Timothy Healy",
"'Librarians are tour-guides for all of knowledge.' - Patrick Ness",
]
slider = colored("-" * 48, "red")
version = colored("library.py-v1.0", "green")
menu = f"""{version}
{random.choice(maxims)}
.--. .---.
.---|__| .-. |~~~|
.--|===|--|_ |_| |~~~|--.
| |===| |'\ .---!~| .--| |--|
|%%| | |.'\ |===| |--|%%| | |
|%%| | |\.'\ | | |__| | | |
| | | | \ \ |===| |==| | | |
| | |__| \.'\ | |_|__| |~~~|__|
| |===|--| \.'\|===|~|--|%%|~~~|--|
^--^---'--^ `-'`---^-^--^--^---'--'
{colored("HELLO FROM WORLD LIBRARY!", "white", "on_blue", attrs=['blink'])}
{colored("[1]", "blue")} List all the books in the library.
{colored("[2]", "blue")} List all the books those are checked out.
{colored("[3]", "blue")} Add a new book.
{colored("[4]", "blue")} Search a book by ISBN number.
{colored("[5]", "blue")} Search a book by name.
{colored("[6]", "blue")} Check out a book to a student.
{colored("[7]", "blue")} List all the students.
{slider}
{colored("[8] List top 3 most checked out books.", "cyan", attrs=['blink'])}
{colored("[9] List top 3 student.", "cyan", attrs=['blink'])}
{slider}
{colored("[10]", "blue")} Add new student.
{colored("[11]", "blue")} Search an author by name.
{colored("[12]", "blue")} Check in a book to a library.
{slider}
{colored("[0]", "red")} Exit
"""
password = "123456"
def login():
os.system(clean_the_screen)
print(colored("""
____________________________________________________
|____________________________________________________|
| __ __ ____ ___ || ____ ____ _ __ |
|| |__ |--|_| || |_| |||_|**|*|__|+|+||___| || | |
||==|^^||--| |=||=| |=*=||| |~~|~| |=|=|| | |~||==| |
|| |##|| | | || | | |||-| | |==|+|+||-|-|~||__| |
||__|__||__|_|_||_|_|___|||_|__|_|__|_|_||_|_|_||__|_|
||_______________________||__________________________|
| _____________________ || __ __ _ __ _ |
||=|=|=|=|=|=|=|=|=|=|=| __..\/ | |_| ||#||==| / /|
|| | | | | | | | | | | |/\ \ \\|++|=| || ||==| / / |
||_|_|_|_|_|_|_|_|_|_|_/_/\_.___\__|_|__||_||__|/_/__|
|____________________ /\~()/()~//\ __________________|
| __ __ _ _ \_ (_ . _/ _ ___ _____|
||~~|_|..|__| || |_ _ \ //\\ / |=|__|~|~|___| | | |
||--|+|^^|==| || | | |__/\ __ /\__| |==|x|x|+|+|=|=|=|
||__|_|__|__|_||_|_| / \ \ / / \_|__|_|_|_|_|_|_|_|
|_________________ _/ \/\/\/ \_ _______________|
| _____ _ __ |/ \../ \| __ __ ___|
||_____|_| |_|##|_|| | \/ __| ||_|==|_|++|_|-|||
||______||=|#|--| |\ \ o / /| | |~| | | |||
||______||_|_|__|_|_\ \ o / /_|_|__|_|__|_|_|||
|_________ __________\___\____/___/___________ ______|
|__ _ / ________ ______ /| _ _ _|
|\ \ |=|/ // /| // / / / | / ||%|%|%|
| \/\ |*/ .//____//.// /__/__/ (_) / ||=|=|=|
__| \/\|/ /(____|/ // / /||~|~|~|__
|___\_/ /________// ________ / / ||_|_|_|
|___ / (|________/ |\_______\ / /| |______|
/ \|________) / / | |
""", "yellow"))
login = input("Please enter the password to log in.\n> ")
if password == login:
print(colored("Succesfully logged in!", "green", attrs=['reverse', 'blink']))
time.sleep(2)
global isLogIn
isLogIn = True
else:
print(colored("Wrong password!", "red", attrs=['reverse', 'blink']))
print("Exiting...")
time.sleep(2)
os.system(clean_the_screen)
exit()
enterToGo = colored("Press 'Enter' to continue to the menu...", "white", "on_grey", attrs=['blink'])
isLogIn = False
login()
while isLogIn:
os.system(clean_the_screen)
print(menu)
choice = input("What would you like to do?\n> ")
choice_list = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "10", "11", "12"]
if choice in choice_list:
if choice == "1":
os.system(clean_the_screen)
listBooks()
print("-" * 112)
input(enterToGo)
elif choice == "2":
os.system(clean_the_screen)
listBooksChecked()
print("-" * 115)
input(enterToGo)
elif choice == "3":
os.system(clean_the_screen)
addBook()
input(enterToGo)
elif choice == "4":
os.system(clean_the_screen)
searchBookISBN()
input(enterToGo)
elif choice == "5":
os.system(clean_the_screen)
searchBookName()
input(enterToGo)
elif choice == "6":
os.system(clean_the_screen)
checkOutBook()
input(enterToGo)
elif choice == "7":
os.system(clean_the_screen)
listStudents()
print("-" * 80)
input(enterToGo)
elif choice == "8":
os.system(clean_the_screen)
topThreeBook()
print("-" * 80)
input(enterToGo)
elif choice == "9":
os.system(clean_the_screen)
topThreeStudents()
print("-" * 80)
input(enterToGo)
elif choice == "10":
os.system(clean_the_screen)
addStudent()
print("-" * 80)
input(enterToGo)
elif choice == "11":
os.system(clean_the_screen)
searchBookAuthor()
print("-" * 80)
input(enterToGo)
elif choice == "12":
os.system(clean_the_screen)
checkInBook()
print("-" * 80)
input(enterToGo)
elif choice == "0":
print("Saving all the changes...")
time.sleep(3)
os.system(clean_the_screen)
print("See you soon!\n")
exit()
else:
print("Please enter a number in menu. (1-12)")
input(enterToGo)
| 32.87642
| 158
| 0.572391
|
import time
import string
import random
import os
from termcolor import colored
from collections import Counter
clean_the_screen = ("cls" if os.name == "nt" else "clear")
def listBooks():
file = open("books.txt", "r")
lines = file.readlines()
file.close()
for i in lines:
splitted = i.split(",")
numberISBN = colored(f"{splitted[0]}", "blue")
nameBook = colored(f"{splitted[1]}", "magenta", "on_grey")
nameAuthor = colored(f"{splitted[2]}", "yellow")
checkOut = splitted[3]
if checkOut == "T\n":
checkOut = colored("Book is not in the library.", "red")
if checkOut == "F\n":
checkOut = colored("Book is in the library.", "green")
print("-" * 115)
print(f"Name: {nameBook} - Author: {nameAuthor} - Status: {checkOut} - ISBN: {numberISBN}\n")
def listBooksChecked():
file = open("books.txt", "r")
lines = file.readlines()
file.close()
a = 0
for i in lines:
splitted = i.split(",")
numberISBN = colored(f"{splitted[0]}", "blue")
nameBook = colored(f"{splitted[1]}", "magenta", "on_grey")
nameAuthor = colored(f"{splitted[2]}", "yellow")
checkOut = splitted[3]
if checkOut == "T\n":
a += 1
print("-" * 115)
print(f"Name: {nameBook} - Author: {nameAuthor} - ISBN: {numberISBN}\n")
if a == 0:
print("-" * 115)
print(colored("\tUhm..- Nobody reads books these days.\n", "blue"))
print("There is no checked out book. All the books are in the library.")
def addBook():
file = open("books.txt", "r")
lines = file.readlines()
file.close()
isbn = input("Please enter the ISBN number: ")
nameBook = input("Please enter the name of book: ")
nameAuthor = input("Please enter the author name: ")
for i in lines:
splitted = i.split(",")
isbnBook = splitted[0]
nBook = splitted[1]
if isbn == isbnBook:
print(colored("There is already a book with this ISBN.", "red"))
print(f"\t{isbn} - {nBook}")
break
else:
print(colored("\nThe book succesfully added to the data.", "green"))
status = "F\n"
file = open("books.txt", "a+")
file.write(f"{isbn},{nameBook},{nameAuthor},{status}")
file.close()
# Function for searching books by their ISBN numbers in data.
def searchBookISBN():
file = open("books.txt", "r")
lines = file.readlines()
file.close()
searchingISBN = input("Enter the ISBN number of book which you are looking for.\n> ")
a = 0
for i in lines:
splitted = i.split(",")
numberISBN = colored(f"{splitted[0]}", "blue")
nameBook = colored(f"{splitted[1]}", "magenta", "on_grey")
nameAuthor = colored(f"{splitted[2]}", "yellow")
checkOut = splitted[3]
if checkOut == "T\n":
checkOut = colored("is not in the library.", "red")
if checkOut == "F\n":
checkOut = colored("is in the library.", "green")
if searchingISBN.upper() in numberISBN:
print("-" * 95)
print(colored(f"{numberISBN}", "blue"), "-", f"'{nameBook}' by {nameAuthor} {checkOut}")
print("-" * 95)
a += 1
if a == 0:
print("Sorry. There is no book with this ISBN number.")
# Function for searching books by their names in data.
def searchBookName():
file = open("books.txt", "r")
lines = file.readlines()
file.close()
searchingName = input("Enter the name of book which you are looking for.\n> ")
a = 0
for i in lines:
splitted = i.split(",")
numberISBN = colored(f"{splitted[0]}", "blue")
nameBook = colored(f"{splitted[1]}", "magenta", "on_grey")
nameAuthor = colored(f"{splitted[2]}", "yellow")
checkOut = splitted[3]
if checkOut == "T\n":
checkOut = colored("Book is not in the library.", "red")
if checkOut == "F\n":
checkOut = colored("Book is in the library.", "green")
if searchingName.lower() in nameBook.lower():
a += 1
print(colored("-" * 95, "cyan"))
print(f"ISBN: {numberISBN} - Name : {nameBook} - Author: {nameAuthor} - Status: {checkOut}\n")
print(colored("-" * 95, "magenta"))
if a == 0:
print("Sorry. There is no book with this name.")
# Function for searching books by their authors' name in data.
def searchBookAuthor():
file = open("books.txt", "r")
lines = file.readlines()
file.close()
searchingAuthor = input("Enter the author name which you are looking for: ")
a = 0
for i in lines:
splitted = i.split(",")
numberISBN = colored(f"{splitted[0]}", "blue")
nameBook = colored(f"{splitted[1]}", "magenta", "on_grey")
nameAuthor = colored(f"{splitted[2]}", "yellow")
checkOut = splitted[3]
if checkOut == "T\n":
checkOut = colored("Book is not in the library.", "red")
if checkOut == "F\n":
checkOut = colored("Book is in the library.", "green")
if searchingAuthor.lower() in nameAuthor.lower():
a += 1
print("-" * 95)
print(f"Author: {nameAuthor} - Name : {nameBook} - ISBN: {numberISBN} - Status: {checkOut}\n")
if a == 0:
print(colored("Sorry. There is no author with this name.", "red"))
def ticketGenerator(student_id, book_name):
    chars = string.digits + string.ascii_uppercase
    ticket = "".join(random.sample(chars, 6))
    file = open("tickets.txt", "a+")
    file.seek(0)  # "a+" opens positioned at the end; rewind before reading
    lines = file.readlines()
    for i in lines:
        splitted = i.split("-")
        ticket2 = splitted[0]
        if ticket == ticket2:
            file.close()
            return ticketGenerator(student_id, book_name)  # collision: retry
    file.write(f"{ticket}-{book_name}-{student_id}\n")
    file.close()
    return ticket
def checkOutBook():
file = open("books.txt", "rt")
dataBooksLines = file.readlines()
file.close()
file = open("students.txt", "r")
dataStudentsLines = file.readlines()
file.close()
dataCheckOut = open("checkouts.txt", "a")
bookToCheckOut = input("Please enter the ISBN number of book that you want to check out: ")
isBookToCheckOut = False
isBookToStudent = False
# Controlling if there is a book with this ISBN or not.
for i in dataBooksLines:
splitted = i.split(",")
numberISBN = splitted[0]
if bookToCheckOut == splitted[0]:
isBookToCheckOut = True
break
else:
print(colored("There is no book with this ISBN number.", "red"))
pass
if isBookToCheckOut == True:
bookToStudent = input("Please enter the student ID to check out: ")
for i in dataStudentsLines:
splitted = i.split(maxsplit= 1)
studentID = splitted[0]
studentName = splitted[1]
if bookToStudent == studentID:
isBookToStudent = True
break
else:
print(colored("There is no student with this ID. Try again.", "red"))
pass
if isBookToStudent == True:
for i in dataBooksLines:
splitted = i.split(",")
numberISBN = splitted[0]
nameBook = splitted[1]
nameAuthor = splitted[2]
checkOut = splitted[3]
if bookToCheckOut == numberISBN:
if checkOut == "T\n":
print(colored("Oops! This book is already checked out.", "red"))
else:
print(colored("Are you sure to check out this book?\n", "blue", "on_grey"))
print("ISBN:", colored(numberISBN, "blue"), "-", "Name :", colored(nameBook, "magenta", "on_grey"), "-", "Author:", colored(nameAuthor, "yellow"))
print(f"\nThis book will checked out to: " + colored(studentName, "white", "on_grey", attrs=['blink']))
verify = ""
while verify != "Y" or verify != "N" or verify != "y" or verify != "n":
verify = input("\nEnter Y or N\n" + colored("> ", "grey", attrs=['blink']))
if verify == "N" or verify == "n":
break
if verify == "Y" or verify == "y":
# Generating ticket and giving it to student.
ticketnumber = ticketGenerator(student_id= bookToStudent, book_name= nameBook)
os.system(clean_the_screen)
print(f"""
____/ \ / \____
/| ------------- | ----------- |\
||| ------------- | --->{colored(ticketnumber, "red", "on_cyan", attrs=['reverse', 'blink'])} |||
||| ------------- | ------------- |||
||| ------- ----- | --Here is---- |||
||| ------------- | -your-ticket--|||
||| ------------- | ----number.---|||
||| ------------ | --Use-it------|||
||| ------------- | -when-you--- |||
||| ------------- | -checking-in--|||
||| ------------- | ---the-book.--|||
||| ------------ | ------------- |||
|||_____________ | _____________|||
/_____/--------\\_//--------\_____\
""")
dataCheckOut.write(f"{numberISBN}-{ticketnumber}-{bookToStudent}-{nameBook}-{nameAuthor}\n")
dataCheckOut.close()
print(colored("\nThe book succesfully checked out to the student.", "green"))
# TO WRITE "T" ON BOOKS FILE WHEN CHANGED
for i in dataBooksLines:
splitted = i.split(",")
numberISBN = splitted[0]
nameBook = splitted[1]
nameAuthor = splitted[2]
checkOut = splitted[3]
if bookToCheckOut == numberISBN:
file = open("books.txt", "r")
content = file.read()
content = content.replace("{},{},{},{}".format(numberISBN, nameBook, nameAuthor, checkOut), "{},{},{},T\n".format(numberISBN, nameBook, nameAuthor))
file.close()
file = open("books.txt", "w")
file.write(content)
file.close()
break
# Function for listing students by name, along with the books currently checked out under their names.
def listStudents():
file = open("checkouts.txt", "r")
checkOutsLines = file.readlines()
file.close()
file = open("students.txt", "r")
studentsLines = file.readlines()
file.close()
file = open("checkins.txt", "r")
checkInsLines = file.readlines()
file.close()
isCheckInsLines = False
if len(checkInsLines) == 0:
isCheckInsLines = True
for i in studentsLines:
splitted = i.split()
sNumber = splitted[0]
sName = splitted[1]
sLastname = splitted[2]
print(colored("-" * 80, "grey"))
print(colored(f"{sName} {sLastname}", "blue"))
for x in checkOutsLines:
splitted = x.split("-")
nameBook = splitted[3]
scNumber = splitted[2]
ticket1 = splitted[1]
if isCheckInsLines:
if sNumber == scNumber:
print(colored("-" * 80, "grey"))
print(colored(f"\t-{nameBook}", "magenta", "on_grey"))
else:
for z in checkInsLines:
splitted = z.split("-")
ticket2 = splitted[1]
if ticket1 == ticket2:
break
else:
if sNumber == scNumber and ticket1 != ticket2:
print(colored("-" * 80, "grey"))
print(colored(f"\t-{nameBook}", "magenta", "on_grey"))
# Function for printing the top three most checked out books.
def topThreeBook():
file = open("checkouts.txt", "r")
checkoutsLines = file.readlines()
file.close()
file = open("books.txt", "r")
booksLines = file.readlines()
file.close()
isbns = []
for i in checkoutsLines:
splitted = i.split("-")
isbn = splitted[0]
isbns.append(isbn)
dictionary = Counter(isbns)
val_list = list(dictionary.values())
for i in range(3):
print("_" * 105)
if i == 0:
print(colored("THE MOST CHECKED OUT BOOK(S)!", "red", "on_yellow", attrs=['blink']))
elif i == 1:
print(colored("THE SECOND MOST CHECKED OUT BOOK(S)!", "red", "on_yellow", attrs=['blink']))
elif i == 2:
print(colored("THE THIRD MOST CHECKED OUT BOOK(S)!", "red", "on_yellow", attrs=['blink']))
try:
if len(val_list) != 0:
print("_" * 105)
print(colored(f"This/these book(s) has/have checked out for [{str(max(val_list))}] time(s)!", "cyan"))
print("_" * 105)
print("\n")
if val_list.count(max(val_list)) > 1:
for key, value in dictionary.items():
if max(val_list) == value:
for z in booksLines:
splitted2 = z.split(",")
bookISBN = splitted2[0]
bookName = splitted2[1]
if key == bookISBN:
key = bookName # key = isbn
print(key)
for i in range(val_list.count(max(val_list))):
val_list.remove(max(val_list))
elif val_list.count(max(val_list)) == 1:
for key, value in dictionary.items():
if max(val_list) == value:
for z in booksLines:
splitted2 = z.split(",")
bookISBN = splitted2[0]
bookName = splitted2[1]
if key == bookISBN:
key = bookName # key = isbn
print(key)
val_list.remove(max(val_list))
break
        except ValueError:
            print("There are no other books.")
# Function for printing top three students who checked out most.
def topThreeStudents():
dataCheckOut = open("checkouts.txt", "r")
dataCheckOutsLines = dataCheckOut.readlines()
dataCheckOut.close()
dataStudents = open("students.txt", "r")
dataStudentsLines = dataStudents.readlines()
dataStudents.close()
studentNumbers = []
for i in dataCheckOutsLines:
splitted = i.split("-")
stNumber = splitted[2]
studentNumbers.append(stNumber)
studentNumbers = Counter(studentNumbers)
val_list = list(studentNumbers.values())
for i in range(3):
print("_" * 105)
if i == 0:
print(colored("THE TOP #1 STUDENT(S)!", "red", "on_yellow", attrs=['blink']))
elif i == 1:
print(colored("THE TOP #2 STUDENT(S)!", "red", "on_yellow", attrs=['blink']))
elif i == 2:
print(colored("THE TOP #3 STUDENT(S)!", "red", "on_yellow", attrs=['blink']))
try:
if len(val_list) != 0:
print("_" * 105)
print(colored(f"This/these student(s) has/have checked out for [{str(max(val_list))}] time(s)!", "cyan"))
print("_" * 105)
print("\n")
if val_list.count(max(val_list)) > 1:
for key, value in studentNumbers.items():
if max(val_list) == value:
for z in dataStudentsLines:
splitted2 = z.split(maxsplit= 1)
sNumber = splitted2[0]
sName = splitted2[1]
if key == sNumber:
key = sName
print(key)
for i in range(val_list.count(max(val_list))):
val_list.remove(max(val_list))
elif val_list.count(max(val_list)) == 1:
for key, value in studentNumbers.items():
if max(val_list) == value:
for z in dataStudentsLines:
splitted2 = z.split(maxsplit= 1)
sNumber = splitted2[0]
sName = splitted2[1]
if key == sNumber:
key = sName
print(key)
val_list.remove(max(val_list))
break
        except ValueError:
            print("There are no other students who have checked out before.")
# Function for adding new students to data.
def addStudent():
file = open("students.txt", "r")
lines = file.readlines()
file.close()
numberStudent = input("Please enter the ID of a student to add.\n> ")
nameStudent = input("\nPlease enter the name of a student to add.\n> ")
for i in lines:
splitted = i.split(maxsplit= 1)
nStudent = splitted[0]
naStudent = splitted[1]
if numberStudent == nStudent:
print("This student ID is already exist.")
print(f"\t{nStudent} - {naStudent}")
break
else:
print(colored("\nThe student succesfully added to the data.", "green"))
file = open("students.txt", "a+")
file.write(f"{numberStudent} {nameStudent}\n")
file.close()
# Function for checking in a book with the ticket given when checked out.
def checkInBook():
ticket = input("Please enter the ticket to check in book.\n> ")
dataBooks = open("books.txt", "r")
dataBooksLines = dataBooks.readlines()
dataBooks.close()
file = open("checkouts.txt", "r")
checkoutsLines = file.readlines()
file.close()
a = 0
for i in checkoutsLines:
splitted = i.split("-")
isbn = splitted[0]
tNumber = splitted[1]
studentID = splitted[2]
nameBook = splitted[3]
if ticket == tNumber:
a += 1
print(colored("Thank you for bringing back the book!", "green"))
file = open("checkins.txt", "a")
file.write(f"The book in-{ticket}-came back.\n")
file.close()
# TO WRITE "F" ON BOOKS FILE WHEN CHANGED
for i in dataBooksLines:
splitted = i.split(",")
numberISBN = splitted[0]
nameBook = splitted[1]
nameAuthor = splitted[2]
checkOut = splitted[3]
if isbn == numberISBN:
file = open("books.txt", "r")
content = file.read()
content = content.replace("{},{},{},{}".format(numberISBN, nameBook, nameAuthor, checkOut), "{},{},{},F\n".format(numberISBN, nameBook, nameAuthor))
file.close()
file = open("books.txt", "w")
file.write(content)
file.close()
break
if a == 0:
print(colored(f"Sorry. There is no ticket as '{ticket}'.", "red"))
maxims = [
"'I have always imagined that Paradise will be a kind of a Library.' - Jorge Luis Borges ",
"'Nothing is pleasanter than exploring a library.' - Walter Savage Landor ",
"'The only thing that you absolutely have to know, is the location of the library.' - Albert Einstein",
"'When in doubt go to the library.' - J.K. Rowling ",
"'I have found the most valuable thing in my wallet is my library card.' - Laura Bush",
"'Google can bring you back 100,000 answers, a librarian can bring you back the right one.' - Neil Gaiman",
"'The most important asset of any library goes home at night – the library staff.' - Timothy Healy",
"'Librarians are tour-guides for all of knowledge.' - Patrick Ness",
]
slider = colored("-" * 48, "red")
version = colored("library.py-v1.0", "green")
menu = f"""{version}
{random.choice(maxims)}
.--. .---.
.---|__| .-. |~~~|
.--|===|--|_ |_| |~~~|--.
| |===| |'\ .---!~| .--| |--|
|%%| | |.'\ |===| |--|%%| | |
|%%| | |\.'\ | | |__| | | |
| | | | \ \ |===| |==| | | |
| | |__| \.'\ | |_|__| |~~~|__|
| |===|--| \.'\|===|~|--|%%|~~~|--|
^--^---'--^ `-'`---^-^--^--^---'--'
{colored("HELLO FROM WORLD LIBRARY!", "white", "on_blue", attrs=['blink'])}
{colored("[1]", "blue")} List all the books in the library.
{colored("[2]", "blue")} List all the books those are checked out.
{colored("[3]", "blue")} Add a new book.
{colored("[4]", "blue")} Search a book by ISBN number.
{colored("[5]", "blue")} Search a book by name.
{colored("[6]", "blue")} Check out a book to a student.
{colored("[7]", "blue")} List all the students.
{slider}
{colored("[8] List top 3 most checked out books.", "cyan", attrs=['blink'])}
{colored("[9] List top 3 student.", "cyan", attrs=['blink'])}
{slider}
{colored("[10]", "blue")} Add new student.
{colored("[11]", "blue")} Search an author by name.
{colored("[12]", "blue")} Check in a book to a library.
{slider}
{colored("[0]", "red")} Exit
"""
password = "123456"
def login():
os.system(clean_the_screen)
print(colored("""
____________________________________________________
|____________________________________________________|
| __ __ ____ ___ || ____ ____ _ __ |
|| |__ |--|_| || |_| |||_|**|*|__|+|+||___| || | |
||==|^^||--| |=||=| |=*=||| |~~|~| |=|=|| | |~||==| |
|| |##|| | | || | | |||-| | |==|+|+||-|-|~||__| |
||__|__||__|_|_||_|_|___|||_|__|_|__|_|_||_|_|_||__|_|
||_______________________||__________________________|
| _____________________ || __ __ _ __ _ |
||=|=|=|=|=|=|=|=|=|=|=| __..\/ | |_| ||#||==| / /|
|| | | | | | | | | | | |/\ \ \\|++|=| || ||==| / / |
||_|_|_|_|_|_|_|_|_|_|_/_/\_.___\__|_|__||_||__|/_/__|
|____________________ /\~()/()~//\ __________________|
| __ __ _ _ \_ (_ . _/ _ ___ _____|
||~~|_|..|__| || |_ _ \ //\\ / |=|__|~|~|___| | | |
||--|+|^^|==| || | | |__/\ __ /\__| |==|x|x|+|+|=|=|=|
||__|_|__|__|_||_|_| / \ \ / / \_|__|_|_|_|_|_|_|_|
|_________________ _/ \/\/\/ \_ _______________|
| _____ _ __ |/ \../ \| __ __ ___|
||_____|_| |_|##|_|| | \/ __| ||_|==|_|++|_|-|||
||______||=|#|--| |\ \ o / /| | |~| | | |||
||______||_|_|__|_|_\ \ o / /_|_|__|_|__|_|_|||
|_________ __________\___\____/___/___________ ______|
|__ _ / ________ ______ /| _ _ _|
|\ \ |=|/ // /| // / / / | / ||%|%|%|
| \/\ |*/ .//____//.// /__/__/ (_) / ||=|=|=|
__| \/\|/ /(____|/ // / /||~|~|~|__
|___\_/ /________// ________ / / ||_|_|_|
|___ / (|________/ |\_______\ / /| |______|
/ \|________) / / | |
""", "yellow"))
login = input("Please enter the password to log in.\n> ")
if password == login:
print(colored("Succesfully logged in!", "green", attrs=['reverse', 'blink']))
time.sleep(2)
global isLogIn
isLogIn = True
else:
print(colored("Wrong password!", "red", attrs=['reverse', 'blink']))
print("Exiting...")
time.sleep(2)
os.system(clean_the_screen)
exit()
enterToGo = colored("Press 'Enter' to continue to the menu...", "white", "on_grey", attrs=['blink'])
isLogIn = False
login()
while isLogIn:
os.system(clean_the_screen)
print(menu)
choice = input("What would you like to do?\n> ")
choice_list = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "10", "11", "12"]
if choice in choice_list:
if choice == "1":
os.system(clean_the_screen)
listBooks()
print("-" * 112)
input(enterToGo)
elif choice == "2":
os.system(clean_the_screen)
listBooksChecked()
print("-" * 115)
input(enterToGo)
elif choice == "3":
os.system(clean_the_screen)
addBook()
input(enterToGo)
elif choice == "4":
os.system(clean_the_screen)
searchBookISBN()
input(enterToGo)
elif choice == "5":
os.system(clean_the_screen)
searchBookName()
input(enterToGo)
elif choice == "6":
os.system(clean_the_screen)
checkOutBook()
input(enterToGo)
elif choice == "7":
os.system(clean_the_screen)
listStudents()
print("-" * 80)
input(enterToGo)
elif choice == "8":
os.system(clean_the_screen)
topThreeBook()
print("-" * 80)
input(enterToGo)
elif choice == "9":
os.system(clean_the_screen)
topThreeStudents()
print("-" * 80)
input(enterToGo)
elif choice == "10":
os.system(clean_the_screen)
addStudent()
print("-" * 80)
input(enterToGo)
elif choice == "11":
os.system(clean_the_screen)
searchBookAuthor()
print("-" * 80)
input(enterToGo)
elif choice == "12":
os.system(clean_the_screen)
checkInBook()
print("-" * 80)
input(enterToGo)
elif choice == "0":
print("Saving all the changes...")
time.sleep(3)
os.system(clean_the_screen)
print("See you soon!\n")
exit()
else:
print("Please enter a number in menu. (1-12)")
input(enterToGo)
| true
| true
|
f71a0395c544caeb8e59eb6aa3e37e0cba7e4d34
| 325
|
py
|
Python
|
test.py
|
deancolten/buzzsprout-manager
|
a630ee39171b7086ac738e29b721b73c39a1581f
|
[
"MIT"
] | null | null | null |
test.py
|
deancolten/buzzsprout-manager
|
a630ee39171b7086ac738e29b721b73c39a1581f
|
[
"MIT"
] | null | null | null |
test.py
|
deancolten/buzzsprout-manager
|
a630ee39171b7086ac738e29b721b73c39a1581f
|
[
"MIT"
] | null | null | null |
from bsm import Manager, Episode, EpisodeGroup
from dotenv import load_dotenv
import os
load_dotenv()
ID = os.environ.get("ID")
TOKEN = os.environ.get("TOKEN")
manager = Manager(ID, TOKEN)
print(manager.test_api())
ep = Episode(**{'title': "test upload"})
res = manager.post_episode(ep, 'testfile.mp3', None)
print(res)
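# Note: this test script assumes a local .env file defining ID and TOKEN for
# the Buzzsprout API, and a testfile.mp3 present in the working directory.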
| 19.117647
| 52
| 0.723077
|
from bsm import Manager, Episode, EpisodeGroup
from dotenv import load_dotenv
import os
load_dotenv()
ID = os.environ.get("ID")
TOKEN = os.environ.get("TOKEN")
manager = Manager(ID, TOKEN)
print(manager.test_api())
ep = Episode(**{'title': "test upload"})
res = manager.post_episode(ep, 'testfile.mp3', None)
print(res)
| true
| true
|
f71a03a12dbb6d843747f75d2f29f96ad24a5738
| 13,861
|
py
|
Python
|
synapse/rest/media/v1/_base.py
|
Oliver-Hanikel/synapse
|
6276e685345cff0b1dc32a02354914a39da911f0
|
[
"Apache-2.0"
] | null | null | null |
synapse/rest/media/v1/_base.py
|
Oliver-Hanikel/synapse
|
6276e685345cff0b1dc32a02354914a39da911f0
|
[
"Apache-2.0"
] | null | null | null |
synapse/rest/media/v1/_base.py
|
Oliver-Hanikel/synapse
|
6276e685345cff0b1dc32a02354914a39da911f0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import urllib
from typing import Awaitable, Dict, Generator, List, Optional, Tuple
from twisted.internet.interfaces import IConsumer
from twisted.protocols.basic import FileSender
from twisted.web.http import Request
from synapse.api.errors import Codes, SynapseError, cs_error
from synapse.http.server import finish_request, respond_with_json
from synapse.logging.context import make_deferred_yieldable
from synapse.util.stringutils import is_ascii
logger = logging.getLogger(__name__)
# list all text content types that will have the charset default to UTF-8 when
# none is given
TEXT_CONTENT_TYPES = [
"text/css",
"text/csv",
"text/html",
"text/calendar",
"text/plain",
"text/javascript",
"application/json",
"application/ld+json",
"application/rtf",
"image/svg+xml",
"text/xml",
]
def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]:
try:
# This allows users to append e.g. /test.png to the URL. Useful for
# clients that parse the URL to see content type.
server_name, media_id = request.postpath[:2]
if isinstance(server_name, bytes):
server_name = server_name.decode("utf-8")
media_id = media_id.decode("utf8")
file_name = None
if len(request.postpath) > 2:
try:
file_name = urllib.parse.unquote(request.postpath[-1].decode("utf-8"))
except UnicodeDecodeError:
pass
return server_name, media_id, file_name
except Exception:
raise SynapseError(
404, "Invalid media id token %r" % (request.postpath,), Codes.UNKNOWN
)
def respond_404(request: Request) -> None:
respond_with_json(
request,
404,
cs_error("Not found %r" % (request.postpath,), code=Codes.NOT_FOUND),
send_cors=True,
)
async def respond_with_file(
request: Request,
media_type: str,
file_path: str,
file_size: Optional[int] = None,
upload_name: Optional[str] = None,
) -> None:
logger.debug("Responding with %r", file_path)
if os.path.isfile(file_path):
if file_size is None:
stat = os.stat(file_path)
file_size = stat.st_size
add_file_headers(request, media_type, file_size, upload_name)
with open(file_path, "rb") as f:
await make_deferred_yieldable(FileSender().beginFileTransfer(f, request))
finish_request(request)
else:
respond_404(request)
def add_file_headers(
request: Request,
media_type: str,
file_size: Optional[int],
upload_name: Optional[str],
) -> None:
"""Adds the correct response headers in preparation for responding with the
media.
Args:
request
media_type: The media/content type.
file_size: Size in bytes of the media, if known.
upload_name: The name of the requested file, if any.
"""
def _quote(x):
return urllib.parse.quote(x.encode("utf-8"))
# Default to a UTF-8 charset for text content types.
# ex, uses UTF-8 for 'text/css' but not 'text/css; charset=UTF-16'
if media_type.lower() in TEXT_CONTENT_TYPES:
content_type = media_type + "; charset=UTF-8"
else:
content_type = media_type
request.setHeader(b"Content-Type", content_type.encode("UTF-8"))
if upload_name:
# RFC6266 section 4.1 [1] defines both `filename` and `filename*`.
#
# `filename` is defined to be a `value`, which is defined by RFC2616
# section 3.6 [2] to be a `token` or a `quoted-string`, where a `token`
# is (essentially) a single US-ASCII word, and a `quoted-string` is a
# US-ASCII string surrounded by double-quotes, using backslash as an
        # escape character. Note that %-encoding is *not* permitted.
#
# `filename*` is defined to be an `ext-value`, which is defined in
# RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`,
# where `value-chars` is essentially a %-encoded string in the given charset.
#
# [1]: https://tools.ietf.org/html/rfc6266#section-4.1
# [2]: https://tools.ietf.org/html/rfc2616#section-3.6
# [3]: https://tools.ietf.org/html/rfc5987#section-3.2.1
# We avoid the quoted-string version of `filename`, because (a) synapse didn't
# correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we
# may as well just do the filename* version.
if _can_encode_filename_as_token(upload_name):
disposition = "inline; filename=%s" % (upload_name,)
else:
disposition = "inline; filename*=utf-8''%s" % (_quote(upload_name),)
request.setHeader(b"Content-Disposition", disposition.encode("ascii"))
# cache for at least a day.
# XXX: we might want to turn this off for data we don't want to
# recommend caching as it's sensitive or private - or at least
# select private. don't bother setting Expires as all our
# clients are smart enough to be happy with Cache-Control
request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
if file_size is not None:
request.setHeader(b"Content-Length", b"%d" % (file_size,))
# Tell web crawlers to not index, archive, or follow links in media. This
# should help to prevent things in the media repo from showing up in web
# search results.
request.setHeader(b"X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex")
# separators as defined in RFC2616. SP and HT are handled separately.
# see _can_encode_filename_as_token.
_FILENAME_SEPARATOR_CHARS = {
"(",
")",
"<",
">",
"@",
",",
";",
":",
"\\",
'"',
"/",
"[",
"]",
"?",
"=",
"{",
"}",
}
def _can_encode_filename_as_token(x: str) -> bool:
for c in x:
# from RFC2616:
#
# token = 1*<any CHAR except CTLs or separators>
#
# separators = "(" | ")" | "<" | ">" | "@"
# | "," | ";" | ":" | "\" | <">
# | "/" | "[" | "]" | "?" | "="
# | "{" | "}" | SP | HT
#
# CHAR = <any US-ASCII character (octets 0 - 127)>
#
# CTL = <any US-ASCII control character
# (octets 0 - 31) and DEL (127)>
#
if ord(c) >= 127 or ord(c) <= 32 or c in _FILENAME_SEPARATOR_CHARS:
return False
return True
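# Illustrative examples (not part of the original file):
#
#   _can_encode_filename_as_token("report.pdf")    # True  -- plain ASCII token
#   _can_encode_filename_as_token("my file.pdf")   # False -- contains a space (SP)
#   _can_encode_filename_as_token("café.png")      # False -- non-ASCII character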
async def respond_with_responder(
request: Request,
responder: "Optional[Responder]",
media_type: str,
file_size: Optional[int],
upload_name: Optional[str] = None,
) -> None:
"""Responds to the request with given responder. If responder is None then
returns 404.
Args:
request
responder
media_type: The media/content type.
file_size: Size in bytes of the media. If not known it should be None
upload_name: The name of the requested file, if any.
"""
if request._disconnected:
logger.warning(
"Not sending response to request %s, already disconnected.", request
)
return
if not responder:
respond_404(request)
return
logger.debug("Responding to media request with responder %s", responder)
add_file_headers(request, media_type, file_size, upload_name)
try:
with responder:
await responder.write_to_consumer(request)
except Exception as e:
# The majority of the time this will be due to the client having gone
# away. Unfortunately, Twisted simply throws a generic exception at us
# in that case.
logger.warning("Failed to write to consumer: %s %s", type(e), e)
# Unregister the producer, if it has one, so Twisted doesn't complain
if request.producer:
request.unregisterProducer()
finish_request(request)
class Responder:
"""Represents a response that can be streamed to the requester.
Responder is a context manager which *must* be used, so that any resources
held can be cleaned up.
"""
def write_to_consumer(self, consumer: IConsumer) -> Awaitable:
"""Stream response into consumer
Args:
consumer: The consumer to stream into.
Returns:
Resolves once the response has finished being written
"""
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class FileInfo:
"""Details about a requested/uploaded file.
Attributes:
server_name (str): The server name where the media originated from,
or None if local.
file_id (str): The local ID of the file. For local files this is the
same as the media_id
url_cache (bool): If the file is for the url preview cache
thumbnail (bool): Whether the file is a thumbnail or not.
thumbnail_width (int)
thumbnail_height (int)
thumbnail_method (str)
thumbnail_type (str): Content type of thumbnail, e.g. image/png
thumbnail_length (int): The size of the media file, in bytes.
"""
def __init__(
self,
server_name,
file_id,
url_cache=False,
thumbnail=False,
thumbnail_width=None,
thumbnail_height=None,
thumbnail_method=None,
thumbnail_type=None,
thumbnail_length=None,
):
self.server_name = server_name
self.file_id = file_id
self.url_cache = url_cache
self.thumbnail = thumbnail
self.thumbnail_width = thumbnail_width
self.thumbnail_height = thumbnail_height
self.thumbnail_method = thumbnail_method
self.thumbnail_type = thumbnail_type
self.thumbnail_length = thumbnail_length
def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str]:
"""
Get the filename of the downloaded file by inspecting the
Content-Disposition HTTP header.
Args:
headers: The HTTP request headers.
Returns:
The filename, or None.
"""
content_disposition = headers.get(b"Content-Disposition", [b""])
# No header, bail out.
if not content_disposition[0]:
return None
_, params = _parse_header(content_disposition[0])
upload_name = None
# First check if there is a valid UTF-8 filename
upload_name_utf8 = params.get(b"filename*", None)
if upload_name_utf8:
if upload_name_utf8.lower().startswith(b"utf-8''"):
upload_name_utf8 = upload_name_utf8[7:]
# We have a filename*= section. This MUST be ASCII, and any UTF-8
# bytes are %-quoted.
try:
# Once it is decoded, we can then unquote the %-encoded
# parts strictly into a unicode string.
upload_name = urllib.parse.unquote(
upload_name_utf8.decode("ascii"), errors="strict"
)
except UnicodeDecodeError:
# Incorrect UTF-8.
pass
    # If there isn't one, check for an ASCII name.
if not upload_name:
upload_name_ascii = params.get(b"filename", None)
if upload_name_ascii and is_ascii(upload_name_ascii):
upload_name = upload_name_ascii.decode("ascii")
# This may be None here, indicating we did not find a matching name.
return upload_name
def _parse_header(line: bytes) -> Tuple[bytes, Dict[bytes, bytes]]:
"""Parse a Content-type like header.
Cargo-culted from `cgi`, but works on bytes rather than strings.
Args:
line: header to be parsed
Returns:
The main content-type, followed by the parameter dictionary
"""
parts = _parseparam(b";" + line)
key = next(parts)
pdict = {}
for p in parts:
i = p.find(b"=")
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1 :].strip()
# strip double-quotes
if len(value) >= 2 and value[0:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b"\\\\", b"\\").replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
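# Illustrative example (not part of the original file):
#
#   _parse_header(b'text/html; charset="utf-8"')
#   # -> (b'text/html', {b'charset': b'utf-8'})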
def _parseparam(s: bytes) -> Generator[bytes, None, None]:
"""Generator which splits the input on ;, respecting double-quoted sequences
Cargo-culted from `cgi`, but works on bytes rather than strings.
Args:
s: header to be parsed
Returns:
The split input
"""
while s[:1] == b";":
s = s[1:]
# look for the next ;
end = s.find(b";")
# if there is an odd number of " marks between here and the next ;, skip to the
# next ; instead
while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2:
end = s.find(b";", end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
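A short usage sketch may help tie the helpers above together; it is not part of the original file, only exercises the pure-Python parsing code shown here, and assumes the module is importable under the path given in this record (synapse.rest.media.v1._base).

from synapse.rest.media.v1._base import get_filename_from_headers

# Plain RFC2616 `filename` parameter (ASCII quoted-string form).
print(get_filename_from_headers(
    {b"Content-Disposition": [b'inline; filename="foo.png"']}))   # foo.png

# RFC5987 `filename*` parameter: %-encoded UTF-8 takes precedence.
print(get_filename_from_headers(
    {b"Content-Disposition": [b"inline; filename*=utf-8''caf%C3%A9.png"]}))  # café.png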
| 32.011547
| 88
| 0.611283
|
import logging
import os
import urllib
from typing import Awaitable, Dict, Generator, List, Optional, Tuple
from twisted.internet.interfaces import IConsumer
from twisted.protocols.basic import FileSender
from twisted.web.http import Request
from synapse.api.errors import Codes, SynapseError, cs_error
from synapse.http.server import finish_request, respond_with_json
from synapse.logging.context import make_deferred_yieldable
from synapse.util.stringutils import is_ascii
logger = logging.getLogger(__name__)
TEXT_CONTENT_TYPES = [
"text/css",
"text/csv",
"text/html",
"text/calendar",
"text/plain",
"text/javascript",
"application/json",
"application/ld+json",
"application/rtf",
"image/svg+xml",
"text/xml",
]
def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]:
try:
server_name, media_id = request.postpath[:2]
if isinstance(server_name, bytes):
server_name = server_name.decode("utf-8")
media_id = media_id.decode("utf8")
file_name = None
if len(request.postpath) > 2:
try:
file_name = urllib.parse.unquote(request.postpath[-1].decode("utf-8"))
except UnicodeDecodeError:
pass
return server_name, media_id, file_name
except Exception:
raise SynapseError(
404, "Invalid media id token %r" % (request.postpath,), Codes.UNKNOWN
)
def respond_404(request: Request) -> None:
respond_with_json(
request,
404,
cs_error("Not found %r" % (request.postpath,), code=Codes.NOT_FOUND),
send_cors=True,
)
async def respond_with_file(
request: Request,
media_type: str,
file_path: str,
file_size: Optional[int] = None,
upload_name: Optional[str] = None,
) -> None:
logger.debug("Responding with %r", file_path)
if os.path.isfile(file_path):
if file_size is None:
stat = os.stat(file_path)
file_size = stat.st_size
add_file_headers(request, media_type, file_size, upload_name)
with open(file_path, "rb") as f:
await make_deferred_yieldable(FileSender().beginFileTransfer(f, request))
finish_request(request)
else:
respond_404(request)
def add_file_headers(
request: Request,
media_type: str,
file_size: Optional[int],
upload_name: Optional[str],
) -> None:
def _quote(x):
return urllib.parse.quote(x.encode("utf-8"))
if media_type.lower() in TEXT_CONTENT_TYPES:
content_type = media_type + "; charset=UTF-8"
else:
content_type = media_type
request.setHeader(b"Content-Type", content_type.encode("UTF-8"))
if upload_name:
        # correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we
# may as well just do the filename* version.
if _can_encode_filename_as_token(upload_name):
disposition = "inline; filename=%s" % (upload_name,)
else:
disposition = "inline; filename*=utf-8''%s" % (_quote(upload_name),)
request.setHeader(b"Content-Disposition", disposition.encode("ascii"))
# cache for at least a day.
# XXX: we might want to turn this off for data we don't want to
# select private. don't bother setting Expires as all our
request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
if file_size is not None:
request.setHeader(b"Content-Length", b"%d" % (file_size,))
request.setHeader(b"X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex")
_FILENAME_SEPARATOR_CHARS = {
"(",
")",
"<",
">",
"@",
",",
";",
":",
"\\",
'"',
"/",
"[",
"]",
"?",
"=",
"{",
"}",
}
def _can_encode_filename_as_token(x: str) -> bool:
for c in x:
# from RFC2616:
#
# token = 1*<any CHAR except CTLs or separators>
#
# separators = "(" | ")" | "<" | ">" | "@"
# | "," | ";" | ":" | "\" | <">
if ord(c) >= 127 or ord(c) <= 32 or c in _FILENAME_SEPARATOR_CHARS:
return False
return True
async def respond_with_responder(
request: Request,
responder: "Optional[Responder]",
media_type: str,
file_size: Optional[int],
upload_name: Optional[str] = None,
) -> None:
if request._disconnected:
logger.warning(
"Not sending response to request %s, already disconnected.", request
)
return
if not responder:
respond_404(request)
return
logger.debug("Responding to media request with responder %s", responder)
add_file_headers(request, media_type, file_size, upload_name)
try:
with responder:
await responder.write_to_consumer(request)
except Exception as e:
logger.warning("Failed to write to consumer: %s %s", type(e), e)
if request.producer:
request.unregisterProducer()
finish_request(request)
class Responder:
def write_to_consumer(self, consumer: IConsumer) -> Awaitable:
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class FileInfo:
def __init__(
self,
server_name,
file_id,
url_cache=False,
thumbnail=False,
thumbnail_width=None,
thumbnail_height=None,
thumbnail_method=None,
thumbnail_type=None,
thumbnail_length=None,
):
self.server_name = server_name
self.file_id = file_id
self.url_cache = url_cache
self.thumbnail = thumbnail
self.thumbnail_width = thumbnail_width
self.thumbnail_height = thumbnail_height
self.thumbnail_method = thumbnail_method
self.thumbnail_type = thumbnail_type
self.thumbnail_length = thumbnail_length
def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str]:
content_disposition = headers.get(b"Content-Disposition", [b""])
# No header, bail out.
if not content_disposition[0]:
return None
_, params = _parse_header(content_disposition[0])
upload_name = None
# First check if there is a valid UTF-8 filename
upload_name_utf8 = params.get(b"filename*", None)
if upload_name_utf8:
if upload_name_utf8.lower().startswith(b"utf-8''"):
upload_name_utf8 = upload_name_utf8[7:]
# We have a filename*= section. This MUST be ASCII, and any UTF-8
# bytes are %-quoted.
try:
# Once it is decoded, we can then unquote the %-encoded
# parts strictly into a unicode string.
upload_name = urllib.parse.unquote(
upload_name_utf8.decode("ascii"), errors="strict"
)
except UnicodeDecodeError:
# Incorrect UTF-8.
pass
    # If there isn't one, check for an ASCII name.
if not upload_name:
upload_name_ascii = params.get(b"filename", None)
if upload_name_ascii and is_ascii(upload_name_ascii):
upload_name = upload_name_ascii.decode("ascii")
return upload_name
def _parse_header(line: bytes) -> Tuple[bytes, Dict[bytes, bytes]]:
parts = _parseparam(b";" + line)
key = next(parts)
pdict = {}
for p in parts:
i = p.find(b"=")
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1 :].strip()
if len(value) >= 2 and value[0:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b"\\\\", b"\\").replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
def _parseparam(s: bytes) -> Generator[bytes, None, None]:
while s[:1] == b";":
s = s[1:]
# look for the next ;
end = s.find(b";")
# if there is an odd number of " marks between here and the next ;, skip to the
while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2:
end = s.find(b";", end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
| true
| true
|
f71a053bb305614ab4f994386a8208bfe513245c
| 1,996
|
py
|
Python
|
dataslots/__init__.py
|
cl0ne/dataslots
|
a91634f33e25c09e48e834a46424b9f80153efa3
|
[
"MIT"
] | null | null | null |
dataslots/__init__.py
|
cl0ne/dataslots
|
a91634f33e25c09e48e834a46424b9f80153efa3
|
[
"MIT"
] | null | null | null |
dataslots/__init__.py
|
cl0ne/dataslots
|
a91634f33e25c09e48e834a46424b9f80153efa3
|
[
"MIT"
] | null | null | null |
from dataclasses import fields
from warnings import warn
__all__ = ['dataslots', 'with_slots']
def with_slots(*args, **kwargs):
warn("Use dataslots decorator instead of with_slots", category=PendingDeprecationWarning, stacklevel=2)
return dataslots(*args, **kwargs)
def dataslots(_cls=None, *, add_dict=False, add_weakref=False):
"""
    Decorator to add __slots__ to a class created by dataclass. Returns a new class object, as it's
    not possible to add __slots__ after class creation.
"""
def _slots_setstate(self, state):
for param_dict in filter(None, state):
for slot, value in param_dict.items():
object.__setattr__(self, slot, value)
def wrap(cls):
cls_dict = dict(cls.__dict__)
# Create only missing slots
inherited_slots = set().union(*(getattr(c, '__slots__', set()) for c in cls.mro()))
field_names = set(tuple(f.name for f in fields(cls)))
if add_dict:
field_names.add('__dict__')
if add_weakref:
field_names.add('__weakref__')
cls_dict['__slots__'] = tuple(field_names - inherited_slots)
        # Erase field names from class __dict__
for f in field_names:
cls_dict.pop(f, None)
# Erase __dict__ and __weakref__
cls_dict.pop('__dict__', None)
cls_dict.pop('__weakref__', None)
# Pickle fix for frozen dataclass as mentioned in https://bugs.python.org/issue36424
# Use only if __getstate__ and __setstate__ are not declared and frozen=True
if all(param not in cls_dict for param in ['__getstate__', '__setstate__']) and \
cls.__dataclass_params__.frozen:
cls_dict['__setstate__'] = _slots_setstate
# Prepare new class with slots
new_cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
new_cls.__qualname__ = getattr(cls, '__qualname__')
return new_cls
return wrap if _cls is None else wrap(_cls)
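A minimal usage sketch (not part of the original module); the Point class is a made-up example, and dataslots is applied on top of an existing @dataclass.

from dataclasses import dataclass

@dataslots
@dataclass
class Point:              # hypothetical example class
    x: int
    y: int

p = Point(1, 2)
print(p.x, p.y)           # 1 2
print(Point.__slots__)    # ('x', 'y') -- order may vary, since it is built from a set
try:
    p.z = 3               # instances have no __dict__ unless add_dict=True
except AttributeError as err:
    print(err)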
| 35.642857
| 107
| 0.657816
|
from dataclasses import fields
from warnings import warn
__all__ = ['dataslots', 'with_slots']
def with_slots(*args, **kwargs):
warn("Use dataslots decorator instead of with_slots", category=PendingDeprecationWarning, stacklevel=2)
return dataslots(*args, **kwargs)
def dataslots(_cls=None, *, add_dict=False, add_weakref=False):
def _slots_setstate(self, state):
for param_dict in filter(None, state):
for slot, value in param_dict.items():
object.__setattr__(self, slot, value)
def wrap(cls):
cls_dict = dict(cls.__dict__)
inherited_slots = set().union(*(getattr(c, '__slots__', set()) for c in cls.mro()))
field_names = set(tuple(f.name for f in fields(cls)))
if add_dict:
field_names.add('__dict__')
if add_weakref:
field_names.add('__weakref__')
cls_dict['__slots__'] = tuple(field_names - inherited_slots)
for f in field_names:
cls_dict.pop(f, None)
cls_dict.pop('__dict__', None)
cls_dict.pop('__weakref__', None)
if all(param not in cls_dict for param in ['__getstate__', '__setstate__']) and \
cls.__dataclass_params__.frozen:
cls_dict['__setstate__'] = _slots_setstate
new_cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
new_cls.__qualname__ = getattr(cls, '__qualname__')
return new_cls
return wrap if _cls is None else wrap(_cls)
| true
| true
|
f71a0540bc87f2ea7b4736b69e7e3edf50ca90fb
| 4,050
|
py
|
Python
|
benchmark/startQiskit_Class2296.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_Class2296.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_Class2296.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=33
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.cx(input_qubit[0],input_qubit[3]) # number=17
prog.x(input_qubit[3]) # number=18
prog.rx(-3.1101767270538954,input_qubit[1]) # number=27
prog.cx(input_qubit[0],input_qubit[3]) # number=19
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[1]) # number=26
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.x(input_qubit[3]) # number=29
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[0]) # number=30
prog.cz(input_qubit[3],input_qubit[0]) # number=31
prog.h(input_qubit[0]) # number=32
prog.cx(input_qubit[3],input_qubit[0]) # number=23
prog.z(input_qubit[3]) # number=24
prog.cx(input_qubit[3],input_qubit[0]) # number=25
prog.cx(input_qubit[3],input_qubit[0]) # number=22
prog.h(input_qubit[3]) # number=8
prog.z(input_qubit[3]) # number=28
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
# circuit end
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class2296.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
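A quick classical check (not part of the original script) of what the oracle function computes: with a = "111" and b = "0", f(rep) is simply the parity of the input bits. The helpers are plain Python and can be run on their own.

a, b = "111", "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
for rep in ("000", "100", "110", "111"):
    print(rep, f(rep))    # -> 0, 1, 0, 1 (bit parity)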
| 34.322034
| 140
| 0.647407
|
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.x(input_qubit[3])
prog.rx(-3.1101767270538954,input_qubit[1])
prog.cx(input_qubit[0],input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.y(input_qubit[3])
prog.h(input_qubit[1])
prog.h(input_qubit[0])
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1])
prog.x(input_qubit[3])
prog.h(input_qubit[2])
prog.h(input_qubit[0])
prog.cz(input_qubit[3],input_qubit[0])
prog.h(input_qubit[0])
prog.cx(input_qubit[3],input_qubit[0])
prog.z(input_qubit[3])
prog.cx(input_qubit[3],input_qubit[0])
prog.cx(input_qubit[3],input_qubit[0])
prog.h(input_qubit[3])
prog.z(input_qubit[3])
prog.h(input_qubit[0])
prog.y(input_qubit[2])
prog.y(input_qubit[2])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class2296.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true
| true
|
f71a06034e69e3e7408c6a1366ef06e34015e677
| 7,238
|
py
|
Python
|
complex_networks_keras_tf1/models/resnet_blocks_3d.py
|
QinggangSUN/keras_complex_valued_networks
|
e7a6c9238645e87a679328e9f8e8834ad0f716e2
|
[
"MIT"
] | 8
|
2020-11-29T11:50:04.000Z
|
2022-01-15T15:17:47.000Z
|
complex_networks_keras_tf1/models/resnet_blocks_3d.py
|
QinggangSUN/keras_complex_valued_networks
|
e7a6c9238645e87a679328e9f8e8834ad0f716e2
|
[
"MIT"
] | null | null | null |
complex_networks_keras_tf1/models/resnet_blocks_3d.py
|
QinggangSUN/keras_complex_valued_networks
|
e7a6c9238645e87a679328e9f8e8834ad0f716e2
|
[
"MIT"
] | 1
|
2021-11-29T08:22:17.000Z
|
2021-11-29T08:22:17.000Z
|
# -*- coding: utf-8 -*-
"""This module implements a number of popular two-dimensional complex valued residual blocks."""
# Authors: Qinggang Sun
#
# Reference:
# Allen Goodman, Allen Goodman, Claire McQuin, Hans Gaiser, et al. keras-resnet
# https://github.com/broadinstitute/keras-resnet
# pylint:disable=too-many-arguments, invalid-name, unused-argument
import keras.layers
import keras.regularizers
from ..layers.activations import layer_activation
from ..layers.bn import ComplexBatchNormalization
from ..layers.conv import ComplexConv3D
def basic_3d(filters,
stage=0,
block=0,
kernel_size=3,
numerical_name=False,
stride=None,
activation='crelu',
**kwargs,
):
"""
    A three-dimensional basic block.
:param filters: int, the output’s feature space
:param stage: int, representing the stage of this block (starting from 0)
:param block: int, representing this block (starting from 0)
    :param kernel_size: int or tuple/list of 3 integers, size of the kernel
:param numerical_name: bool, if true, uses numbers to represent blocks instead of chars (ResNet{18, 34})
:param stride: int, representing the stride used in the shortcut and the first conv layer,
default derives stride from block id
:param activation: str, the activation of convolution layer in residual blocks
Usage:
    >>> from complex_networks_keras_tf1.models.resnet_blocks_3d import basic_3d
>>> basic_3d(64)
"""
if stride is None:
if block != 0 or stage == 0:
stride = 1
else:
stride = 2
axis = -1 if keras.backend.image_data_format() == "channels_last" else 1
if block > 0 and numerical_name:
block_char = f'b{block}'
else:
block_char = chr(ord('a') + block)
stage_char = str(stage + 2)
def f(inputs, **kwargs):
"""Method for block."""
outputs = keras.layers.ZeroPadding3D(padding=1, name=f'padding{stage_char}{block_char}_branch2a')(inputs)
outputs = ComplexConv3D(filters, kernel_size, strides=stride, use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch2a', **kwargs)(outputs)
outputs = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch2a')(outputs)
outputs = layer_activation(outputs, activation, name=f'res{stage_char}{block_char}_branch2a_{activation}')
outputs = keras.layers.ZeroPadding3D(padding=1, name=f'padding{stage_char}{block_char}_branch2b')(outputs)
outputs = ComplexConv3D(filters, kernel_size, use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch2b', **kwargs)(outputs)
outputs = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch2b')(outputs)
if block == 0:
shortcut = ComplexConv3D(filters, (1, 1), strides=stride, use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch1', **kwargs)(inputs)
shortcut = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch1')(shortcut)
else:
shortcut = inputs
outputs = keras.layers.add([outputs, shortcut], name=f'res{stage_char}{block_char}')
outputs = layer_activation(outputs, activation, name=f'res{stage_char}{block_char}_{activation}')
return outputs
return f
def bottleneck_3d(filters,
stage=0,
block=0,
kernel_size=3,
numerical_name=False,
stride=None,
activation='crelu',
**kwargs,
):
"""
    A three-dimensional bottleneck block.
:param filters: int, the output’s feature space
:param stage: int, representing the stage of this block (starting from 0)
:param block: int, representing this block (starting from 0)
    :param kernel_size: int or tuple/list of 3 integers, size of the kernel
:param numerical_name: bool, if true, uses numbers to represent blocks instead of chars (ResNet{101, 152, 200})
:param stride: int, representing the stride used in the shortcut and the first conv layer,
default derives stride from block id
:param activation: str, the activation of convolution layer in residual blocks
Usage:
    >>> from complex_networks_keras_tf1.models.resnet_blocks_3d import bottleneck_3d
>>> bottleneck_3d(64)
"""
if stride is None:
if block != 0 or stage == 0:
stride = 1
else:
stride = 2
axis = -1 if keras.backend.image_data_format() == "channels_last" else 1
if block > 0 and numerical_name:
block_char = f'b{block}'
else:
block_char = chr(ord('a') + block)
stage_char = str(stage + 2)
def f(inputs, **kwargs):
"""Method for block."""
outputs = ComplexConv3D(filters, 1, strides=stride, use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch2a', **kwargs)(inputs)
outputs = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch2a')(outputs)
outputs = layer_activation(outputs, activation, name=f'res{stage_char}{block_char}_branch2a_{activation}')
outputs = keras.layers.ZeroPadding3D(padding=1, name=f'padding{stage_char}{block_char}_branch2b')(outputs)
outputs = ComplexConv3D(filters, kernel_size, use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch2b', **kwargs)(outputs)
outputs = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch2b')(outputs)
outputs = layer_activation(outputs, activation, name=f'res{stage_char}{block_char}_branch2b_{activation}')
outputs = ComplexConv3D(filters*4, 1, strides=(1, 1), use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch2c', **kwargs)(outputs)
outputs = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch2c')(outputs)
if block == 0:
shortcut = ComplexConv3D(filters*4, (1, 1), strides=stride, use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch1', **kwargs)(inputs)
shortcut = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch1')(shortcut)
else:
shortcut = inputs
outputs = keras.layers.add([outputs, shortcut], name=f'res{stage_char}{block_char}')
outputs = layer_activation(outputs, activation, name=f'res{stage_char}{block_char}_{activation}')
return outputs
return f
| 36.555556
| 119
| 0.644515
|
import keras.layers
import keras.regularizers
from ..layers.activations import layer_activation
from ..layers.bn import ComplexBatchNormalization
from ..layers.conv import ComplexConv3D
def basic_3d(filters,
stage=0,
block=0,
kernel_size=3,
numerical_name=False,
stride=None,
activation='crelu',
**kwargs,
):
if stride is None:
if block != 0 or stage == 0:
stride = 1
else:
stride = 2
axis = -1 if keras.backend.image_data_format() == "channels_last" else 1
if block > 0 and numerical_name:
block_char = f'b{block}'
else:
block_char = chr(ord('a') + block)
stage_char = str(stage + 2)
def f(inputs, **kwargs):
outputs = keras.layers.ZeroPadding3D(padding=1, name=f'padding{stage_char}{block_char}_branch2a')(inputs)
outputs = ComplexConv3D(filters, kernel_size, strides=stride, use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch2a', **kwargs)(outputs)
outputs = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch2a')(outputs)
outputs = layer_activation(outputs, activation, name=f'res{stage_char}{block_char}_branch2a_{activation}')
outputs = keras.layers.ZeroPadding3D(padding=1, name=f'padding{stage_char}{block_char}_branch2b')(outputs)
outputs = ComplexConv3D(filters, kernel_size, use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch2b', **kwargs)(outputs)
outputs = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch2b')(outputs)
if block == 0:
shortcut = ComplexConv3D(filters, (1, 1), strides=stride, use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch1', **kwargs)(inputs)
shortcut = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch1')(shortcut)
else:
shortcut = inputs
outputs = keras.layers.add([outputs, shortcut], name=f'res{stage_char}{block_char}')
outputs = layer_activation(outputs, activation, name=f'res{stage_char}{block_char}_{activation}')
return outputs
return f
def bottleneck_3d(filters,
stage=0,
block=0,
kernel_size=3,
numerical_name=False,
stride=None,
activation='crelu',
**kwargs,
):
if stride is None:
if block != 0 or stage == 0:
stride = 1
else:
stride = 2
axis = -1 if keras.backend.image_data_format() == "channels_last" else 1
if block > 0 and numerical_name:
block_char = f'b{block}'
else:
block_char = chr(ord('a') + block)
stage_char = str(stage + 2)
def f(inputs, **kwargs):
outputs = ComplexConv3D(filters, 1, strides=stride, use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch2a', **kwargs)(inputs)
outputs = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch2a')(outputs)
outputs = layer_activation(outputs, activation, name=f'res{stage_char}{block_char}_branch2a_{activation}')
outputs = keras.layers.ZeroPadding3D(padding=1, name=f'padding{stage_char}{block_char}_branch2b')(outputs)
outputs = ComplexConv3D(filters, kernel_size, use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch2b', **kwargs)(outputs)
outputs = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch2b')(outputs)
outputs = layer_activation(outputs, activation, name=f'res{stage_char}{block_char}_branch2b_{activation}')
outputs = ComplexConv3D(filters*4, 1, strides=(1, 1), use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch2c', **kwargs)(outputs)
outputs = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch2c')(outputs)
if block == 0:
shortcut = ComplexConv3D(filters*4, (1, 1), strides=stride, use_bias=False, spectral_parametrization=False,
name=f'res{stage_char}{block_char}_branch1', **kwargs)(inputs)
shortcut = ComplexBatchNormalization(
axis=axis, epsilon=1e-5, name=f'bn{stage_char}{block_char}_branch1')(shortcut)
else:
shortcut = inputs
outputs = keras.layers.add([outputs, shortcut], name=f'res{stage_char}{block_char}')
outputs = layer_activation(outputs, activation, name=f'res{stage_char}{block_char}_{activation}')
return outputs
return f
| true
| true
|
f71a06156e7e11289ee61b52977cfcf127cb084b
| 1,966
|
py
|
Python
|
test/docker/integration/kong_client.py
|
coolersport/kong-oidc
|
56393b4f4cca051d2ed9fdba145e679d03aab116
|
[
"Apache-2.0"
] | 3
|
2019-09-06T06:27:06.000Z
|
2020-03-28T03:22:24.000Z
|
test/docker/integration/kong_client.py
|
coolersport/kong-oidc
|
56393b4f4cca051d2ed9fdba145e679d03aab116
|
[
"Apache-2.0"
] | 1
|
2020-10-30T16:23:27.000Z
|
2020-10-30T16:23:27.000Z
|
test/docker/integration/kong_client.py
|
coolersport/kong-oidc
|
56393b4f4cca051d2ed9fdba145e679d03aab116
|
[
"Apache-2.0"
] | 5
|
2019-03-18T22:12:16.000Z
|
2022-03-03T22:05:06.000Z
|
import requests
class KongClient:
def __init__(self, url):
self._endpoint = url
self._session = requests.session()
def create_service(self, name, upstream_url):
url = "{}/services".format(self._endpoint)
payload = {
"name": name,
"url": upstream_url,
}
res = self._session.post(url, json=payload)
res.raise_for_status()
return res.json()
def create_route(self, service_name, paths):
url = "{}/services/{}/routes".format(self._endpoint, service_name)
payload = {
"paths": paths,
}
res = self._session.post(url, json=payload)
res.raise_for_status()
return res.json()
def create_plugin(self, plugin_name, service_name, config):
url = "{}/services/{}/plugins".format(self._endpoint, service_name)
payload = {
"name": plugin_name,
"config": config,
}
res = self._session.post(url, json=payload)
try:
res.raise_for_status()
except Exception as e:
print(res.text)
raise e
return res.json()
def delete_service(self, name):
try:
routes = self.get_routes(name)
for route in routes:
self.delete_route(route)
except requests.exceptions.HTTPError:
pass
url = "{}/services/{}".format(self._endpoint, name)
self._session.delete(url).raise_for_status()
def delete_route(self, route_id):
url = "{}/routes/{}".format(self._endpoint, route_id)
self._session.delete(url).raise_for_status()
def get_routes(self, service_name):
url = "{}/services/{}/routes".format(self._endpoint, service_name)
res = self._session.get(url)
res.raise_for_status()
return map(lambda x: x['id'], res.json()['data'])
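A rough usage sketch (not part of the original test helper); the admin URL, service name, upstream, and plugin config are placeholders for a local Kong admin API.

client = KongClient("http://localhost:8001")             # assumed local Kong admin endpoint
service = client.create_service("httpbin", "http://httpbin.org")
route = client.create_route("httpbin", paths=["/httpbin"])
plugin = client.create_plugin("oidc", "httpbin", config={})   # real config keys omitted here
print(service["id"], route["id"], plugin["id"])
client.delete_service("httpbin")                          # also removes the service's routes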
| 32.766667
| 76
| 0.558494
|
import requests
class KongClient:
def __init__(self, url):
self._endpoint = url
self._session = requests.session()
def create_service(self, name, upstream_url):
url = "{}/services".format(self._endpoint)
payload = {
"name": name,
"url": upstream_url,
}
res = self._session.post(url, json=payload)
res.raise_for_status()
return res.json()
def create_route(self, service_name, paths):
url = "{}/services/{}/routes".format(self._endpoint, service_name)
payload = {
"paths": paths,
}
res = self._session.post(url, json=payload)
res.raise_for_status()
return res.json()
def create_plugin(self, plugin_name, service_name, config):
url = "{}/services/{}/plugins".format(self._endpoint, service_name)
payload = {
"name": plugin_name,
"config": config,
}
res = self._session.post(url, json=payload)
try:
res.raise_for_status()
except Exception as e:
print(res.text)
raise e
return res.json()
def delete_service(self, name):
try:
routes = self.get_routes(name)
for route in routes:
self.delete_route(route)
except requests.exceptions.HTTPError:
pass
url = "{}/services/{}".format(self._endpoint, name)
self._session.delete(url).raise_for_status()
def delete_route(self, route_id):
url = "{}/routes/{}".format(self._endpoint, route_id)
self._session.delete(url).raise_for_status()
def get_routes(self, service_name):
url = "{}/services/{}/routes".format(self._endpoint, service_name)
res = self._session.get(url)
res.raise_for_status()
return map(lambda x: x['id'], res.json()['data'])
| true
| true
|
f71a0625c1f550c878a13bf9475bc05dbf22e8a9
| 121
|
py
|
Python
|
docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyFILTERSD/__init__.py
|
liujiamingustc/phd
|
4f815a738abad43531d02ac66f5bd0d9a1def52a
|
[
"Apache-2.0"
] | 3
|
2021-01-06T03:01:18.000Z
|
2022-03-21T03:02:55.000Z
|
docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyFILTERSD/__init__.py
|
liujiamingustc/phd
|
4f815a738abad43531d02ac66f5bd0d9a1def52a
|
[
"Apache-2.0"
] | null | null | null |
docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyFILTERSD/__init__.py
|
liujiamingustc/phd
|
4f815a738abad43531d02ac66f5bd0d9a1def52a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
try:
from pyFILTERSD import FILTERSD
__all__ = ['FILTERSD']
except:
__all__ = []
#end
| 13.444444
| 35
| 0.636364
|
try:
from pyFILTERSD import FILTERSD
__all__ = ['FILTERSD']
except:
__all__ = []
| true
| true
|
f71a062d2b5783e4fd92b44153a453460f29e699
| 53,902
|
py
|
Python
|
Lib/http/client.py
|
treebee/cpython
|
e152169da95b52fa41931572bc90857253c4a5dd
|
[
"CNRI-Python-GPL-Compatible"
] | 1
|
2019-05-29T18:22:03.000Z
|
2019-05-29T18:22:03.000Z
|
Lib/http/client.py
|
treebee/cpython
|
e152169da95b52fa41931572bc90857253c4a5dd
|
[
"CNRI-Python-GPL-Compatible"
] | 4
|
2022-03-30T01:50:22.000Z
|
2022-03-30T01:50:28.000Z
|
Lib/http/client.py
|
treebee/cpython
|
e152169da95b52fa41931572bc90857253c4a5dd
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
r"""HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
(null)
|
| HTTPConnection()
v
Idle
|
| putrequest()
v
Request-started
|
| ( putheader() )* endheaders()
v
Request-sent
|\_____________________________
| | getresponse() raises
| response = getresponse() | ConnectionError
v v
Unread-response Idle
[Response-headers-read]
|\____________________
| |
| response.read() | putrequest()
v v
Idle Req-started-unread-response
______/|
/ |
response.read() | | ( putheader() )* endheaders()
v v
Request-started Req-sent-unread-response
|
| response.read()
v
Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State __state __response
------------- ------- ----------
Idle _CS_IDLE None
Request-started _CS_REQ_STARTED None
Request-sent _CS_REQ_SENT None
Unread-response _CS_IDLE <response_class>
Req-started-unread-response _CS_REQ_STARTED <response_class>
Req-sent-unread-response _CS_REQ_SENT <response_class>
"""
import email.parser
import email.message
import http
import io
import re
import socket
import collections.abc
from urllib.parse import urlsplit
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
__all__ = ["HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "LineTooLong", "RemoteDisconnected", "error",
"responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# hack to maintain backwards compatibility
globals().update(http.HTTPStatus.__members__)
# another hack to maintain backwards compatibility
# Mapping status codes to official W3C names
responses = {v: v.phrase for v in http.HTTPStatus.__members__.values()}
# maximal line length when calling readline().
_MAXLINE = 65536
_MAXHEADERS = 100
# Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2)
#
# VCHAR = %x21-7E
# obs-text = %x80-FF
# header-field = field-name ":" OWS field-value OWS
# field-name = token
# field-value = *( field-content / obs-fold )
# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
# field-vchar = VCHAR / obs-text
#
# obs-fold = CRLF 1*( SP / HTAB )
# ; obsolete line folding
# ; see Section 3.2.4
# token = 1*tchar
#
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
# / DIGIT / ALPHA
# ; any VCHAR, except delimiters
#
# VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1
# the patterns for both name and value are more lenient than RFC
# definitions to allow for backwards compatibility
_is_legal_header_name = re.compile(rb'[^:\s][^:\r\n]*').fullmatch
_is_illegal_header_value = re.compile(rb'\n(?![ \t])|\r(?![ \t\n])').search
# These characters are not allowed within HTTP URL paths.
# See https://tools.ietf.org/html/rfc3986#section-3.3 and the
# https://tools.ietf.org/html/rfc3986#appendix-A pchar definition.
# Prevents CVE-2019-9740. Includes control characters such as \r\n.
# We don't restrict chars above \x7f as putrequest() limits us to ASCII.
_contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f]')
# Arguably only these _should_ be allowed:
# _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
# We are more lenient for assumed real world compatibility purposes.
# We always set the Content-Length header for these methods because some
# servers will otherwise respond with a 411
_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
def _encode(data, name='data'):
"""Call data.encode("latin-1") but show a better error message."""
try:
return data.encode("latin-1")
except UnicodeEncodeError as err:
raise UnicodeEncodeError(
err.encoding,
err.object,
err.start,
err.end,
"%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') "
"if you want to send it encoded in UTF-8." %
(name.title(), data[err.start:err.end], name)) from None
class HTTPMessage(email.message.Message):
# XXX The only usage of this method is in
# http.server.CGIHTTPRequestHandler. Maybe move the code there so
# that it doesn't need to be part of the public API. The API has
# never been defined so this could cause backwards compatibility
# issues.
def getallmatchingheaders(self, name):
"""Find all header lines matching a given header name.
Look through the list of headers and find all lines matching a given
header name (and their continuation lines). A list of the lines is
returned, without interpretation. If the header does not occur, an
empty list is returned. If the header occurs multiple times, all
occurrences are returned. Case is not important in the header name.
"""
name = name.lower() + ':'
n = len(name)
lst = []
hit = 0
for line in self.keys():
if line[:n].lower() == name:
hit = 1
elif not line[:1].isspace():
hit = 0
if hit:
lst.append(line)
return lst
def parse_headers(fp, _class=HTTPMessage):
"""Parses only RFC2822 headers from a file pointer.
email Parser wants to see strings rather than bytes.
But a TextIOWrapper around self.rfile would buffer too many bytes
from the stream, bytes which we later need to read as bytes.
So we read the correct bytes here, as bytes, for email Parser
to parse.
"""
headers = []
while True:
line = fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
headers.append(line)
if len(headers) > _MAXHEADERS:
raise HTTPException("got more than %d headers" % _MAXHEADERS)
if line in (b'\r\n', b'\n', b''):
break
hstring = b''.join(headers).decode('iso-8859-1')
return email.parser.Parser(_class=_class).parsestr(hstring)
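# Illustrative example (not part of the original source):
#
#     import io
#     fp = io.BytesIO(b"Host: example.com\r\nContent-Length: 0\r\n\r\n")
#     msg = parse_headers(fp)
#     msg["Host"]            # -> 'example.com'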
class HTTPResponse(io.BufferedIOBase):
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
# The bytes from the socket object are iso-8859-1 strings.
# See RFC 2616 sec 2.2 which notes an exception for MIME-encoded
# text following RFC 2047. The basic status line parsing only
# accepts iso-8859-1.
def __init__(self, sock, debuglevel=0, method=None, url=None):
# If the response includes a content-length header, we need to
# make sure that the client doesn't read more than the
# specified number of bytes. If it does, it will block until
# the server times out and closes the connection. This will
# happen if a self.fp.read() is done (without a size) whether
# self.fp is buffered or not. So, no self.fp.read() by
# clients unless they know what they are doing.
self.fp = sock.makefile("rb")
self.debuglevel = debuglevel
self._method = method
# The HTTPResponse object is returned via urllib. The clients
# of http and urllib expect different attributes for the
# headers. headers is used here and supports urllib. msg is
# provided as a backwards compatibility layer for http
# clients.
self.headers = self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
if len(line) > _MAXLINE:
raise LineTooLong("status line")
if self.debuglevel > 0:
print("reply:", repr(line))
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise RemoteDisconnected("Remote end closed connection without"
" response")
try:
version, status, reason = line.split(None, 2)
except ValueError:
try:
version, status = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail.
version = ""
if not version.startswith("HTTP/"):
self._close_conn()
raise BadStatusLine(line)
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.headers is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong("header line")
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print("header:", skip)
self.code = self.status = status
self.reason = reason.strip()
if version in ("HTTP/1.0", "HTTP/0.9"):
# Some servers might still return "0.9", treat it as 1.0 anyway
self.version = 10
elif version.startswith("HTTP/1."):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
else:
raise UnknownProtocol(version)
self.headers = self.msg = parse_headers(self.fp)
if self.debuglevel > 0:
for hdr, val in self.headers.items():
print("header:", hdr + ":", val)
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = True
self.chunk_left = None
else:
self.chunked = False
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
self.length = None
length = self.headers.get("content-length")
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == "HEAD"):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if (not self.will_close and
not self.chunked and
self.length is None):
self.will_close = True
def _check_close(self):
conn = self.headers.get("connection")
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.headers.get("keep-alive"):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.headers.get("proxy-connection")
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def _close_conn(self):
fp = self.fp
self.fp = None
fp.close()
def close(self):
try:
super().close() # set "closed" flag
finally:
if self.fp:
self._close_conn()
# These implementations are for the benefit of io.BufferedReader.
# XXX This class should probably be revised to act more like
# the "raw stream" that BufferedReader expects.
def flush(self):
super().flush()
if self.fp:
self.fp.flush()
def readable(self):
"""Always returns True"""
return True
# End of "raw stream" methods
def isclosed(self):
"""True if the connection is closed."""
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
def read(self, amt=None):
if self.fp is None:
return b""
if self._method == "HEAD":
self._close_conn()
return b""
if amt is not None:
# Amount is given, implement using readinto
b = bytearray(amt)
n = self.readinto(b)
return memoryview(b)[:n].tobytes()
else:
# Amount is not given (unbounded read) so we must check self.length
# and self.chunked
if self.chunked:
return self._readall_chunked()
if self.length is None:
s = self.fp.read()
else:
try:
s = self._safe_read(self.length)
except IncompleteRead:
self._close_conn()
raise
self.length = 0
self._close_conn() # we read everything
return s
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b and return the number
of bytes read.
"""
if self.fp is None:
return 0
if self._method == "HEAD":
self._close_conn()
return 0
if self.chunked:
return self._readinto_chunked(b)
if self.length is not None:
if len(b) > self.length:
# clip the read to the "end of response"
b = memoryview(b)[0:self.length]
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
n = self.fp.readinto(b)
if not n and b:
# Ideally, we would raise IncompleteRead if the content-length
# wasn't satisfied, but it might break compatibility.
self._close_conn()
elif self.length is not None:
self.length -= n
if not self.length:
self._close_conn()
return n
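    # Example (a sketch of the clipping behaviour): with Content-Length: 5
    # and a 10-byte buffer, readinto() clips the view to 5 bytes, returns 5,
    # and closes the connection once self.length reaches 0.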
def _read_next_chunk_size(self):
# Read the next chunk size from the file
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(b";")
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
return int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self._close_conn()
raise
def _read_and_discard_trailer(self):
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line in (b'\r\n', b'\n', b''):
break
def _get_chunk_left(self):
# return self.chunk_left, reading a new chunk if necessary.
# chunk_left == 0: at the end of the current chunk, need to close it
# chunk_left == None: No current chunk, should read next.
# This function returns non-zero or None if the last chunk has
# been read.
chunk_left = self.chunk_left
if not chunk_left: # Can be 0 or None
if chunk_left is not None:
# We are at the end of chunk, discard chunk end
self._safe_read(2) # toss the CRLF at the end of the chunk
try:
chunk_left = self._read_next_chunk_size()
except ValueError:
raise IncompleteRead(b'')
if chunk_left == 0:
# last chunk: 1*("0") [ chunk-extension ] CRLF
self._read_and_discard_trailer()
# we read everything; close the "file"
self._close_conn()
chunk_left = None
self.chunk_left = chunk_left
return chunk_left
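    # Chunked wire-format sketch: b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"
    # decodes to b"Wikipedia"; each size line is hexadecimal, and any
    # ";extension" suffix is stripped by _read_next_chunk_size().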
def _readall_chunked(self):
assert self.chunked != _UNKNOWN
value = []
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
break
value.append(self._safe_read(chunk_left))
self.chunk_left = 0
return b''.join(value)
except IncompleteRead:
raise IncompleteRead(b''.join(value))
def _readinto_chunked(self, b):
assert self.chunked != _UNKNOWN
total_bytes = 0
mvb = memoryview(b)
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
return total_bytes
if len(mvb) <= chunk_left:
n = self._safe_readinto(mvb)
self.chunk_left = chunk_left - n
return total_bytes + n
temp_mvb = mvb[:chunk_left]
n = self._safe_readinto(temp_mvb)
mvb = mvb[n:]
total_bytes += n
self.chunk_left = 0
except IncompleteRead:
raise IncompleteRead(bytes(b[0:total_bytes]))
def _safe_read(self, amt):
"""Read the number of bytes requested.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
data = self.fp.read(amt)
if len(data) < amt:
raise IncompleteRead(data, amt-len(data))
return data
def _safe_readinto(self, b):
"""Same as _safe_read, but for reading into a buffer."""
amt = len(b)
n = self.fp.readinto(b)
if n < amt:
raise IncompleteRead(bytes(b[:n]), amt-n)
return n
def read1(self, n=-1):
"""Read with at most one underlying system call. If at least one
byte is buffered, return that instead.
"""
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._read1_chunked(n)
if self.length is not None and (n < 0 or n > self.length):
n = self.length
result = self.fp.read1(n)
if not result and n:
self._close_conn()
elif self.length is not None:
self.length -= len(result)
return result
def peek(self, n=-1):
# Having this enables IOBase.readline() to read more than one
# byte at a time
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._peek_chunked(n)
return self.fp.peek(n)
def readline(self, limit=-1):
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
# Fallback to IOBase readline which uses peek() and read()
return super().readline(limit)
if self.length is not None and (limit < 0 or limit > self.length):
limit = self.length
result = self.fp.readline(limit)
if not result and limit:
self._close_conn()
elif self.length is not None:
self.length -= len(result)
return result
def _read1_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
chunk_left = self._get_chunk_left()
if chunk_left is None or n == 0:
return b''
if not (0 <= n <= chunk_left):
n = chunk_left # if n is negative or larger than chunk_left
read = self.fp.read1(n)
self.chunk_left -= len(read)
if not read:
raise IncompleteRead(b"")
return read
def _peek_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
try:
chunk_left = self._get_chunk_left()
except IncompleteRead:
return b'' # peek doesn't worry about protocol
if chunk_left is None:
return b'' # eof
# peek is allowed to return more than requested. Just request the
# entire chunk, and truncate what we get.
return self.fp.peek(chunk_left)[:chunk_left]
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
'''Returns the value of the header matching *name*.
If there are multiple matching headers, the values are
combined into a single string separated by commas and spaces.
If no matching header is found, returns *default* or None if
the *default* is not specified.
If the headers are unknown, raises http.client.ResponseNotReady.
'''
if self.headers is None:
raise ResponseNotReady()
headers = self.headers.get_all(name) or default
if isinstance(headers, str) or not hasattr(headers, '__iter__'):
return headers
else:
return ', '.join(headers)
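    # Usage sketch (the header values are illustrative):
    #   resp.getheader("Content-Type")           -> 'text/html; charset=utf-8'
    #   resp.getheader("X-Missing", default="-") -> '-'
    # Multiple headers with the same name are joined with ", ".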
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.headers is None:
raise ResponseNotReady()
return list(self.headers.items())
# We override IOBase.__iter__ so that it doesn't check for closed-ness
def __iter__(self):
return self
# For compatibility with old-style urllib responses.
def info(self):
        '''Returns an instance of the class email.message.Message containing
meta-information associated with the URL.
When the method is HTTP, these headers are those returned by
the server at the head of the retrieved HTML page (including
Content-Length and Content-Type).
When the method is FTP, a Content-Length header will be
present if (as is now usual) the server passed back a file
length in response to the FTP retrieval request. A
Content-Type header will be present if the MIME type can be
guessed.
When the method is local-file, returned headers will include
a Date representing the file's last-modified time, a
Content-Length giving file size, and a Content-Type
containing a guess at the file's type. See also the
            description of the email.message module.
'''
return self.headers
def geturl(self):
'''Return the real URL of the page.
In some cases, the HTTP server redirects a client to another
URL. The urlopen() function handles this transparently, but in
some cases the caller needs to know which URL the client was
redirected to. The geturl() method can be used to get at this
redirected URL.
'''
return self.url
def getcode(self):
'''Return the HTTP status code that was sent with the response,
or None if the URL is not an HTTP URL.
'''
return self.status
class HTTPConnection:
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
@staticmethod
def _is_textIO(stream):
"""Test whether a file-like object is a text or a binary stream.
"""
return isinstance(stream, io.TextIOBase)
@staticmethod
def _get_content_length(body, method):
"""Get the content-length based on the body.
If the body is None, we set Content-Length: 0 for methods that expect
a body (RFC 7230, Section 3.3.2). We also set the Content-Length for
any method if the body is a str or bytes-like object and not a file.
"""
if body is None:
# do an explicit check for not None here to distinguish
# between unset and set but empty
if method.upper() in _METHODS_EXPECTING_BODY:
return 0
else:
return None
if hasattr(body, 'read'):
# file-like object.
return None
try:
# does it implement the buffer protocol (bytes, bytearray, array)?
mv = memoryview(body)
return mv.nbytes
except TypeError:
pass
if isinstance(body, str):
return len(body)
return None
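    # Return-value sketch: _get_content_length(b"abc", "POST") -> 3;
    # _get_content_length(None, "POST") -> 0 (a method expecting a body);
    # _get_content_length(file_obj, "PUT") -> None (length unknown, so the
    # caller may fall back to chunked encoding).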
def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, blocksize=8192):
self.timeout = timeout
self.source_address = source_address
self.blocksize = blocksize
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
(self.host, self.port) = self._get_hostport(host, port)
# This is stored as an instance variable to allow unit
# tests to replace it with a suitable mockup
self._create_connection = socket.create_connection
def set_tunnel(self, host, port=None, headers=None):
"""Set up host and port for HTTP CONNECT tunnelling.
In a connection that uses HTTP CONNECT tunneling, the host passed to the
constructor is used as a proxy server that relays all communication to
        the endpoint passed to `set_tunnel`. This is done by sending an HTTP
        CONNECT request to the proxy server when the connection is established.
        This method must be called before the HTTP connection has been
        established.
The headers argument should be a mapping of extra HTTP headers to send
with the CONNECT request.
"""
if self.sock:
raise RuntimeError("Can't set up tunnel for established connection")
self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
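    # Usage sketch (the proxy and target hosts are hypothetical):
    #   conn = HTTPSConnection("proxy.example.com", 8080)
    #   conn.set_tunnel("www.example.com", 443)
    #   conn.request("GET", "/")   # CONNECT is sent first, then the request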
def _get_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
port = self.default_port
else:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
return (host, port)
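    # Parsing sketch (hosts are hypothetical):
    #   _get_hostport("example.com:8080", None) -> ("example.com", 8080)
    #   _get_hostport("[::1]:8443", None)       -> ("::1", 8443)
    #   _get_hostport("example.com", None)      -> ("example.com", 80)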
def set_debuglevel(self, level):
self.debuglevel = level
def _tunnel(self):
connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
self._tunnel_port)
connect_bytes = connect_str.encode("ascii")
self.send(connect_bytes)
for header, value in self._tunnel_headers.items():
header_str = "%s: %s\r\n" % (header, value)
header_bytes = header_str.encode("latin-1")
self.send(header_bytes)
self.send(b'\r\n')
response = self.response_class(self.sock, method=self._method)
(version, code, message) = response._read_status()
if code != http.HTTPStatus.OK:
self.close()
raise OSError("Tunnel connection failed: %d %s" % (code,
message.strip()))
while True:
line = response.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
# for sites which EOF without sending a trailer
break
if line in (b'\r\n', b'\n', b''):
break
if self.debuglevel > 0:
print('header:', line.decode())
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = self._create_connection(
            (self.host, self.port), self.timeout, self.source_address)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self._tunnel_host:
self._tunnel()
def close(self):
"""Close the connection to the HTTP server."""
self.__state = _CS_IDLE
try:
sock = self.sock
if sock:
self.sock = None
sock.close() # close it manually... there may be other refs
finally:
response = self.__response
if response:
self.__response = None
response.close()
def send(self, data):
"""Send `data' to the server.
``data`` can be a string object, a bytes object, an array object, a
file-like object that supports a .read() method, or an iterable object.
"""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print("send:", repr(data))
        if hasattr(data, "read"):
            if self.debuglevel > 0:
                print("sending a read()able")
            encode = self._is_textIO(data)
            if encode and self.debuglevel > 0:
                print("encoding file using iso-8859-1")
            while True:
datablock = data.read(self.blocksize)
if not datablock:
break
if encode:
datablock = datablock.encode("iso-8859-1")
self.sock.sendall(datablock)
return
try:
self.sock.sendall(data)
except TypeError:
if isinstance(data, collections.abc.Iterable):
for d in data:
self.sock.sendall(d)
else:
raise TypeError("data should be a bytes-like object "
"or an iterable, got %r" % type(data))
def _output(self, s):
"""Add a line of output to the current request buffer.
Assumes that the line does *not* end with \\r\\n.
"""
self._buffer.append(s)
def _read_readable(self, readable):
if self.debuglevel > 0:
print("sendIng a read()able")
encode = self._is_textIO(readable)
if encode and self.debuglevel > 0:
print("encoding file using iso-8859-1")
while True:
datablock = readable.read(self.blocksize)
if not datablock:
break
if encode:
datablock = datablock.encode("iso-8859-1")
yield datablock
def _send_output(self, message_body=None, encode_chunked=False):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend((b"", b""))
msg = b"\r\n".join(self._buffer)
del self._buffer[:]
self.send(msg)
if message_body is not None:
# create a consistent interface to message_body
if hasattr(message_body, 'read'):
# Let file-like take precedence over byte-like. This
# is needed to allow the current position of mmap'ed
# files to be taken into account.
chunks = self._read_readable(message_body)
else:
try:
# this is solely to check to see if message_body
# implements the buffer API. it /would/ be easier
# to capture if PyObject_CheckBuffer was exposed
# to Python.
memoryview(message_body)
except TypeError:
try:
chunks = iter(message_body)
except TypeError:
raise TypeError("message_body should be a bytes-like "
"object or an iterable, got %r"
% type(message_body))
else:
# the object implements the buffer interface and
# can be passed directly into socket methods
chunks = (message_body,)
for chunk in chunks:
if not chunk:
if self.debuglevel > 0:
print('Zero length chunk ignored')
continue
if encode_chunked and self._http_vsn == 11:
# chunked encoding
chunk = f'{len(chunk):X}\r\n'.encode('ascii') + chunk \
+ b'\r\n'
self.send(chunk)
if encode_chunked and self._http_vsn == 11:
# end chunked transfer
self.send(b'0\r\n\r\n')
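    # Framing sketch: with encode_chunked=True, a 4-byte chunk b"Wiki" is
    # sent as b"4\r\nWiki\r\n" and the transfer ends with b"0\r\n\r\n".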
def putrequest(self, method, url, skip_host=False,
skip_accept_encoding=False):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
`skip_host' if True does not add automatically a 'Host:' header
`skip_accept_encoding' if True does not add automatically an
'Accept-Encoding:' header
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# in certain cases, we cannot issue another request on this connection.
# this occurs when:
# 1) we are in the process of sending a request. (_CS_REQ_STARTED)
# 2) a response to a previous request has signalled that it is going
# to close the connection upon completion.
# 3) the headers for the previous response have not been read, thus
# we cannot determine whether point (2) is true. (_CS_REQ_SENT)
#
# if there is no prior response, then we can request at will.
#
# if point (2) is true, then we will have passed the socket to the
# response (effectively meaning, "there is no prior response"), and
# will open a new one when a new request is made.
#
# Note: if a prior response exists, then we *can* start a new request.
# We are not allowed to begin fetching the response to this new
# request, however, until that prior response is complete.
#
if self.__state == _CS_IDLE:
self.__state = _CS_REQ_STARTED
else:
raise CannotSendRequest(self.__state)
# Save the method we use, we need it later in the response phase
self._method = method
if not url:
url = '/'
# Prevent CVE-2019-9740.
if match := _contains_disallowed_url_pchar_re.search(url):
raise InvalidURL(f"URL can't contain control characters. {url!r} "
f"(found at least {match.group()!r})")
request = '%s %s %s' % (method, url, self._http_vsn_str)
# Non-ASCII characters should have been eliminated earlier
self._output(request.encode('ascii'))
if self._http_vsn == 11:
# Issue some standard headers for better HTTP/1.1 compliance
if not skip_host:
# this header is issued *only* for HTTP/1.1
# connections. more specifically, this means it is
# only issued when the client uses the new
# HTTPConnection() class. backwards-compat clients
# will be using HTTP/1.0 and those clients may be
# issuing this header themselves. we should NOT issue
# it twice; some web servers (such as Apache) barf
# when they see two Host: headers
                # If we need a non-standard port, include it in the
                # header. If the request is going through a proxy,
                # use the host of the actual URL, not the host of the
                # proxy.
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
if self._tunnel_host:
host = self._tunnel_host
port = self._tunnel_port
else:
host = self.host
port = self.port
try:
host_enc = host.encode("ascii")
except UnicodeEncodeError:
host_enc = host.encode("idna")
                    # As per RFC 2732, an IPv6 address should be wrapped
                    # with [] when used as Host header
# when used as Host header
if host.find(':') >= 0:
host_enc = b'[' + host_enc + b']'
if port == self.default_port:
self.putheader('Host', host_enc)
else:
host_enc = host_enc.decode("ascii")
self.putheader('Host', "%s:%s" % (host_enc, port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
# consequences. this also means that when the supporting
# libraries are updated to recognize other forms, then this
# code should be changed (removed or updated).
# we only want a Content-Encoding of "identity" since we don't
# support encodings such as x-gzip or x-deflate.
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')
# we can accept "chunked" Transfer-Encodings, but no others
# NOTE: no TE header implies *only* "chunked"
#self.putheader('TE', 'chunked')
# if TE is supplied in the header, then it must appear in a
# Connection header.
#self.putheader('Connection', 'TE')
else:
# For HTTP/1.0, the server will assume "not chunked"
pass
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
if hasattr(header, 'encode'):
header = header.encode('ascii')
if not _is_legal_header_name(header):
raise ValueError('Invalid header name %r' % (header,))
values = list(values)
for i, one_value in enumerate(values):
if hasattr(one_value, 'encode'):
values[i] = one_value.encode('latin-1')
elif isinstance(one_value, int):
values[i] = str(one_value).encode('ascii')
if _is_illegal_header_value(values[i]):
raise ValueError('Invalid header value %r' % (values[i],))
value = b'\r\n\t'.join(values)
header = header + b': ' + value
self._output(header)
def endheaders(self, message_body=None, *, encode_chunked=False):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional message_body
argument can be used to pass a message body associated with the
request.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body, encode_chunked=encode_chunked)
def request(self, method, url, body=None, headers={}, *,
encode_chunked=False):
"""Send a complete request to the server."""
self._send_request(method, url, body, headers, encode_chunked)
def _send_request(self, method, url, body, headers, encode_chunked):
# Honor explicitly requested Host: and Accept-Encoding: headers.
header_names = frozenset(k.lower() for k in headers)
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
# chunked encoding will happen if HTTP/1.1 is used and either
# the caller passes encode_chunked=True or the following
# conditions hold:
# 1. content-length has not been explicitly set
# 2. the body is a file or iterable, but not a str or bytes-like
# 3. Transfer-Encoding has NOT been explicitly set by the caller
if 'content-length' not in header_names:
# only chunk body if not explicitly set for backwards
# compatibility, assuming the client code is already handling the
# chunking
if 'transfer-encoding' not in header_names:
# if content-length cannot be automatically determined, fall
# back to chunked encoding
encode_chunked = False
content_length = self._get_content_length(body, method)
if content_length is None:
if body is not None:
if self.debuglevel > 0:
print('Unable to determine size of %r' % body)
encode_chunked = True
self.putheader('Transfer-Encoding', 'chunked')
else:
self.putheader('Content-Length', str(content_length))
else:
encode_chunked = False
for hdr, value in headers.items():
self.putheader(hdr, value)
if isinstance(body, str):
# RFC 2616 Section 3.7.1 says that text default has a
# default charset of iso-8859-1.
body = _encode(body, 'body')
self.endheaders(body, encode_chunked=encode_chunked)
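    # Example (a sketch; the host and file are hypothetical): a file-like
    # body with no explicit Content-Length is sent chunked automatically.
    #   conn = HTTPConnection("example.com")
    #   with open("payload.bin", "rb") as f:
    #       conn.request("PUT", "/upload", body=f)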
def getresponse(self):
"""Get the response from the server.
If the HTTPConnection is in the correct state, returns an
instance of HTTPResponse or of whatever object is returned by
the response_class variable.
If a request has not been sent or if a previous response has
        not been handled, ResponseNotReady is raised. If the HTTP
response indicates that the connection should be closed, then
it will be closed before the response is returned. When the
connection is closed, the underlying socket is closed.
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady(self.__state)
if self.debuglevel > 0:
response = self.response_class(self.sock, self.debuglevel,
method=self._method)
else:
response = self.response_class(self.sock, method=self._method)
try:
try:
response.begin()
except ConnectionError:
self.close()
raise
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
except:
response.close()
raise
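    # Round-trip sketch (hypothetical host):
    #   conn = HTTPConnection("example.com")
    #   conn.request("GET", "/")
    #   resp = conn.getresponse()   # an HTTPResponse instance
    #   body = resp.read()          # consume it before the next request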
try:
import ssl
except ImportError:
pass
else:
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
default_port = HTTPS_PORT
# XXX Should key_file and cert_file be deprecated in favour of context?
def __init__(self, host, port=None, key_file=None, cert_file=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, *, context=None,
check_hostname=None, blocksize=8192):
super(HTTPSConnection, self).__init__(host, port, timeout,
source_address,
blocksize=blocksize)
if (key_file is not None or cert_file is not None or
check_hostname is not None):
import warnings
warnings.warn("key_file, cert_file and check_hostname are "
"deprecated, use a custom context instead.",
DeprecationWarning, 2)
self.key_file = key_file
self.cert_file = cert_file
if context is None:
context = ssl._create_default_https_context()
will_verify = context.verify_mode != ssl.CERT_NONE
if check_hostname is None:
check_hostname = context.check_hostname
if check_hostname and not will_verify:
raise ValueError("check_hostname needs a SSL context with "
"either CERT_OPTIONAL or CERT_REQUIRED")
if key_file or cert_file:
context.load_cert_chain(cert_file, key_file)
self._context = context
if check_hostname is not None:
self._context.check_hostname = check_hostname
def connect(self):
"Connect to a host on a given (SSL) port."
super().connect()
if self._tunnel_host:
server_hostname = self._tunnel_host
else:
server_hostname = self.host
self.sock = self._context.wrap_socket(self.sock,
server_hostname=server_hostname)
__all__.append("HTTPSConnection")
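    # Usage sketch (hypothetical host): verify certificates with the
    # default context from ssl.create_default_context().
    #   ctx = ssl.create_default_context()
    #   conn = HTTPSConnection("www.example.com", context=ctx)
    #   conn.request("GET", "/")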
class HTTPException(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
pass
class InvalidURL(HTTPException):
pass
class UnknownProtocol(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
pass
class UnimplementedFileMode(HTTPException):
pass
class IncompleteRead(HTTPException):
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return '%s(%i bytes read%s)' % (self.__class__.__name__,
len(self.partial), e)
def __str__(self):
return repr(self)
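    # repr sketch: IncompleteRead(b"abc", 2) prints as
    # 'IncompleteRead(3 bytes read, 2 more expected)'.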
class ImproperConnectionState(HTTPException):
pass
class CannotSendRequest(ImproperConnectionState):
pass
class CannotSendHeader(ImproperConnectionState):
pass
class ResponseNotReady(ImproperConnectionState):
pass
class BadStatusLine(HTTPException):
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
class RemoteDisconnected(ConnectionResetError, BadStatusLine):
def __init__(self, *pos, **kw):
BadStatusLine.__init__(self, "")
ConnectionResetError.__init__(self, *pos, **kw)
# for backwards compatibility
error = HTTPException
f71a07d0760e6b2878001901d08de4bc02ae7c09 | 3381 | py | Python
static_data/mk_lookup.py | flyingsymbols/arewebeatingcovid19 | 78370472432700bb84796035c93868fb1887c055 | ["MIT"] | 1 | 2020-04-18T08:41:00.000Z | 2020-04-18T08:41:00.000Z
import os
import csv
import copy
import json
DIR = os.path.dirname(__file__)
def rel(*p): return os.path.normpath(os.path.join(DIR, *p))
CENSUS_DATA = rel('nst-est2019-alldata.csv')
OUT_JSON = rel('state_data.json')
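# Path sketch: rel() resolves names against this file's directory, e.g.
# rel('state_data.json') -> '<static_data dir>/state_data.json'.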
def main():
state_data = copy.deepcopy(STATE_DATA)
state_name_ind = {} # { name: ind of record in STATE_DATA }
    state_abbrev_ind = {} # { abbrev: ind of record in STATE_DATA }
for i, v in enumerate(state_data):
state_name_ind[v['name']] = i
state_abbrev_ind[v['abbrev']] = i
with open(CENSUS_DATA, 'r') as f:
csv_r = csv.DictReader(f)
for row in csv_r:
name = row['NAME']
population = int(row['POPESTIMATE2019'])
if name not in state_name_ind:
continue
else:
data_row_i = state_name_ind[name]
state_data[data_row_i]['population'] = population
state_json_data = {
'name_ind': state_name_ind,
'abbrev_ind': state_abbrev_ind,
'data': state_data
}
state_json_str = json.dumps(state_json_data, indent=2)
with open(OUT_JSON, 'w') as f:
json.dump(state_json_data, f, indent=2)
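    # For reference, the resulting state_data.json has this shape
    # (index and population values below are illustrative):
    #
    # {
    #   "name_ind":   {"Alabama": 0, "Alaska": 1, ...},
    #   "abbrev_ind": {"AL": 0, "AK": 1, ...},
    #   "data":       [{"name": "Alabama", "abbrev": "AL", "population": 4903185}, ...]
    # }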
STATE_DATA = [
{"name": "Alabama", "abbrev": "AL"},
{"name": "Alaska", "abbrev": "AK"},
{"name": "Arizona", "abbrev": "AZ"},
{"name": "Arkansas", "abbrev": "AR"},
{"name": "California", "abbrev": "CA"},
{"name": "Colorado", "abbrev": "CO"},
{"name": "Connecticut", "abbrev": "CT"},
{"name": "Delaware", "abbrev": "DE"},
{"name": "Florida", "abbrev": "FL"},
{"name": "Georgia", "abbrev": "GA"},
{"name": "Hawaii", "abbrev": "HI"},
{"name": "Idaho", "abbrev": "ID"},
{"name": "Illinois", "abbrev": "IL"},
{"name": "Indiana", "abbrev": "IN"},
{"name": "Iowa", "abbrev": "IA"},
{"name": "Kansas", "abbrev": "KS"},
{"name": "Kentucky", "abbrev": "KY"},
{"name": "Louisiana", "abbrev": "LA"},
{"name": "Maine", "abbrev": "ME"},
{"name": "Maryland", "abbrev": "MD"},
{"name": "Massachusetts", "abbrev": "MA"},
{"name": "Michigan", "abbrev": "MI"},
{"name": "Minnesota", "abbrev": "MN"},
{"name": "Mississippi", "abbrev": "MS"},
{"name": "Missouri", "abbrev": "MO"},
{"name": "Montana", "abbrev": "MT"},
{"name": "Nebraska", "abbrev": "NE"},
{"name": "Nevada", "abbrev": "NV"},
{"name": "New Hampshire", "abbrev": "NH"},
{"name": "New Jersey", "abbrev": "NJ"},
{"name": "New Mexico", "abbrev": "NM"},
{"name": "New York", "abbrev": "NY"},
{"name": "North Carolina", "abbrev": "NC"},
{"name": "North Dakota", "abbrev": "ND"},
{"name": "Ohio", "abbrev": "OH"},
{"name": "Oklahoma", "abbrev": "OK"},
{"name": "Oregon", "abbrev": "OR"},
{"name": "Pennsylvania", "abbrev": "PA"},
{"name": "Rhode Island", "abbrev": "RI"},
{"name": "South Carolina", "abbrev": "SC"},
{"name": "South Dakota", "abbrev": "SD"},
{"name": "Tennessee", "abbrev": "TN"},
{"name": "Texas", "abbrev": "TX"},
{"name": "Utah", "abbrev": "UT"},
{"name": "Vermont", "abbrev": "VT"},
{"name": "Virginia", "abbrev": "VA"},
{"name": "Washington", "abbrev": "WA"},
{"name": "West Virginia", "abbrev": "WV"},
{"name": "Wisconsin", "abbrev": "WI"},
{"name": "Wyoming", "abbrev": "WY"},
]
if __name__ == '__main__':
main()
| 33.147059
| 70
| 0.525288
|
import os
import csv
import copy
import json
DIR = os.path.dirname(__file__)
def rel(*p): return os.path.normpath(os.path.join(DIR, *p))
CENSUS_DATA = rel('nst-est2019-alldata.csv')
OUT_JSON = rel('state_data.json')
def main():
state_data = copy.deepcopy(STATE_DATA)
state_name_ind = {}
state_abbrev_ind = {}
for i, v in enumerate(state_data):
state_name_ind[v['name']] = i
state_abbrev_ind[v['abbrev']] = i
with open(CENSUS_DATA, 'r') as f:
csv_r = csv.DictReader(f)
for row in csv_r:
name = row['NAME']
population = int(row['POPESTIMATE2019'])
if name not in state_name_ind:
continue
else:
data_row_i = state_name_ind[name]
state_data[data_row_i]['population'] = population
state_json_data = {
'name_ind': state_name_ind,
'abbrev_ind': state_abbrev_ind,
'data': state_data
}
state_json_str = json.dumps(state_json_data, indent=2)
with open(OUT_JSON, 'w') as f:
json.dump(state_json_data, f, indent=2)
STATE_DATA = [
{"name": "Alabama", "abbrev": "AL"},
{"name": "Alaska", "abbrev": "AK"},
{"name": "Arizona", "abbrev": "AZ"},
{"name": "Arkansas", "abbrev": "AR"},
{"name": "California", "abbrev": "CA"},
{"name": "Colorado", "abbrev": "CO"},
{"name": "Connecticut", "abbrev": "CT"},
{"name": "Delaware", "abbrev": "DE"},
{"name": "Florida", "abbrev": "FL"},
{"name": "Georgia", "abbrev": "GA"},
{"name": "Hawaii", "abbrev": "HI"},
{"name": "Idaho", "abbrev": "ID"},
{"name": "Illinois", "abbrev": "IL"},
{"name": "Indiana", "abbrev": "IN"},
{"name": "Iowa", "abbrev": "IA"},
{"name": "Kansas", "abbrev": "KS"},
{"name": "Kentucky", "abbrev": "KY"},
{"name": "Louisiana", "abbrev": "LA"},
{"name": "Maine", "abbrev": "ME"},
{"name": "Maryland", "abbrev": "MD"},
{"name": "Massachusetts", "abbrev": "MA"},
{"name": "Michigan", "abbrev": "MI"},
{"name": "Minnesota", "abbrev": "MN"},
{"name": "Mississippi", "abbrev": "MS"},
{"name": "Missouri", "abbrev": "MO"},
{"name": "Montana", "abbrev": "MT"},
{"name": "Nebraska", "abbrev": "NE"},
{"name": "Nevada", "abbrev": "NV"},
{"name": "New Hampshire", "abbrev": "NH"},
{"name": "New Jersey", "abbrev": "NJ"},
{"name": "New Mexico", "abbrev": "NM"},
{"name": "New York", "abbrev": "NY"},
{"name": "North Carolina", "abbrev": "NC"},
{"name": "North Dakota", "abbrev": "ND"},
{"name": "Ohio", "abbrev": "OH"},
{"name": "Oklahoma", "abbrev": "OK"},
{"name": "Oregon", "abbrev": "OR"},
{"name": "Pennsylvania", "abbrev": "PA"},
{"name": "Rhode Island", "abbrev": "RI"},
{"name": "South Carolina", "abbrev": "SC"},
{"name": "South Dakota", "abbrev": "SD"},
{"name": "Tennessee", "abbrev": "TN"},
{"name": "Texas", "abbrev": "TX"},
{"name": "Utah", "abbrev": "UT"},
{"name": "Vermont", "abbrev": "VT"},
{"name": "Virginia", "abbrev": "VA"},
{"name": "Washington", "abbrev": "WA"},
{"name": "West Virginia", "abbrev": "WV"},
{"name": "Wisconsin", "abbrev": "WI"},
{"name": "Wyoming", "abbrev": "WY"},
]
if __name__ == '__main__':
main()
| true
| true
|
f71a09128c188832b08bc19072a6ef2f2c8d9dde
| 3,136
|
py
|
Python
|
music_preprocessor/music_preprocessor.py
|
offy284/Keras-GAN
|
6652c626ba584ffd1c25ca4e925e6f131077395c
|
[
"MIT"
] | null | null | null |
music_preprocessor/music_preprocessor.py
|
offy284/Keras-GAN
|
6652c626ba584ffd1c25ca4e925e6f131077395c
|
[
"MIT"
] | null | null | null |
music_preprocessor/music_preprocessor.py
|
offy284/Keras-GAN
|
6652c626ba584ffd1c25ca4e925e6f131077395c
|
[
"MIT"
] | null | null | null |
import itertools
import shutil
import os
from os import listdir
from os.path import isfile, join
from tqdm import tqdm
import numpy as np
import scipy
from scipy.io.wavfile import write, read
from scipy.fftpack import fft
from scipy import signal
from scipy.fft import fftshift
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
RESOLUTION_SCALE = 10
def flatten_dir(dir):
print("Flattening MusicData directory...")
all_files = []
dups = 0
for root, _dirs, files in itertools.islice(os.walk(dir), 1, None):
try:
for filename in files:
all_files.append(os.path.join(root, filename))
except:
dups += 1
for filename in all_files:
try:
shutil.move(filename, dir)
except:
dups += 1
print(f"{dups} duplicate files removed")
def generate_big_music(resolution_scale=RESOLUTION_SCALE):
print("Generating big_music from MusicData directory...")
onlyfiles = [f for f in listdir("MusicData/") if isfile(join("MusicData/", f))]
print("Normalizing big_music...")
square_size = 28 * resolution_scale
    big_music = np.empty(0)  # start empty; np.empty((1)) would prepend one uninitialized sample
for i in tqdm(range(len(onlyfiles))):
file = onlyfiles[i]
if "-converted" in file:
x = scipy.io.wavfile.read(f"MusicData/{file}")
x = x[1]
#big_music = big_music.reshape(-1)
'''
print(f"Building spectrogram...")
plt.specgram(x, Fs=44100)
plt.savefig(f'MusicImageData/{file}.png')
x = x.reshape(-1, 1)
min_max_scaler = MinMaxScaler()
x = (min_max_scaler.fit_transform(x) - .5) * 2
samples = list(np.empty((int(x.shape[0] / square_size / square_size), square_size, square_size, 1)))
rows = np.zeros((square_size, square_size, 1))
cols = np.zeros((square_size, 1))
for samplei in tqdm(range(len(samples))):
for yi in range(square_size):
for xi in range(square_size):
cols[xi] = x[xi + yi * square_size + samplei * square_size * square_size]
rows[yi] = cols
samples[samplei] = rows
'''
print("Numpyifying x...")
big_music = np.concatenate([big_music, x])
print(f"big_music is of shape {big_music.shape}")
freqs, times, spectrogram = signal.spectrogram(big_music, 44100)
spectrogram = spectrogram.reshape((spectrogram.shape[1], spectrogram.shape[0]))
print(spectrogram.shape)
filename = f"spectrogram.npy"
print(f"Saving {filename}...")
np.save(f"{filename}", spectrogram)
filename = f"freqs.npy"
print(f"Saving {filename}...")
np.save(f"{filename}", freqs)
filename = f"times.npy"
print(f"Saving {filename}...")
np.save(f"{filename}", times)
if __name__ == '__main__':
print("Music Preprocessor v0.1")
#flatten_dir()
generate_big_music()
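    # The arrays saved by generate_big_music() can be reloaded later with, e.g.:
    #     spectrogram = np.load("spectrogram.npy")
    #     freqs = np.load("freqs.npy")
    #     times = np.load("times.npy")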
| 29.866667
| 112
| 0.607781
|
import itertools
import shutil
import os
from os import listdir
from os.path import isfile, join
from tqdm import tqdm
import numpy as np
import scipy
from scipy.io.wavfile import write, read
from scipy.fftpack import fft
from scipy import signal
from scipy.fft import fftshift
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
RESOLUTION_SCALE = 10
def flatten_dir(dir):
print("Flattening MusicData directory...")
all_files = []
dups = 0
for root, _dirs, files in itertools.islice(os.walk(dir), 1, None):
try:
for filename in files:
all_files.append(os.path.join(root, filename))
except:
dups += 1
for filename in all_files:
try:
shutil.move(filename, dir)
except:
dups += 1
print(f"{dups} duplicate files removed")
def generate_big_music(resolution_scale=RESOLUTION_SCALE):
print("Generating big_music from MusicData directory...")
onlyfiles = [f for f in listdir("MusicData/") if isfile(join("MusicData/", f))]
print("Normalizing big_music...")
square_size = 28 * resolution_scale
    big_music = np.empty(0)
for i in tqdm(range(len(onlyfiles))):
file = onlyfiles[i]
if "-converted" in file:
x = scipy.io.wavfile.read(f"MusicData/{file}")
x = x[1]
print("Numpyifying x...")
big_music = np.concatenate([big_music, x])
print(f"big_music is of shape {big_music.shape}")
freqs, times, spectrogram = signal.spectrogram(big_music, 44100)
spectrogram = spectrogram.reshape((spectrogram.shape[1], spectrogram.shape[0]))
print(spectrogram.shape)
filename = f"spectrogram.npy"
print(f"Saving {filename}...")
np.save(f"{filename}", spectrogram)
filename = f"freqs.npy"
print(f"Saving {filename}...")
np.save(f"{filename}", freqs)
filename = f"times.npy"
print(f"Saving {filename}...")
np.save(f"{filename}", times)
if __name__ == '__main__':
print("Music Preprocessor v0.1")
generate_big_music()
| true
| true
|
f71a09cfb0b4712bce6ade7ab4148ea05334dfee
| 1,077
|
py
|
Python
|
database/dbclient.py
|
sonudoo/password-manager
|
6fa1d2ebeba5b0f9cff200b32a581321d109b9cd
|
[
"MIT"
] | null | null | null |
database/dbclient.py
|
sonudoo/password-manager
|
6fa1d2ebeba5b0f9cff200b32a581321d109b9cd
|
[
"MIT"
] | null | null | null |
database/dbclient.py
|
sonudoo/password-manager
|
6fa1d2ebeba5b0f9cff200b32a581321d109b9cd
|
[
"MIT"
] | null | null | null |
import pymongo
class DbClient:
"""Creates an instance of pymongo client and stores it in a private variable.
The instance of this class is injected as a dependency for request validators and processors.
Attributes:
database (Database): The database object.
collection_list (list): List of collection names as str.
"""
database = None
collection_list = None
def __init__(self, mongo_uri, database):
"""
Args:
mongo_uri (str): Uri of the MongoDB database.
database (str): Name of the database.
"""
client = pymongo.MongoClient(mongo_uri)
self.database = client[database]
self.collection_list = [collection for collection in self.database.collection_names()]
def get_collection(self, collection):
"""
Args:
collection (str): Name of the collection to get.
Returns:
Collection: The collection by name.
"""
assert collection in self.collection_list
return self.database[collection]
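# Minimal usage sketch -- the URI and database name below are placeholders,
# not values this module requires.
if __name__ == "__main__":
    client = DbClient("mongodb://localhost:27017", "password_manager")
    print(client.collection_list)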
| 32.636364
| 97
| 0.637883
|
import pymongo
class DbClient:
database = None
collection_list = None
def __init__(self, mongo_uri, database):
client = pymongo.MongoClient(mongo_uri)
self.database = client[database]
self.collection_list = [collection for collection in self.database.collection_names()]
def get_collection(self, collection):
assert collection in self.collection_list
return self.database[collection]
| true
| true
|
f71a0a1f8ed72da50b25bdb3d34573679f492d53
| 4,542
|
py
|
Python
|
tests/clvm/test_chialisp_deserialization.py
|
Tony4467/littlelambocoin-blockchain
|
3d4f2b577cd5a2feb324fca50e0981a728583aee
|
[
"Apache-2.0"
] | 6
|
2021-07-15T16:52:46.000Z
|
2021-09-27T16:57:08.000Z
|
tests/clvm/test_chialisp_deserialization.py
|
Tony4467/littlelambocoin-blockchain
|
3d4f2b577cd5a2feb324fca50e0981a728583aee
|
[
"Apache-2.0"
] | 6
|
2021-07-27T08:17:34.000Z
|
2021-11-30T11:39:19.000Z
|
tests/clvm/test_chialisp_deserialization.py
|
Tony4467/littlelambocoin-blockchain
|
3d4f2b577cd5a2feb324fca50e0981a728583aee
|
[
"Apache-2.0"
] | 7
|
2021-08-15T15:10:58.000Z
|
2021-10-04T16:47:39.000Z
|
from unittest import TestCase
from littlelambocoin.types.blockchain_format.program import Program, INFINITE_COST
from littlelambocoin.util.byte_types import hexstr_to_bytes
from littlelambocoin.wallet.puzzles.load_clvm import load_clvm
DESERIALIZE_MOD = load_clvm("littlelambocoinlisp_deserialisation.clvm", package_or_requirement="littlelambocoin.wallet.puzzles")
def serialized_atom_overflow(size):
if size == 0:
size_blob = b"\x80"
elif size < 0x40:
size_blob = bytes([0x80 | size])
elif size < 0x2000:
size_blob = bytes([0xC0 | (size >> 8), (size >> 0) & 0xFF])
elif size < 0x100000:
size_blob = bytes([0xE0 | (size >> 16), (size >> 8) & 0xFF, (size >> 0) & 0xFF])
elif size < 0x8000000:
size_blob = bytes(
[
0xF0 | (size >> 24),
(size >> 16) & 0xFF,
(size >> 8) & 0xFF,
(size >> 0) & 0xFF,
]
)
elif size < 0x400000000:
size_blob = bytes(
[
0xF8 | (size >> 32),
(size >> 24) & 0xFF,
(size >> 16) & 0xFF,
(size >> 8) & 0xFF,
(size >> 0) & 0xFF,
]
)
else:
size_blob = bytes(
[
0xFC | ((size >> 40) & 0xFF),
(size >> 32) & 0xFF,
(size >> 24) & 0xFF,
(size >> 16) & 0xFF,
(size >> 8) & 0xFF,
(size >> 0) & 0xFF,
]
)
extra_str = "01" * 1000
return size_blob.hex() + extra_str
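# For example, serialized_atom_overflow(0x50) returns "c050" + "01" * 1000:
# sizes in [0x40, 0x2000) get a two-byte length prefix whose high bits are 0b110,
# while the larger branches above emit 3- to 6-byte prefixes for bigger atoms.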
class TestClvmNativeDeserialization(TestCase):
"""
Test clvm deserialization done from within the clvm
"""
def test_deserialization_simple_list(self):
# ("hello" "friend")
b = hexstr_to_bytes("ff8568656c6c6fff86667269656e6480")
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
print(cost, output)
prog = Program.to(output)
assert prog == Program.from_bytes(b)
def test_deserialization_password_coin(self):
# (i (= (sha256 2) (q 0x2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824)) (c (q 51) (c 5 (c (q 100) (q ())))) (q "wrong password")) # noqa
b = hexstr_to_bytes(
"ff04ffff0affff0bff0280ffff01ffa02cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b98248080ffff05ffff01ff3380ffff05ff05ffff05ffff01ff6480ffff01ff8080808080ffff01ff8e77726f6e672070617373776f72648080" # noqa
) # noqa
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
print(cost, output)
prog = Program.to(output)
assert prog == Program.from_bytes(b)
def test_deserialization_large_numbers(self):
# '(99999999999999999999999999999999999999999999999999999999999999999 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF -99999999999999999999999999999999999999999999999999999999999999999999999999999)' # noqa
b = hexstr_to_bytes(
"ff9c00f316271c7fc3908a8bef464e3945ef7a253609ffffffffffffffffffb00fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa1ff22ea0179500526edb610f148ec0c614155678491902d6000000000000000000180" # noqa
) # noqa
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
print(cost, output)
prog = Program.to(output)
assert prog == Program.from_bytes(b)
def test_overflow_atoms(self):
b = hexstr_to_bytes(serialized_atom_overflow(0xFFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
b = hexstr_to_bytes(serialized_atom_overflow(0x3FFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
b = hexstr_to_bytes(serialized_atom_overflow(0xFFFFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
b = hexstr_to_bytes(serialized_atom_overflow(0x1FFFFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
| 39.495652
| 264
| 0.624835
|
from unittest import TestCase
from littlelambocoin.types.blockchain_format.program import Program, INFINITE_COST
from littlelambocoin.util.byte_types import hexstr_to_bytes
from littlelambocoin.wallet.puzzles.load_clvm import load_clvm
DESERIALIZE_MOD = load_clvm("littlelambocoinlisp_deserialisation.clvm", package_or_requirement="littlelambocoin.wallet.puzzles")
def serialized_atom_overflow(size):
if size == 0:
size_blob = b"\x80"
elif size < 0x40:
size_blob = bytes([0x80 | size])
elif size < 0x2000:
size_blob = bytes([0xC0 | (size >> 8), (size >> 0) & 0xFF])
elif size < 0x100000:
size_blob = bytes([0xE0 | (size >> 16), (size >> 8) & 0xFF, (size >> 0) & 0xFF])
elif size < 0x8000000:
size_blob = bytes(
[
0xF0 | (size >> 24),
(size >> 16) & 0xFF,
(size >> 8) & 0xFF,
(size >> 0) & 0xFF,
]
)
elif size < 0x400000000:
size_blob = bytes(
[
0xF8 | (size >> 32),
(size >> 24) & 0xFF,
(size >> 16) & 0xFF,
(size >> 8) & 0xFF,
(size >> 0) & 0xFF,
]
)
else:
size_blob = bytes(
[
0xFC | ((size >> 40) & 0xFF),
(size >> 32) & 0xFF,
(size >> 24) & 0xFF,
(size >> 16) & 0xFF,
(size >> 8) & 0xFF,
(size >> 0) & 0xFF,
]
)
extra_str = "01" * 1000
return size_blob.hex() + extra_str
class TestClvmNativeDeserialization(TestCase):
def test_deserialization_simple_list(self):
b = hexstr_to_bytes("ff8568656c6c6fff86667269656e6480")
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
print(cost, output)
prog = Program.to(output)
assert prog == Program.from_bytes(b)
def test_deserialization_password_coin(self):
b = hexstr_to_bytes(
"ff04ffff0affff0bff0280ffff01ffa02cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b98248080ffff05ffff01ff3380ffff05ff05ffff05ffff01ff6480ffff01ff8080808080ffff01ff8e77726f6e672070617373776f72648080"
)
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
print(cost, output)
prog = Program.to(output)
assert prog == Program.from_bytes(b)
def test_deserialization_large_numbers(self):
b = hexstr_to_bytes(
"ff9c00f316271c7fc3908a8bef464e3945ef7a253609ffffffffffffffffffb00fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa1ff22ea0179500526edb610f148ec0c614155678491902d6000000000000000000180"
)
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
print(cost, output)
prog = Program.to(output)
assert prog == Program.from_bytes(b)
def test_overflow_atoms(self):
b = hexstr_to_bytes(serialized_atom_overflow(0xFFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
b = hexstr_to_bytes(serialized_atom_overflow(0x3FFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
b = hexstr_to_bytes(serialized_atom_overflow(0xFFFFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
b = hexstr_to_bytes(serialized_atom_overflow(0x1FFFFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
| true
| true
|
f71a0aa7e2a9704ecbc6e1a45727204696ea1a5b
| 2,036
|
py
|
Python
|
Discord 1.0.0a - REWRITE/EstruturaBots/CogsSharding/main.py
|
Algueem/Discord-Bot-Python-Tutoriais
|
cd126828a21fba4be584ffb62f923fa12086307b
|
[
"MIT"
] | 1
|
2018-10-14T16:45:32.000Z
|
2018-10-14T16:45:32.000Z
|
Discord 1.0.0a - REWRITE/EstruturaBots/CogsSharding/main.py
|
ikrost/Discord-Bot-Python-Tutoriais
|
cd126828a21fba4be584ffb62f923fa12086307b
|
[
"MIT"
] | null | null | null |
Discord 1.0.0a - REWRITE/EstruturaBots/CogsSharding/main.py
|
ikrost/Discord-Bot-Python-Tutoriais
|
cd126828a21fba4be584ffb62f923fa12086307b
|
[
"MIT"
] | 2
|
2019-04-26T21:37:38.000Z
|
2019-05-07T17:37:26.000Z
|
import discord
from discord.ext import commands
import json
# Open the setup JSON to read the bot settings
with open('bot_setup.json') as vagner:
bot_settings =json.load(vagner)
# List of command extensions to load
# In "cmds.info", "cmds" is the folder name and "info" is the file name
# You can also use e.g. "cmds.adm.ban" to keep things organized; each "." goes one directory deeper
cmd_open=['cmds.info','cmds.cooldown']
# Build the bot setup
class main(commands.AutoShardedBot):
def __init__(self):
super().__init__(command_prefix=bot_settings['prefixo'],
description="tutorial Cogs Rewrite",
pm_help=None,
                         # Here we define the number of shards
shard_count=bot_settings['shard_count'])
self.token = bot_settings['token']
        # Remove that ugly default help command
self.remove_command('help')
    # Now the bot events
async def on_ready(self):
        # Load the command extensions
for extension in cmd_open:
try:
bot.load_extension(extension)
print(f"Comando {extension} carregado com sucesso")
except Exception as e:
exc = '{}.{}'.format(type(e).__name__, e)
print('falha ao carregar extensoes {} . {} detalhes {}'.format(extension, e,exc))
await self.change_presence(activity=discord.Activity(name='tutorial vagner',type=discord.ActivityType.listening))
print("Logado.")
async def on_message(self,message):
        # Prevent the bot from responding to other bots
if message.author.bot:
pass
        # Block commands sent via DM
elif isinstance(message.channel, discord.abc.GuildChannel) is False:
return
else:
await bot.process_commands(message)
    # Function to log the bot in
def run(self):
super().run(bot.token, reconnect=True)
if __name__ =="__main__":
bot = main()
bot.run()
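# For reference, bot_setup.json is expected to provide the keys read above
# (values here are placeholders):
# {
#     "prefixo": "!",
#     "token": "YOUR_BOT_TOKEN",
#     "shard_count": 2
# }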
| 33.377049
| 121
| 0.632613
|
import discord
from discord.ext import commands
import json
with open('bot_setup.json') as vagner:
bot_settings =json.load(vagner)
cmd_open=['cmds.info','cmds.cooldown']
class main(commands.AutoShardedBot):
def __init__(self):
super().__init__(command_prefix=bot_settings['prefixo'],
description="tutorial Cogs Rewrite",
pm_help=None,
shard_count=bot_settings['shard_count'])
self.token = bot_settings['token']
self.remove_command('help')
async def on_ready(self):
for extension in cmd_open:
try:
bot.load_extension(extension)
print(f"Comando {extension} carregado com sucesso")
except Exception as e:
exc = '{}.{}'.format(type(e).__name__, e)
print('falha ao carregar extensoes {} . {} detalhes {}'.format(extension, e,exc))
await self.change_presence(activity=discord.Activity(name='tutorial vagner',type=discord.ActivityType.listening))
print("Logado.")
async def on_message(self,message):
if message.author.bot:
pass
elif isinstance(message.channel, discord.abc.GuildChannel) is False:
return
else:
await bot.process_commands(message)
def run(self):
super().run(bot.token, reconnect=True)
if __name__ =="__main__":
bot = main()
bot.run()
| true
| true
|
f71a0aff4fcdf231c01d2475d9139acabde40491
| 1,135
|
py
|
Python
|
setup.py
|
hugis/robotframework-djangorobotlibrary
|
89400ea24a5d8ecf4c619fd39dc7d0a547c73fe7
|
[
"MIT"
] | null | null | null |
setup.py
|
hugis/robotframework-djangorobotlibrary
|
89400ea24a5d8ecf4c619fd39dc7d0a547c73fe7
|
[
"MIT"
] | null | null | null |
setup.py
|
hugis/robotframework-djangorobotlibrary
|
89400ea24a5d8ecf4c619fd39dc7d0a547c73fe7
|
[
"MIT"
] | null | null | null |
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="robotframework-djangorobotlibrary",
version="19.1a0",
description="A Robot Framework library for Django.",
long_description=long_description,
url="https://github.com/hugis/robotframework-djangorobotlibrary",
author="Peter Hyben",
author_email="peter.hyben@hugis.eu",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Environment :: Web Environment",
"Framework :: Robot Framework",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="robotframework django test",
packages=find_packages(),
install_requires=["Django>=2.2", "factory_boy", "robotframework"],
project_urls={
"Source": "https://github.com/hugis/robotframework-djangorobotlibrary"
},
)
| 31.527778
| 78
| 0.656388
|
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="robotframework-djangorobotlibrary",
version="19.1a0",
description="A Robot Framework library for Django.",
long_description=long_description,
url="https://github.com/hugis/robotframework-djangorobotlibrary",
author="Peter Hyben",
author_email="peter.hyben@hugis.eu",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Environment :: Web Environment",
"Framework :: Robot Framework",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="robotframework django test",
packages=find_packages(),
install_requires=["Django>=2.2", "factory_boy", "robotframework"],
project_urls={
"Source": "https://github.com/hugis/robotframework-djangorobotlibrary"
},
)
| true
| true
|
f71a0b9b1f1d422978ee7d52875c6f364e06e910
| 201
|
py
|
Python
|
api/words_vector/admin.py
|
leandrocamposcardoso/VetorDePalavras
|
76d442d0343e85a0edc55ca91b76480c30b3127a
|
[
"MIT"
] | null | null | null |
api/words_vector/admin.py
|
leandrocamposcardoso/VetorDePalavras
|
76d442d0343e85a0edc55ca91b76480c30b3127a
|
[
"MIT"
] | null | null | null |
api/words_vector/admin.py
|
leandrocamposcardoso/VetorDePalavras
|
76d442d0343e85a0edc55ca91b76480c30b3127a
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Logs
# Register your models here.
@admin.register(Logs)
class TextAdmin(admin.ModelAdmin):
list_display = ('files', 'vocabulary', 'vectors')
| 20.1
| 53
| 0.746269
|
from django.contrib import admin
from .models import Logs
@admin.register(Logs)
class TextAdmin(admin.ModelAdmin):
list_display = ('files', 'vocabulary', 'vectors')
| true
| true
|
f71a0bd6b7d9c82ddfd1fe5eeabf8b4cdd16ce54
| 1,108
|
py
|
Python
|
fake_fs.py
|
osteotek/yamr
|
d54a092a8520c4b3133db9a87d4fc013879fbf33
|
[
"MIT"
] | 3
|
2017-07-11T15:33:35.000Z
|
2021-03-11T22:14:33.000Z
|
fake_fs.py
|
osteotek/yamr
|
d54a092a8520c4b3133db9a87d4fc013879fbf33
|
[
"MIT"
] | null | null | null |
fake_fs.py
|
osteotek/yamr
|
d54a092a8520c4b3133db9a87d4fc013879fbf33
|
[
"MIT"
] | 1
|
2017-02-19T21:46:35.000Z
|
2017-02-19T21:46:35.000Z
|
import os
from enums import Status
class FakeFS:
def __init__(self, base_dir="/var/fake_fs"):
self.base_dir = base_dir
def get_chunk(self, path):
full_path = self.base_dir + path
if not os.path.isfile(full_path):
return {'status': Status.not_found}
data = None
with open(full_path, 'r') as f:
data = f.read()
return {'status': Status.ok, 'data': data}
def download_to(self, v_path, l_path):
full_path = self.base_dir + v_path
if not os.path.isfile(full_path):
return {'status': Status.not_found}
data = None
with open(full_path, 'r') as f:
data = f.read()
os.makedirs(os.path.dirname(l_path), exist_ok=True)
with open(l_path, "w") as f:
f.write(data)
return {'status': Status.ok}
def save(self, data, path):
full_path = self.base_dir + path
os.makedirs(os.path.dirname(full_path), exist_ok=True)
with open(full_path, 'w+') as f:
f.write(data)
return {'status': Status.ok}
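    # Minimal usage sketch (base_dir, path and data are placeholders):
    #     fs = FakeFS(base_dir="/tmp/fake_fs")
    #     fs.save("hello", "/chunks/0")
    #     fs.get_chunk("/chunks/0")   # -> {'status': Status.ok, 'data': 'hello'}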
| 25.767442
| 62
| 0.5713
|
import os
from enums import Status
class FakeFS:
def __init__(self, base_dir="/var/fake_fs"):
self.base_dir = base_dir
def get_chunk(self, path):
full_path = self.base_dir + path
if not os.path.isfile(full_path):
return {'status': Status.not_found}
data = None
with open(full_path, 'r') as f:
data = f.read()
return {'status': Status.ok, 'data': data}
def download_to(self, v_path, l_path):
full_path = self.base_dir + v_path
if not os.path.isfile(full_path):
return {'status': Status.not_found}
data = None
with open(full_path, 'r') as f:
data = f.read()
os.makedirs(os.path.dirname(l_path), exist_ok=True)
with open(l_path, "w") as f:
f.write(data)
return {'status': Status.ok}
def save(self, data, path):
full_path = self.base_dir + path
os.makedirs(os.path.dirname(full_path), exist_ok=True)
with open(full_path, 'w+') as f:
f.write(data)
return {'status': Status.ok}
| true
| true
|
f71a0c12785a008b991a752c3e60e2420e801e74
| 879
|
py
|
Python
|
MatchSocks.py
|
zubin-madon/PottyPunksNFT
|
d43234641ea3f30c963fb3af7edb249862a62788
|
[
"MIT"
] | null | null | null |
MatchSocks.py
|
zubin-madon/PottyPunksNFT
|
d43234641ea3f30c963fb3af7edb249862a62788
|
[
"MIT"
] | null | null | null |
MatchSocks.py
|
zubin-madon/PottyPunksNFT
|
d43234641ea3f30c963fb3af7edb249862a62788
|
[
"MIT"
] | null | null | null |
#Match socks to pant colour.
import numpy as np
from PIL import Image
import urllib.request
import os
directory = 'layers/layers_for_art_engine/Pant'
for filename in os.listdir(directory):
image = os.path.join(directory, filename)
pant = Image.open(image)
socks = Image.open('layers/socks.png') #change the file path with your own of course!
width, height = socks.size
pant_color = pant.getpixel((200, 350))
for x in range(width):
for y in range(height):
current_color = socks.getpixel((x, y))
r = pant_color[0]
g = pant_color[1]
b = pant_color[2]
a = current_color[-1]
if current_color != (255, 255, 255, 0):
socks.putpixel((x, y), (r, g, b, a))
pant.paste(socks, (0, 0), socks) #combine the new coloured socks with the pant layer.
pant.save(image)
| 35.16
| 89
| 0.622298
|
import numpy as np
from PIL import Image
import urllib.request
import os
directory = 'layers/layers_for_art_engine/Pant'
for filename in os.listdir(directory):
image = os.path.join(directory, filename)
pant = Image.open(image)
socks = Image.open('layers/socks.png')
width, height = socks.size
pant_color = pant.getpixel((200, 350))
for x in range(width):
for y in range(height):
current_color = socks.getpixel((x, y))
r = pant_color[0]
g = pant_color[1]
b = pant_color[2]
a = current_color[-1]
if current_color != (255, 255, 255, 0):
socks.putpixel((x, y), (r, g, b, a))
pant.paste(socks, (0, 0), socks)
pant.save(image)
| true
| true
|
f71a0cdd77d197858c517e9b653ef4a7fe7e5d24
| 1,462
|
py
|
Python
|
gae/third_party/poster/__init__.py
|
Purus/LaunchKitDocker
|
b8aaf9f1d8943a76ae7e0a81e15e6bebd4b9b08e
|
[
"Apache-2.0"
] | 2,341
|
2016-07-27T17:23:23.000Z
|
2022-03-28T03:55:15.000Z
|
gae/third_party/poster/__init__.py
|
Purus/LaunchKitDocker
|
b8aaf9f1d8943a76ae7e0a81e15e6bebd4b9b08e
|
[
"Apache-2.0"
] | 52
|
2016-07-27T23:12:21.000Z
|
2022-03-11T23:17:41.000Z
|
gae/third_party/poster/__init__.py
|
Purus/LaunchKitDocker
|
b8aaf9f1d8943a76ae7e0a81e15e6bebd4b9b08e
|
[
"Apache-2.0"
] | 324
|
2016-07-27T18:34:53.000Z
|
2022-03-25T08:56:24.000Z
|
# Copyright (c) 2011 Chris AtLee
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""poster module
Support for streaming HTTP uploads, and multipart/form-data encoding
```poster.version``` is a 3-tuple of integers representing the version number.
New releases of poster will always have a version number that compares greater
than an older version of poster.
New in version 0.6."""
import streaminghttp
import encode
version = (0, 8, 1) # Thanks JP!
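# Version tuples compare element-wise, e.g. (0, 8, 1) > (0, 6, 0) is True,
# which is what guarantees that newer releases compare greater than older ones.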
| 44.30303
| 79
| 0.776334
|
import streaminghttp
import encode
version = (0, 8, 1)
| true
| true
|
f71a0d63e90a61ad5e75bd468ec2c1a1b9348342
| 5,306
|
py
|
Python
|
test/functional/abc-p2p-avalanche.py
|
kryvel/bitcoin-abc
|
6330d8ccc8b1b720c42c8c9239dadc8240ca5025
|
[
"MIT"
] | null | null | null |
test/functional/abc-p2p-avalanche.py
|
kryvel/bitcoin-abc
|
6330d8ccc8b1b720c42c8c9239dadc8240ca5025
|
[
"MIT"
] | null | null | null |
test/functional/abc-p2p-avalanche.py
|
kryvel/bitcoin-abc
|
6330d8ccc8b1b720c42c8c9239dadc8240ca5025
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the resolution of forks via avalanche."""
import random
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.messages import AvalancheVote, CInv, msg_avapoll
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, wait_until
from test_framework import schnorr
BLOCK_ACCEPTED = 0
BLOCK_REJECTED = 1
BLOCK_UNKNOWN = -1
class TestNode(P2PInterface):
def __init__(self):
self.last_avaresponse = None
super().__init__()
def on_avaresponse(self, message):
self.last_avaresponse = message.response
def send_poll(self, hashes):
msg = msg_avapoll()
for h in hashes:
msg.poll.invs.append(CInv(2, h))
self.send_message(msg)
def wait_for_avaresponse(self, timeout=10):
self.sync_with_ping()
def test_function():
m = self.last_message.get("avaresponse")
return m is not None and m != self.last_avaresponse
wait_until(test_function, timeout=timeout, lock=mininode_lock)
class AvalancheTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-enableavalanche=1', '-avacooldown=0']]
def run_test(self):
node = self.nodes[0]
# Create a fake node and connect it to our real node.
poll_node = TestNode()
node.add_p2p_connection(poll_node)
poll_node.wait_for_verack()
poll_node.sync_with_ping()
# Generate many block and poll for them.
address = node.get_deterministic_priv_key().address
node.generatetoaddress(100, address)
# Get the key so we can verify signatures.
avakey = bytes.fromhex(node.getavalanchekey())
self.log.info("Poll for the chain tip...")
best_block_hash = int(node.getbestblockhash(), 16)
poll_node.send_poll([best_block_hash])
poll_node.wait_for_avaresponse()
def assert_response(response, expected):
r = response.response
assert_equal(r.cooldown, 0)
# Verify signature.
assert schnorr.verify(response.sig, avakey, r.get_hash())
votes = r.votes
self.log.info("response: {}".format(repr(response)))
assert_equal(len(votes), len(expected))
for i in range(0, len(votes)):
assert_equal(repr(votes[i]), repr(expected[i]))
assert_response(poll_node.last_avaresponse, [
AvalancheVote(BLOCK_ACCEPTED, best_block_hash)])
self.log.info("Poll for a selection of blocks...")
various_block_hashes = [
int(node.getblockhash(0), 16),
int(node.getblockhash(1), 16),
int(node.getblockhash(10), 16),
int(node.getblockhash(25), 16),
int(node.getblockhash(42), 16),
int(node.getblockhash(96), 16),
int(node.getblockhash(99), 16),
int(node.getblockhash(100), 16),
]
poll_node.send_poll(various_block_hashes)
poll_node.wait_for_avaresponse()
assert_response(poll_node.last_avaresponse,
[AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes])
self.log.info(
"Poll for a selection of blocks, but some are now invalid...")
invalidated_block = node.getblockhash(75)
node.invalidateblock(invalidated_block)
# We need to send the coin to a new address in order to make sure we do
# not regenerate the same block.
node.generatetoaddress(
30, 'bchreg:pqv2r67sgz3qumufap3h2uuj0zfmnzuv8v7ej0fffv')
node.reconsiderblock(invalidated_block)
poll_node.send_poll(various_block_hashes)
poll_node.wait_for_avaresponse()
assert_response(poll_node.last_avaresponse,
[AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes[:5]] +
[AvalancheVote(BLOCK_REJECTED, h) for h in various_block_hashes[-3:]])
self.log.info("Poll for unknown blocks...")
various_block_hashes = [
int(node.getblockhash(0), 16),
int(node.getblockhash(25), 16),
int(node.getblockhash(42), 16),
various_block_hashes[5],
various_block_hashes[6],
various_block_hashes[7],
random.randrange(1 << 255, (1 << 256) - 1),
random.randrange(1 << 255, (1 << 256) - 1),
random.randrange(1 << 255, (1 << 256) - 1),
]
poll_node.send_poll(various_block_hashes)
poll_node.wait_for_avaresponse()
assert_response(poll_node.last_avaresponse,
[AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes[:3]] +
[AvalancheVote(BLOCK_REJECTED, h) for h in various_block_hashes[3:6]] +
[AvalancheVote(BLOCK_UNKNOWN, h) for h in various_block_hashes[-3:]])
if __name__ == '__main__':
AvalancheTest().main()
| 37.366197
| 95
| 0.637392
|
import random
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.messages import AvalancheVote, CInv, msg_avapoll
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, wait_until
from test_framework import schnorr
BLOCK_ACCEPTED = 0
BLOCK_REJECTED = 1
BLOCK_UNKNOWN = -1
class TestNode(P2PInterface):
def __init__(self):
self.last_avaresponse = None
super().__init__()
def on_avaresponse(self, message):
self.last_avaresponse = message.response
def send_poll(self, hashes):
msg = msg_avapoll()
for h in hashes:
msg.poll.invs.append(CInv(2, h))
self.send_message(msg)
def wait_for_avaresponse(self, timeout=10):
self.sync_with_ping()
def test_function():
m = self.last_message.get("avaresponse")
return m is not None and m != self.last_avaresponse
wait_until(test_function, timeout=timeout, lock=mininode_lock)
class AvalancheTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-enableavalanche=1', '-avacooldown=0']]
def run_test(self):
node = self.nodes[0]
poll_node = TestNode()
node.add_p2p_connection(poll_node)
poll_node.wait_for_verack()
poll_node.sync_with_ping()
address = node.get_deterministic_priv_key().address
node.generatetoaddress(100, address)
avakey = bytes.fromhex(node.getavalanchekey())
self.log.info("Poll for the chain tip...")
best_block_hash = int(node.getbestblockhash(), 16)
poll_node.send_poll([best_block_hash])
poll_node.wait_for_avaresponse()
def assert_response(response, expected):
r = response.response
assert_equal(r.cooldown, 0)
assert schnorr.verify(response.sig, avakey, r.get_hash())
votes = r.votes
self.log.info("response: {}".format(repr(response)))
assert_equal(len(votes), len(expected))
for i in range(0, len(votes)):
assert_equal(repr(votes[i]), repr(expected[i]))
assert_response(poll_node.last_avaresponse, [
AvalancheVote(BLOCK_ACCEPTED, best_block_hash)])
self.log.info("Poll for a selection of blocks...")
various_block_hashes = [
int(node.getblockhash(0), 16),
int(node.getblockhash(1), 16),
int(node.getblockhash(10), 16),
int(node.getblockhash(25), 16),
int(node.getblockhash(42), 16),
int(node.getblockhash(96), 16),
int(node.getblockhash(99), 16),
int(node.getblockhash(100), 16),
]
poll_node.send_poll(various_block_hashes)
poll_node.wait_for_avaresponse()
assert_response(poll_node.last_avaresponse,
[AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes])
self.log.info(
"Poll for a selection of blocks, but some are now invalid...")
invalidated_block = node.getblockhash(75)
node.invalidateblock(invalidated_block)
node.generatetoaddress(
30, 'bchreg:pqv2r67sgz3qumufap3h2uuj0zfmnzuv8v7ej0fffv')
node.reconsiderblock(invalidated_block)
poll_node.send_poll(various_block_hashes)
poll_node.wait_for_avaresponse()
assert_response(poll_node.last_avaresponse,
[AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes[:5]] +
[AvalancheVote(BLOCK_REJECTED, h) for h in various_block_hashes[-3:]])
self.log.info("Poll for unknown blocks...")
various_block_hashes = [
int(node.getblockhash(0), 16),
int(node.getblockhash(25), 16),
int(node.getblockhash(42), 16),
various_block_hashes[5],
various_block_hashes[6],
various_block_hashes[7],
random.randrange(1 << 255, (1 << 256) - 1),
random.randrange(1 << 255, (1 << 256) - 1),
random.randrange(1 << 255, (1 << 256) - 1),
]
poll_node.send_poll(various_block_hashes)
poll_node.wait_for_avaresponse()
assert_response(poll_node.last_avaresponse,
[AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes[:3]] +
[AvalancheVote(BLOCK_REJECTED, h) for h in various_block_hashes[3:6]] +
[AvalancheVote(BLOCK_UNKNOWN, h) for h in various_block_hashes[-3:]])
if __name__ == '__main__':
AvalancheTest().main()
| true
| true
|
f71a0d98d569fd7b3be4fc2f4b330fae23d90e4b
| 132,009
|
py
|
Python
|
tofu/geom/_core_optics.py
|
Didou09/tofu
|
4a4e1f058bab8e7556ed9d518f90807cec605476
|
[
"MIT"
] | 6
|
2016-09-15T17:01:19.000Z
|
2017-03-06T22:53:10.000Z
|
tofu/geom/_core_optics.py
|
Didou09/tofu
|
4a4e1f058bab8e7556ed9d518f90807cec605476
|
[
"MIT"
] | 9
|
2016-09-14T17:23:52.000Z
|
2017-04-13T07:30:07.000Z
|
tofu/geom/_core_optics.py
|
Didou09/tofu
|
4a4e1f058bab8e7556ed9d518f90807cec605476
|
[
"MIT"
] | null | null | null |
"""
This module is the geometrical part of the ToFu general package
It includes all functions and object classes necessary for tomography on Tokamaks
"""
# Built-in
import sys
import os
import warnings
import copy
# Common
import numpy as np
import scipy.interpolate as scpinterp
import scipy.stats as scpstats
import datetime as dtm
import matplotlib.pyplot as plt
import matplotlib as mpl
# ToFu-specific
from tofu import __version__ as __version__
import tofu.pathfile as tfpf
import tofu.utils as utils
from . import _def as _def
from . import _GG as _GG
from . import _core
from . import _check_optics
from . import _comp_optics as _comp_optics
from . import _plot_optics as _plot_optics
import tofu.spectro._rockingcurve as _rockingcurve
__all__ = ['CrystalBragg']
_Type = 'Tor'
_NTHREADS = 16
# rotate / translate instance
_RETURN_COPY = False
_USE_NON_PARALLELISM = True
"""
###############################################################################
###############################################################################
Ves class and functions
###############################################################################
###############################################################################
"""
class CrystalBragg(utils.ToFuObject):
""" A class defining crystals for Bragg diffraction
A crystal can be of Type flat, cylindrical or spherical
It is characterized by its:
- geometry (Type, dimensions, curvature radii and position/orientation)
- Material and lattice
- Bragg parameters (angle vs lambda)
Parameters
----------
Id : str / tfpf.ID
A name string or a pre-built tfpf.ID class to be used to identify this
particular instance, if a string is provided, it is fed to tfpf.ID()
    dgeom : dict
        Dictionary describing the crystal geometry: Type, outline, summit,
        center, half-extent, curvature radius and orientation vectors
    dmat : dict
        Dictionary describing the crystal material and lattice (formula,
        symmetry, cut, density, inter-plane distance d)
    dbragg : dict
        Dictionary of Bragg parameters (reference wavelength / angle and
        rocking curve)
SavePath : None / str
If provided, forces the default saving path of the object to the
provided value
"""
# Fixed (class-wise) dictionary of default properties
_ddef = {
'Id': {
'shot': 0, 'Exp': 'dummy', 'Diag': 'dummy',
'include': [
'Mod', 'Cls', 'Exp', 'Diag', 'Name', 'shot', 'version',
],
},
'dgeom': {'Type': 'sph', 'Typeoutline': 'rect'},
'dmat': {},
'dbragg': {'braggref': np.pi/4.},
'dmisc': {'color': 'k'},
}
_dplot = {'cross':{'Elt':'P',
'dP':{'color':'k','lw':2},
'dI':{'color':'k','ls':'--','marker':'x','ms':8,'mew':2},
'dBs':{'color':'b','ls':'--','marker':'x','ms':8,'mew':2},
'dBv':{'color':'g','ls':'--','marker':'x','ms':8,'mew':2},
'dVect':{'color':'r','scale':10}},
'hor':{'Elt':'P',
'dP':{'color':'k','lw':2},
'dI':{'color':'k','ls':'--'},
'dBs':{'color':'b','ls':'--'},
'dBv':{'color':'g','ls':'--'},
'Nstep':50},
'3d':{}}
# _DEFLAMB = 3.971561e-10
# _DEFNPEAKS = 12
# _DREFLECT_DTYPES = {'specular':0, 'diffusive':1, 'ccube':2}
    # Does not exist before Python 3.6 !!!
def __init_subclass__(cls, color='k', **kwdargs):
# Python 2
super(CrystalBragg,cls).__init_subclass__(**kwdargs)
# Python 3
#super().__init_subclass__(**kwdargs)
cls._ddef = copy.deepcopy(CrystalBragg._ddef)
cls._dplot = copy.deepcopy(CrystalBragg._dplot)
cls._set_color_ddef(cls._color)
@classmethod
def _set_color_ddef(cls, color):
cls._ddef['dmisc']['color'] = mpl.colors.to_rgba(color)
def __init__(self, dgeom=None, dmat=None, dbragg=None,
Id=None, Name=None, Exp=None, Diag=None, shot=None,
fromdict=None, sep=None,
SavePath=os.path.abspath('./'),
SavePath_Include=tfpf.defInclude, color=None):
# To replace __init_subclass__ for Python 2
if sys.version[0]=='2':
self._dstrip = utils.ToFuObjectBase._dstrip.copy()
self.__class__._strip_init()
# Create a dplot at instance level
self._dplot = copy.deepcopy(self.__class__._dplot)
kwdargs = locals()
del kwdargs['self']
# super()
super(CrystalBragg,self).__init__(**kwdargs)
def _reset(self):
# super()
super(CrystalBragg,self)._reset()
self._dgeom = dict.fromkeys(self._get_keys_dgeom())
self._dmat = dict.fromkeys(self._get_keys_dmat())
self._dbragg = dict.fromkeys(self._get_keys_dbragg())
self._dmisc = dict.fromkeys(self._get_keys_dmisc())
#self._dplot = copy.deepcopy(self.__class__._ddef['dplot'])
@classmethod
def _checkformat_inputs_Id(cls, Id=None, Name=None,
Exp=None, Diag=None, shot=None, Type=None,
include=None,
**kwdargs):
if Id is not None:
assert isinstance(Id,utils.ID)
Name, Exp, Type = Id.Name, Id.Exp, Id.Type
if Type is None:
Type = cls._ddef['dgeom']['Type']
if Exp is None:
Exp = cls._ddef['Id']['Exp']
if Diag is None:
Diag = cls._ddef['Id']['Diag']
if shot is None:
shot = cls._ddef['Id']['shot']
if include is None:
include = cls._ddef['Id']['include']
dins = {'Name':{'var':Name, 'cls':str},
'Exp': {'var':Exp, 'cls':str},
'Diag': {'var':Diag, 'cls':str},
'shot': {'var':shot, 'cls':int},
'Type': {'var':Type, 'in':['sph']},
'include':{'var':include, 'listof':str}}
dins, err, msg = cls._check_InputsGeneric(dins)
if err:
raise Exception(msg)
kwdargs.update({'Name':Name, 'shot':shot,
'Exp':Exp, 'Diag':Diag, 'Type':Type,
'include':include})
return kwdargs
###########
# Get largs
###########
@staticmethod
def _get_largs_dgeom(sino=True):
largs = ['dgeom']
return largs
@staticmethod
def _get_largs_dmat():
largs = ['dmat']
return largs
@staticmethod
def _get_largs_dbragg():
largs = ['dbragg']
return largs
@staticmethod
def _get_largs_dmisc():
largs = ['color']
return largs
###########
    # Get keys of dictionaries
###########
@staticmethod
def _get_keys_dgeom():
lk = ['Type', 'Typeoutline',
'summit', 'center', 'extenthalf', 'surface',
'nin', 'nout', 'e1', 'e2', 'rcurve',
'move', 'move_param', 'move_kwdargs']
return lk
@staticmethod
def _get_keys_dmat():
lk = ['formula', 'density', 'symmetry',
'lengths', 'angles', 'cut', 'd',
'alpha', 'beta', 'nin', 'nout', 'e1', 'e2']
return lk
@staticmethod
def _get_keys_dbragg():
lk = ['rockingcurve', 'lambref', 'braggref']
return lk
@staticmethod
def _get_keys_dmisc():
lk = ['color']
return lk
###########
# _init
###########
def _init(self, dgeom=None, dmat=None, dbragg=None,
color=None, **kwdargs):
allkwds = dict(locals(), **kwdargs)
largs = self._get_largs_dgeom()
kwds = self._extract_kwdargs(allkwds, largs)
self.set_dgeom(**kwds)
largs = self._get_largs_dmat()
kwds = self._extract_kwdargs(allkwds, largs)
self.set_dmat(**kwds)
largs = self._get_largs_dbragg()
kwds = self._extract_kwdargs(allkwds, largs)
self.set_dbragg(**kwds)
largs = self._get_largs_dmisc()
kwds = self._extract_kwdargs(allkwds, largs)
self._set_dmisc(**kwds)
self._dstrip['strip'] = 0
###########
# set dictionaries
###########
def set_dgeom(self, dgeom=None):
self._dgeom = _check_optics._checkformat_dgeom(
dgeom=dgeom, ddef=self._ddef['dgeom'],
valid_keys=self._get_keys_dgeom(),
)
if self._dgeom['move'] is not None:
self.set_move(
move=self._dgeom['move'],
param=self._dgeom['move_param'],
**self._dgeom['move_kwdargs'],
)
def set_dmat(self, dmat=None):
self._dmat = _check_optics._checkformat_dmat(
dmat=dmat, dgeom=self._dgeom,
ddef=self._ddef['dmat'],
valid_keys=self._get_keys_dmat()
)
def set_dbragg(self, dbragg=None):
self._dbragg = _check_optics._checkformat_dbragg(
dbragg=dbragg,
ddef=self._ddef['dbragg'],
valid_keys=self._get_keys_dbragg(),
dmat=self._dmat,
)
def _set_color(self, color=None):
color = _check_optics._checkformat_inputs_dmisc(
color=color, ddef=self._ddef,
)
self._dmisc['color'] = color
self._dplot['cross']['dP']['color'] = color
self._dplot['hor']['dP']['color'] = color
# self._dplot['3d']['dP']['color'] = color
def _set_dmisc(self, color=None):
self._set_color(color)
###########
# strip dictionaries
###########
def _strip_dgeom(self, lkeep=None):
lkeep = self._get_keys_dgeom()
utils.ToFuObject._strip_dict(self._dgeom, lkeep=lkeep)
def _strip_dmat(self, lkeep=None):
lkeep = self._get_keys_dmat()
utils.ToFuObject._strip_dict(self._dmat, lkeep=lkeep)
def _strip_dbragg(self, lkeep=None):
lkeep = self._get_keys_dbragg()
utils.ToFuObject._strip_dict(self._dbragg, lkeep=lkeep)
def _strip_dmisc(self, lkeep=['color']):
utils.ToFuObject._strip_dict(self._dmisc, lkeep=lkeep)
###########
# rebuild dictionaries
###########
def _rebuild_dgeom(self, lkeep=None):
lkeep = self._get_keys_dgeom()
reset = utils.ToFuObject._test_Rebuild(self._dgeom, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(self._dgeom,
lkeep=lkeep, dname='dgeom')
self._set_dgeom(dgeom=self._dgeom)
def _rebuild_dmat(self, lkeep=None):
lkeep = self._get_keys_dmat()
reset = utils.ToFuObject._test_Rebuild(self._dmat, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(self._dmat,
lkeep=lkeep, dname='dmat')
self.set_dmat(self._dmat)
def _rebuild_dbragg(self, lkeep=None):
lkeep = self._get_keys_dbragg()
reset = utils.ToFuObject._test_Rebuild(self._dbragg, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(self._dbragg,
lkeep=lkeep, dname='dbragg')
self.set_dbragg(self._dbragg)
def _rebuild_dmisc(self, lkeep=['color']):
reset = utils.ToFuObject._test_Rebuild(self._dmisc, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(self._dmisc,
lkeep=lkeep, dname='dmisc')
self._set_dmisc(color=self.dmisc['color'])
###########
# _strip and get/from dict
###########
@classmethod
def _strip_init(cls):
cls._dstrip['allowed'] = [0,1]
nMax = max(cls._dstrip['allowed'])
doc = """
1: Remove nothing"""
doc = utils.ToFuObjectBase.strip.__doc__.format(doc,nMax)
if sys.version[0]=='2':
cls.strip.__func__.__doc__ = doc
else:
cls.strip.__doc__ = doc
def strip(self, strip=0):
# super()
super(CrystalBragg, self).strip(strip=strip)
def _strip(self, strip=0):
if strip==0:
self._rebuild_dgeom()
self._rebuild_dmat()
self._rebuild_dbragg()
self._rebuild_dmisc()
else:
self._strip_dgeom()
self._strip_dmat()
self._strip_dbragg()
self._strip_dmisc()
def _to_dict(self):
dout = {'dgeom':{'dict':self._dgeom, 'lexcept':None},
'dmat':{'dict':self._dmat, 'lexcept':None},
'dbragg':{'dict':self._dbragg, 'lexcept':None},
'dmisc':{'dict':self._dmisc, 'lexcept':None},
'dplot':{'dict':self._dplot, 'lexcept':None}}
return dout
def _from_dict(self, fd):
self._dgeom.update(**fd.get('dgeom', {}))
self._dmat.update(**fd.get('dmat', {}))
self._dbragg.update(**fd.get('dbragg', {}))
self._dmisc.update(**fd.get('dmisc', {}))
self._dplot.update(**fd.get('dplot', {}))
# -----------
# Properties
# -----------
@property
def Type(self):
"""Return the type of structure """
return self._Id.Type
@property
def dgeom(self):
return self._dgeom
@property
def dmat(self):
"""Return the polygon defining the structure cross-section"""
return self._dmat
@property
def dbragg(self):
"""Return the polygon defining the structure cross-section"""
return self._dbragg
@property
def dmisc(self):
return self._dmisc
# @property
# def nin(self):
# return self._dgeom['nin']
# @property
# def nout(self):
# return self._dgeom['nout']
# @property
# def e1(self):
# return self._dgeom['e1']
# @property
# def e2(self):
# return self._dgeom['e2']
@property
def summit(self):
return self._dgeom['summit']
@property
def center(self):
return self._dgeom['center']
@property
def ismobile(self):
return self._dgeom['move'] not in [None, False]
@property
def rockingcurve(self):
if self._dbragg.get('rockingcurve') is not None:
if self._dbragg['rockingcurve'].get('type') is not None:
return self._dbragg['rockingcurve']
raise Exception("rockingcurve was not set!")
# --------------------------------------
# methods for getting unit vectors basis
# --------------------------------------
def get_unit_vectors(self, use_non_parallelism=None):
""" Return the unit vectors (direct orthonormal basis)
Depending on:
use_non_parallelism: True => return the geometrical basis
use_non_parallelism: False => return the mesh basis
"""
if use_non_parallelism is None:
use_non_parallelism = _USE_NON_PARALLELISM
if use_non_parallelism is True:
nout = self._dmat['nout']
e1 = self._dmat['e1']
e2 = self._dmat['e2']
else:
nout = self._dgeom['nout']
e1 = self._dgeom['e1']
e2 = self._dgeom['e2']
return nout, e1, e2, use_non_parallelism
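        # e.g. (sketch, with `cryst` a CrystalBragg instance):
        #     nout, e1, e2, _ = cryst.get_unit_vectors(use_non_parallelism=True)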
# -----------------
# methods for color
# -----------------
def set_color(self, col):
self._set_color(col)
def get_color(self):
return self._dmisc['color']
# -----------------
# methods for printing
# -----------------
def get_summary(self, sep=' ', line='-', just='l',
table_sep=None, verb=True, return_=False):
""" Summary description of the object content """
# -----------------------
# Build material
col0 = [
'formula', 'symmetry', 'cut', 'density',
'd (A)',
'bragg({:9.6} A) (deg)'.format(self._dbragg['lambref']*1e10),
'Type', 'outline', 'surface (cm²)', 'rcurve', 'rocking curve',
]
ar0 = [self._dmat['formula'], self._dmat['symmetry'],
str(self._dmat['cut']), str(self._dmat['density']),
'{0:5.3f}'.format(self._dmat['d']*1.e10),
str(self._dbragg['braggref']*180./np.pi),
self._dgeom['Type'], self._dgeom['Typeoutline'],
'{0:5.1f}'.format(self._dgeom['surface']*1.e4),
'{0:6.3f}'.format(self._dgeom['rcurve'])]
try:
ar0.append(self.rockingcurve['type'])
except Exception as err:
ar0.append('None')
# -----------------------
# Build geometry
col1 = ['half-extent', 'summit', 'center', 'nout', 'e1',
'alpha', 'beta']
ar1 = [
str(np.round(self._dgeom['extenthalf'], decimals=3)),
str(np.round(self._dgeom['summit'], decimals=2)),
str(np.round(self._dgeom['center'], decimals=2)),
str(np.round(self._dmat['nout'], decimals=3)),
str(np.round(self._dmat['e1'], decimals=3)),
str(np.round(self._dmat['alpha'], decimals=6)),
str(np.round(self._dmat['beta'], decimals=6)),
]
if self._dgeom.get('move') not in [None, False]:
col1 += ['move', 'param']
ar1 += [self._dgeom['move'],
str(np.round(self._dgeom['move_param'], decimals=5))]
if self._dmisc.get('color') is not None:
col1.append('color')
ar1.append(str(self._dmisc['color']))
lcol = [col0, col1]
lar = [ar0, ar1]
return self._get_summary(lar, lcol,
sep=sep, line=line, table_sep=table_sep,
verb=verb, return_=return_)
# -----------------
# methods for moving
# -----------------
def _update_or_copy(self, dgeom, pinhole=None,
return_copy=None,
name=None, diag=None, shot=None):
if return_copy is None:
return_copy = _RETURN_COPY
for kk, vv in self._dgeom.items():
if kk not in dgeom.keys():
dgeom[kk] = vv
if return_copy is True:
if name is None:
name = self.Id.Name + 'copy'
if diag is None:
diag = self.Id.Diag
if shot is None:
                shot = self.Id.shot
return self.__class__(dgeom=dgeom,
dbragg=self._dbragg,
dmat=self._dmat,
color=self._dmisc['color'],
Exp=self.Id.Exp,
Diag=diag,
Name=name,
shot=shot,
SavePath=self.Id.SavePath)
else:
dgeom0 = self.dgeom
try:
self.set_dgeom(dgeom=dgeom)
self._dmat = _check_optics._checkformat_dmat(
dmat={
k0: v0 for k0, v0 in self._dmat.items()
if k0 not in ['nin', 'nout', 'e1', 'e2']
},
dgeom=self._dgeom,
ddef=self._ddef['dmat'],
valid_keys=self._get_keys_dmat()
)
except Exception as err:
# Make sure instance does not move
self.set_dgeom(dgeom=dgeom0)
msg = (str(err)
+ "\nAn exception occured during updating\n"
+ " => instance unmoved")
raise Exception(msg)
def _rotate_or_translate(self, func, **kwdargs):
pts = np.array([self._dgeom['summit'], self._dgeom['center']]).T
if 'rotate' in func.__name__:
vect = np.array([
self._dgeom['nout'],
self._dgeom['e1'],
self._dgeom['e2']
]).T
pts, vect = func(pts=pts, vect=vect, **kwdargs)
return {'summit': pts[:, 0], 'center': pts[:, 1],
'nout': vect[:, 0], 'nin': -vect[:, 0],
'e1': vect[:, 1], 'e2': vect[:, 2]}
else:
pts = func(pts=pts, **kwdargs)
return {'summit': pts[:, 0], 'center': pts[:, 1]}
def translate_in_cross_section(self, distance=None, direction_rz=None,
phi=None,
return_copy=None,
diag=None, name=None, shot=None):
""" Translate the instance in the cross-section """
if phi is None:
phi = np.arctan2(*self.summit[1::-1])
msg = ("Poloidal plane was not explicitely specified\n"
+ " => phi set to self.summit's phi ({})".format(phi))
warnings.warn(msg)
dgeom = self._rotate_or_translate(
self._translate_pts_poloidal_plane,
phi=phi, direction_rz=direction_rz, distance=distance)
return self._update_or_copy(dgeom,
return_copy=return_copy,
diag=diag, name=name, shot=shot)
def translate_3d(self, distance=None, direction=None,
return_copy=None,
diag=None, name=None, shot=None):
""" Translate the instance in provided direction """
dgeom = self._rotate_or_translate(
self._translate_pts_3d,
direction=direction, distance=distance)
return self._update_or_copy(dgeom,
return_copy=return_copy,
diag=diag, name=name, shot=shot)
def rotate_in_cross_section(self, angle=None, axis_rz=None,
phi=None,
return_copy=None,
diag=None, name=None, shot=None):
""" Rotate the instance in the cross-section """
if phi is None:
phi = np.arctan2(*self.summit[1::-1])
msg = ("Poloidal plane was not explicitely specified\n"
+ " => phi set to self.summit's phi ({})".format(phi))
warnings.warn(msg)
dgeom = self._rotate_or_translate(
self._rotate_pts_vectors_in_poloidal_plane,
axis_rz=axis_rz, angle=angle, phi=phi)
return self._update_or_copy(dgeom,
return_copy=return_copy,
diag=diag, name=name, shot=shot)
def rotate_around_torusaxis(self, angle=None,
return_copy=None,
diag=None, name=None, shot=None):
""" Rotate the instance around the torus axis """
dgeom = self._rotate_or_translate(
self._rotate_pts_vectors_around_torusaxis,
angle=angle)
return self._update_or_copy(dgeom,
return_copy=return_copy,
diag=diag, name=name, shot=shot)
def rotate_around_3daxis(self, angle=None, axis=None,
return_copy=None,
diag=None, name=None, shot=None):
""" Rotate the instance around the provided 3d axis """
dgeom = self._rotate_or_translate(
self._rotate_pts_vectors_around_3daxis,
axis=axis, angle=angle)
return self._update_or_copy(dgeom,
return_copy=return_copy,
diag=diag, name=name, shot=shot)
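    # Illustrative usage of the moving methods (sketch, commented out;
    # `cryst` is a hypothetical CrystalBragg instance, values are made up):
    #
    #     # rotate a copy of the crystal by 0.1 rad around the torus axis
    #     cryst2 = cryst.rotate_around_torusaxis(angle=0.1, return_copy=True)
    #     # translate the original in-place by 1 cm along a 3d direction
    #     cryst.translate_3d(distance=0.01, direction=[0., 0., 1.])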
def set_move(self, move=None, param=None, **kwdargs):
""" Set the default movement parameters
A default movement can be set for the instance, it can be any of the
pre-implemented movement (rotations or translations)
This default movement is the one that will be called when using
self.move()
Specify the type of movement via the name of the method (passed as a
str to move)
Specify, for the geometry of the instance at the time of defining this
default movement, the current value of the associated movement
parameter (angle / distance). This is used to set an arbitrary
difference for user who want to use absolute position values
The desired incremental movement to be performed when calling self.move
will be deduced by substracting the stored param value to the provided
param value. Just set the current param value to 0 if you don't care
about a custom absolute reference.
kwdargs must be a parameters relevant to the chosen method (axis,
direction...)
e.g.:
self.set_move(move='rotate_around_3daxis',
param=0.,
axis=([0.,0.,0.], [1.,0.,0.]))
self.set_move(move='translate_3d',
param=0.,
direction=[0.,1.,0.])
"""
move, param, kwdargs = self._checkformat_set_move(move, param, kwdargs)
self._dgeom['move'] = move
self._dgeom['move_param'] = param
if isinstance(kwdargs, dict) and len(kwdargs) == 0:
kwdargs = None
self._dgeom['move_kwdargs'] = kwdargs
def move(self, param):
""" Set new position to desired param according to default movement
Can only be used if default movement was set before
See self.set_move()
"""
param = self._move(param, dictname='_dgeom')
self._dgeom['move_param'] = param
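    # Illustrative usage (sketch, commented out; `cryst` is a hypothetical
    # CrystalBragg instance):
    #
    #     # register a default movement, with param = 0 as the reference...
    #     cryst.set_move(move='rotate_around_torusaxis', param=0.)
    #     # ...then move() to an absolute param value (here 0.05 rad)
    #     cryst.move(0.05)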
# -----------------
# methods for rocking curve
# -----------------
def get_rockingcurve_func(self, lamb=None, n=None):
""" Return the rocking curve function
Also return the wavelength (lamb) (in meters) for which it was computed
and the associated reference bragg angle (in rad)
"""
drock = self.rockingcurve
if drock['type'] == 'tabulated-1d':
if lamb is not None and lamb != drock['lamb']:
msg = ("rocking curve was tabulated only for:\n"
+ "\tlamb = {} m\n".format(lamb)
+ " => Please let lamb=None")
raise Exception(msg)
lamb = drock['lamb']
bragg = self._checkformat_bragglamb(lamb=lamb, n=n)
func = scpinterp.interp1d(drock['dangle'] + bragg, drock['value'],
kind='linear', bounds_error=False,
fill_value=0, assume_sorted=True)
elif drock['type'] == 'tabulated-2d':
lmin, lmax = drock['lamb'].min(), drock['lamb'].max()
if lamb is None:
lamb = drock['lamb']
if lamb < lmin or lamb > lmax:
msg = ("rocking curve was tabulated only in interval:\n"
+ "\tlamb in [{}; {}] m\n".format(lmin, lmax)
+ " => Please set lamb accordingly")
raise Exception(msg)
bragg = self._checkformat_bragglamb(lamb=lamb, n=n)
def func(angle, lamb=lamb, bragg=bragg, drock=drock):
return scpinterp.interp2d(drock['dangle']+bragg, drock['lamb'],
drock['value'], kind='linear',
bounds_error=False, fill_value=0,
assume_sorted=True)(angle, lamb)
else:
            # TBC: Lorentzian model below kept as a draft (unreachable)
            raise NotImplementedError
def func(angle, d=d, delta_bragg=delta_bragg,
Rmax=drock['Rmax'], sigma=drock['sigma']):
core = sigma**2/((angle - (bragg+delta_bragg))**2 + sigma**2)
if Rmax is None:
return core/(sigma*np.pi)
else:
return Rmax*core
return func, lamb, bragg
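    # Illustrative usage (sketch, commented out; `cryst` is a hypothetical
    # CrystalBragg instance with a tabulated rocking curve):
    #
    #     func, lamb, bragg = cryst.get_rockingcurve_func()
    #     # evaluate the reflecting power on angles around the bragg angle
    #     angles = bragg + np.linspace(-1e-4, 1e-4, 201)
    #     power = func(angles)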
def plot_rockingcurve(self, lamb=None, n=None, sigma=None,
npts=None, color=None, ang_units=None,
dmargin=None, fs=None, ax=None, legend=None):
drock = self.rockingcurve
func, lamb, bragg = self.get_rockingcurve_func(lamb=lamb, n=n)
axtit = 'Rocking curve for ' + self.Id.Name
return _plot_optics.CrystalBragg_plot_rockingcurve(
func=func, bragg=bragg, lamb=lamb,
sigma=sigma, npts=npts,
ang_units=ang_units, axtit=axtit, color=color,
fs=fs, ax=ax, legend=legend)
def compute_rockingcurve(
self, ih=None, ik=None, il=None, lamb=None,
use_non_parallelism=None, na=None,
alpha_limits=None,
therm_exp=None, plot_therm_exp=None,
plot_asf=None, plot_power_ratio=None,
plot_asymmetry=None, plot_cmaps=None,
verb=None, returnas=None,
):
return _rockingcurve.compute_rockingcurve(
ih=ih, ik=ik, il=il, lamb=lamb,
use_non_parallelism=use_non_parallelism, na=na,
alpha_limits=alpha_limits,
therm_exp=therm_exp, plot_therm_exp=plot_therm_exp,
plot_asf=plot_asf, plot_power_ratio=plot_power_ratio,
plot_asymmetry=plot_asymmetry, plot_cmaps=plot_cmaps,
            verb=verb, returnas=returnas,
)
def plot_var_temp_changes_wavelengths(
self, ih=None, ik=None, il=None, lambdas=None,
use_non_parallelism=None, na=None,
alpha_limits=None,
therm_exp=None, plot_therm_exp=None,
plot_asf=None, plot_power_ratio=None,
plot_asymmetry=None, plot_cmaps=None,
quantity=None,
curv_radius=None, pixel_size=None,
):
return _rockingcurve.plot_var_temp_changes_wavelengths(
ih=ih, ik=ik, il=il, lambdas=lambdas,
use_non_parallelism=use_non_parallelism, na=na,
alpha_limits=alpha_limits,
therm_exp=therm_exp, plot_therm_exp=plot_therm_exp,
plot_asf=plot_asf, plot_power_ratio=plot_power_ratio,
plot_asymmetry=plot_asymmetry, plot_cmaps=plot_cmaps,
quantity=quantity,
curv_radius=curv_radius, pixel_size=pixel_size,
)
# -----------------
# methods for surface and contour sampling
# -----------------
def sample_outline_plot(self, use_non_parallelism=None, res=None):
if self._dgeom['Type'] == 'sph':
if self._dgeom['Typeoutline'] == 'rect':
nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
use_non_parallelism=use_non_parallelism,
)
outline = _comp_optics.CrystBragg_sample_outline_plot_sphrect(
self._dgeom['summit'] - nout*self._dgeom['rcurve'],
nout,
e1,
e2,
self._dgeom['rcurve'],
self._dgeom['extenthalf'],
res,
)
else:
raise NotImplementedError
else:
raise NotImplementedError
return outline
    # -----------------
    # methods for checking and formatting inputs
    # -----------------
def _checkformat_bragglamb(self, bragg=None, lamb=None, n=None):
lc = [lamb is not None, bragg is not None]
if not any(lc):
lamb = self._dbragg['lambref']
lc[0] = True
assert np.sum(lc) == 1, "Provide lamb xor bragg!"
if lc[0]:
bragg = self.get_bragg_from_lamb(
np.atleast_1d(lamb), n=n,
)
else:
bragg = np.atleast_1d(bragg)
return bragg
def _checkformat_get_Rays_from(self, phi=None, bragg=None):
assert phi is not None
assert bragg is not None
bragg = np.atleast_1d(bragg)
phi = np.atleast_1d(phi)
nrays = max(phi.size, bragg.size)
if not phi.shape == bragg.shape:
if phi.size == 1:
phi = np.full(bragg.shape, phi[0])
elif bragg.size == 1:
bragg = np.full(phi.shape, bragg[0])
else:
msg = "phi and bragg/lamb must have the same shape!\n"
msg += " phi.shape: %s\n"%str(phi.shape)
msg += " bragg/lamb.shape: %s\n"%str(bragg.shape)
raise Exception(msg)
return phi, bragg
def _get_rays_from_cryst(
self,
phi=None, bragg=None,
lamb=None, n=None,
dtheta=None, psi=None,
ntheta=None, npsi=None,
use_non_parallelism=None,
include_summit=None,
grid=None,
):
# Get phi, bragg
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb)
phi, bragg = self._checkformat_get_Rays_from(phi=phi, bragg=bragg)
# assert phi.ndim == 1
# Get local summits, nout, e1, e2
pts_start, nout, e1, e2 = self.get_local_noute1e2(
dtheta=dtheta, psi=psi,
use_non_parallelism=use_non_parallelism,
ntheta=ntheta, npsi=npsi,
include_summit=include_summit,
)
nin = -nout
# reshape for broadcast
if grid is True:
nin = nin[..., None]
e1 = e1[..., None]
e2 = e2[..., None]
else:
assert bragg.shape == nin.shape[1:]
# Compute start point (D) and unit vectors (us)
vect = (
np.sin(bragg)*nin
+ np.cos(bragg)*(np.cos(phi)*e1 + np.sin(phi)*e2)
)
return pts_start, vect
def get_rays_from_cryst(
self,
phi=None, bragg=None,
lamb=None, n=None,
dtheta=None, psi=None,
use_non_parallelism=None,
ntheta=None, npsi=None,
include_summit=None,
det=None, config=None, length=None,
returnas=None,
return_xixj=None,
grid=None,
):
""" Return rays stemming from the crystal
The rays are defined by a start point (on the crystal surface) and
either an end point or a unit vector
Start points
------------
The start point is the crystal summit by default
But that can be changed using:
- ('dtheta', 'psi'): can be arbitrary but with same shape
up to 4 dimensions
- ('ntheta', 'npsi', 'include_summit'): will be used to
compute the envelop (contour) of the crystal, as 2 1d arrays
These arguments are fed to self.get_local_noute1e2() which will compute
the start points and return them as shape (3, psi.shape)
End point or unit vector
------------------------
End point are computed automatically if:
- 'config' is provided: ray-tracing is done like for any camera
- 'det' is provided: xi and xj can be computed
        Returning format
        ----------------
        The rays can be returned as:
            - '(pts, vect, length)': a tuple of:
                - pts: array of start points on the crystal
                    (only the summit by default)
                - vect: array of unit vectors of the rays
                - length: array of ray lengths (set by the first intersection
                    with config / det, or by the provided length)
            - '(pts, vect)': a tuple with only pts and vect
            - 'pts': a tuple where both start and end points are returned

        All arrays represent (X, Y, Z) cartesian coordinates in the tokamak's
        frame

        Optionally, can return the (xi, xj) coordinates of points if a
        detector (det) is provided.
"""
# -----------
# Check input
if returnas is None:
returnas = 'pts'
if return_xixj is None:
return_xixj = False
lret = ['(pts, vect, length)', '(pts, vect)', 'pts'] # , object]
if returnas not in lret:
            msg = (
                "Arg returnas must be in:\n"
                + "\t- '(pts, vect, length)': starting points, unit vector,"
                + " length\n"
                + "\t- '(pts, vect)': starting points and unit vector\n"
                + "\t- 'pts': starting and ending points\n"
                # + "\t- object: CamLOS1D instance\n"
            )
det = self._checkformat_det(det)
if length is None:
length = 10.
if grid is None:
try:
grid = bragg.shape != dtheta.shape
except Exception as err:
grid = True
# -----------
# Get starting point and vectors
pts_start, vect = self._get_rays_from_cryst(
phi=phi, bragg=bragg,
lamb=lamb, n=n,
dtheta=dtheta, psi=psi,
use_non_parallelism=use_non_parallelism,
ntheta=ntheta, npsi=npsi,
include_summit=include_summit,
grid=grid,
)
if returnas == '(pts, vect)':
return pts_start, vect
# -----------
# Get length (minimum between conf, det, length)
vshape = vect.shape
dk = {
k0: np.full(vshape[1:], np.nan)
for k0 in ['config', 'det', 'length']
}
xi, xj = None, None
if config is not None:
# Here insert ray-tracing from config!
if vshape != pts_start.shape:
if len(vshape) == 3 and len(pts_start.shape) == 2:
D = np.reshape(
np.repeat(pts_start[..., None], vshape[-1], axis=-1),
(3, -1),
)
u = vect.reshape((3, -1))
else:
msg = (
"Not treated case!\n"
f"\t- pts_start.shape: {pts_start.shape}\n"
f"\t- vect.shape: {vshape}\n"
)
raise Exception(msg)
else:
if len(vshape) > 2:
D = pts_start.reshape((3, -1))
u = vect.reshape((3, -1))
else:
D = pts_start
u = vect
rays = _core.Rays(
dgeom=(D, u),
config=config,
strict=False,
Name='dummy',
Diag='dummy',
Exp='dummy',
)
if u.shape != vshape:
kout = rays.dgeom['kOut'].reshape(vshape[1:])
else:
kout = rays.dgeom['kOut']
dk['config'] = kout
if det is not None and det is not False:
shape = tuple([3] + [1 for ii in range(vect.ndim-1)])
cent = det['cent'].reshape(shape)
nout = det['nout'].reshape(shape)
if grid is True:
k = (
np.sum((cent-pts_start[..., None])*nout, axis=0)
/ np.sum(vect*nout, axis=0)
)
else:
k = (
np.sum((cent-pts_start)*nout, axis=0)
/ np.sum(vect*nout, axis=0)
)
dk['det'][k >= 0.] = k[k >= 0.]
if return_xixj is True:
if grid:
pts_end = pts_start[..., None] + dk['det'][None, ...]*vect
else:
pts_end = pts_start + dk['det'][None, ...]*vect
ei = det['ei'].reshape(shape)
ej = det['ej'].reshape(shape)
xi = np.sum((pts_end - cent)*ei, axis=0)
xj = np.sum((pts_end - cent)*ej, axis=0)
if length is not None:
dk['length'][:] = length
k = np.nanmin([vv for vv in dk.values() if vv is not None], axis=0)
# -----------
# return
if returnas == 'pts':
if grid:
pts_end = pts_start[..., None] + k[None, ...]*vect
if return_xixj:
return pts_start, pts_end, xi, xj
else:
return pts_start, pts_end
else:
pts_end = pts_start + k[None, ...]*vect
if return_xixj:
return pts_start, pts_end, xi, xj
else:
return pts_start, pts_end
elif returnas == '(pts, vect, length)':
if return_xixj:
return pts_start, vect, k, xi, xj
else:
return pts_start, vect, k
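    # Illustrative usage (sketch, commented out; `cryst` and `conf` are
    # hypothetical CrystalBragg / Config instances):
    #
    #     # rays from the crystal summit at the reference wavelength,
    #     # for 3 incidence angles phi, traced through the tokamak config
    #     pts0, pts1 = cryst.get_rays_from_cryst(
    #         phi=np.r_[-0.01, 0., 0.01], config=conf, returnas='pts',
    #     )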
# -----------------
# methods for crystal splitting
# -----------------
def split(self, direction=None, nb=None):
# ------------
# check inputs
if direction is None:
direction = 'e1'
if direction not in ['e1', 'e2']:
msg = (
"Arg direction must be either:\n"
"\t- 'e1': split along vector 'e1' (~horizontally)\n"
"\t- 'e2': split along vector 'e2' (~vertically)\n"
f"You provided: {direction}"
)
raise Exception(msg)
if nb is None:
nb = 2
if not (isinstance(nb, int) and nb > 1):
msg = (
"Arg nb must be a int > 1 !\n"
"It specifies the number of equal parts desired\n"
f"You provided: {nb}"
)
raise Exception(msg)
# ---------------
# split
edges = np.linspace(-1, 1, nb+1)
mid = 0.5*(edges[1:] + edges[:-1])[None, :]
if direction == 'e2':
dtheta = mid*self._dgeom['extenthalf'][1]
psi = np.zeros((1, nb), dtype=float)
extenthalf = [
self._dgeom['extenthalf'][0],
self._dgeom['extenthalf'][1]/nb,
]
else:
dtheta = np.zeros((1, nb), dtype=float)
psi = mid*self._dgeom['extenthalf'][0]
extenthalf = [
self._dgeom['extenthalf'][0]/nb,
self._dgeom['extenthalf'][1],
]
nouts = (
np.cos(dtheta)*(
self._dgeom['nout'][:, None]*np.cos(psi)
+ self._dgeom['e1'][:, None]*np.sin(psi)
)
+ np.sin(dtheta)*self._dgeom['e2'][:, None]
)
e1s = (
-self._dgeom['nout'][:, None]*np.sin(psi)
+ self._dgeom['e1'][:, None]*np.cos(psi)
)
e2s = np.array([
nouts[1, :]*e1s[2, :] - nouts[2, :]*e1s[1, :],
nouts[2, :]*e1s[0, :] - nouts[0, :]*e1s[2, :],
nouts[0, :]*e1s[1, :] - nouts[1, :]*e1s[0, :],
])
# -----------
# Construct list of instances
lobj = [
self.__class__(
dgeom={
'rcurve': self._dgeom['rcurve'],
'center': self._dgeom['center'],
'nout': nouts[:, ii],
'e1': e1s[:, ii],
'e2': e2s[:, ii],
'extenthalf': extenthalf,
},
dmat={
k0: v0 for k0, v0 in self._dmat.items()
if k0 not in ['nin', 'nout', 'e1', 'e2']
},
dbragg=dict(self._dbragg),
Name=f"{self.Id.Name}{ii}",
Exp=self.Id.Exp,
)
for ii in range(nb)
]
return lobj
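    # Illustrative usage (sketch, commented out; `cryst` is a hypothetical
    # CrystalBragg instance):
    #
    #     # split the crystal into 3 equal parts along e1 (~horizontally)
    #     lcryst = cryst.split(direction='e1', nb=3)
    #     # each part is a new instance with extenthalf[0] divided by 3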
# -----------------
# methods for general plotting
# -----------------
def plot(
self, dcryst=None,
phi=None, bragg=None, lamb=None, pts=None,
n=None, config=None, det=None, length=None,
dtheta=None, psi=None,
ntheta=None, npsi=None,
include_summit=None,
dax=None, proj=None, res=None, element=None,
color=None, ddet=None,
dleg=None, draw=True, dmargin=None,
use_non_parallelism=None, grid=None,
rays_npts=None, rays_color=None,
fs=None, wintit=None, tit=None,
):
""" Plot the crystal in desired projeection
The projection is 3d, cross-section or horizontal
Optionaly add rays reflected on cryst at:
- lamb / phi: desired wavelength and incidence angle
and either:
- psi, dtheta : desired pts on the crystal surface
- pts: emitted from desired pts (e.g.: in the plasma)
(need to be refresh with get_rays_from_cryst method
if new pts are wanted)
Parameters
----------
dax: None / dict
dict of axes to be used, with keys:
- 'cross': axe where to plot cross-section view
- 'hor': axe where to plot horizontal (from top) view
- '3d': axe where to plot 3d view
if None, a new figure and axes are created
proj: None / str
key indicating which plot to make:
- 'cross': cross-section projection
- 'hor': horizontal projection
- 'all': cross-section + horizontal view
- '3d': 3d view
element: None / str
char string where each letter indicates an element to plot
- 'o': outline (edges of crystal)
- 's': summit (geometrical center of the crystal)
- 'c': center (of the sphere of curvature)
- 'r': rowland circle (plotted in e1 direction)
- 'v': local unit vectors e1, e2, nout
If None, default to 'oscvr'
res: None / float
Resolution for the discretization of the outline
dcryst: None / dict
dict of dict for plotting the various elements of the crystal:
- 'outline': dict of properties fed to plot()
- 'cent': dict of properties fed to plot()
- 'summit': dict of properties fed to plot()
- 'rowland': dict of properties fed to plot()
- 'vectors': dict of properties fed to quiver()
ddet: None / dict
dict of dict for plotting the various elements of the det:
- 'outline': dict of properties fed to plot()
- 'cent': dict of properties fed to plot()
- 'vectors': dict of properties fed to quiver()
color: None / str / tuple
color to be used for plotting
Overwrites all colors in dcryst and ddet
det: None / dict
            Optional associated detector to be plotted, as a dict with keys:
- 'cent': 1d array of cartesian coordinates of the center
- 'nout': 1d array of cartesian coordinates of unit vector
oriented towards the crystal
- 'ei': 1d array of cartesian coordinates of unit vector
- 'ej': 1d array of cartesian coordinates of unit vector
- 'outline': 2d array of outline coordinates in (ei, ej)
dleg: None / dict
dict of properties to be passed to plt.legend()
if False legend is not plotted
        use_non_parallelism: None / bool
            Which unit vectors (direct orthonormal basis) to use:
                - use_non_parallelism = True  => use the basis from dmat
                  (i.e.: including non-parallelism)
                - use_non_parallelism = False => use the basis from dgeom
"""
if det is None:
det = False
det = self._checkformat_det(det)
lc = [
dtheta is not None or psi is not None or phi is not None,
pts is not None
]
if np.sum(lc) == 2:
msg = (
"For ray tracing, please provide either:\n"
+ "\t- dtheta, psi, phi, lamb/bragg\n"
+ "\t- pts, lamb/bragg\n"
)
raise Exception(msg)
# Add rays?
if lc[0]:
# Get one way
# pts.shape = (3, nlamb, npts, ndtheta)
pts_summit, pts1 = self.get_rays_from_cryst(
phi=phi, lamb=lamb, bragg=bragg,
n=n, use_non_parallelism=use_non_parallelism,
dtheta=dtheta, psi=psi,
ntheta=ntheta, npsi=npsi,
include_summit=include_summit,
config=config, det=det,
returnas='pts', return_xixj=False,
grid=grid,
)
# Get the other way
pts2, xi, xj = self.get_rays_from_cryst(
phi=phi+np.pi, lamb=lamb, bragg=bragg,
n=n, use_non_parallelism=use_non_parallelism,
dtheta=dtheta, psi=psi,
ntheta=ntheta, npsi=npsi,
include_summit=include_summit,
config=config, det=det,
returnas='pts', return_xixj=True,
grid=grid,
)[1:]
elif lc[1]:
c0 = (
isinstance(pts, np.ndarray)
and pts.ndim == 2
and pts.shape[0] == 3
)
if not c0:
msg = ("Arg pts must be a (3, npts) np.array!")
raise Exception(msg)
# pts.shape = (nlamb, npts, ndtheta)
dtheta, psi, phi, bragg, _, _ = self.calc_raytracing_from_lambpts(
pts=pts,
lamb=lamb,
ndtheta=ntheta,
)
pts_summit, pts2, xi, xj = self.get_rays_from_cryst(
phi=phi+np.pi, lamb=None, bragg=bragg,
n=n, use_non_parallelism=use_non_parallelism,
dtheta=dtheta, psi=psi,
ntheta=ntheta, npsi=npsi,
include_summit=include_summit,
config=config, det=det,
returnas='pts', return_xixj=True,
grid=grid,
)
pts1 = np.repeat(
np.repeat(
np.repeat(
pts[:, None, :], dtheta.shape[0], axis=1,
)[..., None],
dtheta.shape[2],
axis=-1,
)[..., None],
2,
axis=-1,
)
else:
pts_summit, pts1, pts2, xi, xj = None, None, None, None, None
return _plot_optics.CrystalBragg_plot(
cryst=self, dcryst=dcryst,
det=det, ddet=ddet,
dax=dax, proj=proj, res=res, element=element,
color=color,
pts_summit=pts_summit, pts1=pts1, pts2=pts2,
xi=xi, xj=xj,
rays_color=rays_color, rays_npts=rays_npts,
dleg=dleg, draw=draw, fs=fs, dmargin=dmargin,
use_non_parallelism=use_non_parallelism,
wintit=wintit, tit=tit,
)
# -----------------
# methods for generic first-approx
# -----------------
def get_phi_from_magaxis_summit(
self,
axis_r,
axis_z,
axis_npts=None,
lamb=None,
lamb_tol=None,
bragg=None,
n=None,
use_non_parallelism=None,
):
""" Return phi of a magnteic axis (at lamb with tolerance)
axis_r and axis_z must be np.ndarrays of the same shape
The magnetic axis is discretized toroidally in axis_npts (def: 1000)
The pts closest to the chosen lamb are picked
If no pts is found within tolerance, an error is raised
"""
# --------------------
# Check / format input
if axis_npts is None:
axis_npts = 1000
axis_r = np.atleast_1d(axis_r)
axis_z = np.atleast_1d(axis_z)
assert axis_r.shape == axis_z.shape
if lamb_tol is None:
lamb_tol = 0.01e-10
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)
# --------------
        # Discretize axis
shaperz = axis_r.shape
phi_ax = np.full(shaperz, np.nan)
# Compute phi
theta_cryst = np.arctan2(
self._dgeom['summit'][1],
self._dgeom['summit'][0],
)
theta_ax = theta_cryst + np.pi/2*np.linspace(-1, 1, axis_npts)
shapetheta = np.r_[[1 for ii in shaperz], axis_npts]
theta_ax = theta_ax.reshape(shapetheta)
axis_x = (axis_r[..., None] * np.cos(theta_ax)).ravel()
axis_y = (axis_r[..., None] * np.sin(theta_ax)).ravel()
axis_z = (np.repeat(axis_z[..., None], axis_npts, axis=-1)).ravel()
# ----------------------------------------------
# Compute bragg, phi, lamb of each point on axis
(
bragg_ax_full, phi_ax_full, lamb_ax_full,
) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
pts=np.array([axis_x, axis_y, axis_z]),
dtheta=None, psi=None,
ntheta=None, npsi=None,
n=None,
use_non_parallelism=use_non_parallelism,
grid=None,
return_lamb=True,
)
# -------------------------------------
# Select points on axis closest to lamb
# lamb_ax_full = self.get_lamb_from_bragg(bragg_ax_full)
shape_full = tuple(np.r_[shaperz, axis_npts])
lamb_ax_full = lamb_ax_full.reshape(shape_full)
phi_ax_full = phi_ax_full.reshape(shape_full)
dlamb = np.abs(lamb_ax_full - lamb)
indok = np.any(dlamb <= lamb_tol, axis=-1)
indmin = np.nanargmin(dlamb[indok, :], axis=-1)
indtup = tuple([iii for iii in indok.nonzero()] + [indmin])
phi_ax[indok] = phi_ax_full[indtup]
return phi_ax
def get_bragg_from_lamb(self, lamb=None, n=None):
""" Braggs' law: n*lamb = 2dsin(bragg) """
if self._dmat['d'] is None:
msg = "Interplane distance d no set !\n"
msg += " => self.set_dmat({'d':...})"
raise Exception(msg)
if lamb is None:
lamb = self._dbragg['lambref']
return _comp_optics.get_bragg_from_lamb(
np.atleast_1d(lamb), self._dmat['d'], n=n,
)
def get_lamb_from_bragg(self, bragg=None, n=None):
""" Braggs' law: n*lamb = 2dsin(bragg) """
if self._dmat['d'] is None:
msg = "Interplane distance d no set !\n"
msg += " => self.set_dmat({'d':...})"
raise Exception(msg)
if bragg is None:
bragg = self._dbragg['braggref']
return _comp_optics.get_lamb_from_bragg(np.atleast_1d(bragg),
self._dmat['d'], n=n)
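    # Worked example of Bragg's law (sketch, commented out; `cryst` is a
    # hypothetical CrystalBragg instance, numbers are made up):
    #
    #     # with d = 2.45e-10 m, bragg = pi/4 rad and n = 1:
    #     # lamb = 2*d*sin(bragg)/n = 2*2.45e-10*sin(pi/4) ~ 3.46e-10 m
    #     lamb = cryst.get_lamb_from_bragg(bragg=np.pi/4, n=1)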
def update_non_parallelism(self, alpha=None, beta=None):
""" Compute new values of unit vectors nout, e1 and e2 into
dmat basis, due to non parallelism
Update new values into dmat dict
"""
if alpha is None:
alpha = 0
if beta is None:
beta = 0
(self._dmat['nin'], self._dmat['nout'], self._dmat['e1'],
self._dmat['e2']) = _comp_optics.get_vectors_from_angles(
alpha, beta,
self._dgeom['nout'], self._dgeom['e1'],
self._dgeom['e2'],
)
self._dmat['alpha'], self._dmat['beta'] = alpha, beta
def calc_meridional_sagital_focus(
self,
rcurve=None,
bragg=None,
alpha=None,
use_non_parallelism=None,
verb=None,
):
""" Compute sagittal and meridional focuses distances.
Optionnal result according to non-parallelism, using first the
update_non_parallelism method.
parameters
----------
rcurve: float
in dgeom dict., curvature radius of the crystal.
bragg: float
in dbragg dict., reference bragg angle of the crystal.
alpha: float
in dmat dict., amplitude of the non-parallelism
as an a angle defined by user, in radian.
use_non_parallelism: str
Need to be True to use new alpha angle
Return
------
merid_ref: float
Distance crystal-meridional focus (m), for a perfect crystal
sagit_ref: float
Distance crystal-sagital focus (m), for a perfect crystal
merid_unp: float
Distance crystal-meridional focus (m), using non_parallelism
sagit_unp: float
Distance crystal-sagital focus (m), using non_parallelism
"""
# Check inputs
if rcurve is None:
rcurve = self._dgeom['rcurve']
if bragg is None:
bragg = self._dbragg['braggref']
if use_non_parallelism is True:
alpha = self._dmat['alpha']
if use_non_parallelism is False:
alpha = 0.0
# Compute
return _comp_optics.calc_meridional_sagital_focus(
rcurve=rcurve,
bragg=bragg,
alpha=alpha,
use_non_parallelism=use_non_parallelism,
verb=verb,
)
def get_rowland_dist_from_lambbragg(self, bragg=None, lamb=None, n=None):
""" Return the array of dist from cryst summit to pts on rowland """
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
if np.all(np.isnan(bragg)):
msg = ("There is no available bragg angle!\n"
+ " => Check the vlue of self.dmat['d'] vs lamb")
raise Exception(msg)
return _comp_optics.get_rowland_dist_from_bragg(
bragg=bragg, rcurve=self._dgeom['rcurve'],
)
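    # Under the standard Johann geometry, the Rowland circle has diameter
    # rcurve, so the summit-to-Rowland distance along the diffracted ray is
    # rcurve*sin(bragg). Illustrative usage (sketch, commented out; `cryst`
    # is a hypothetical CrystalBragg instance, lamb is made up):
    #
    #     dist = cryst.get_rowland_dist_from_lambbragg(lamb=3.96e-10)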
def get_detector_ideal(
self,
bragg=None, lamb=None,
rcurve=None, n=None,
ddist=None, di=None, dj=None,
dtheta=None, dpsi=None, tilt=None,
lamb0=None, lamb1=None, dist01=None,
use_non_parallelism=None,
tangent_to_rowland=None, plot=False,
):
""" Return approximate ideal detector geometry
Assumes infinitesimal and ideal crystal
Returns a dict containing the position and orientation of a detector if
it was placed ideally on the rowland circle, centered on the
desired bragg angle (in rad) or wavelength (in m)
The detector can be tangential to the Rowland circle or perpendicular
to the line between the crystal and the detector
Assumes detector center matching lamb (m) / bragg (rad)
The detector can be translated towards / away from the crystal
to make sure the distance between 2 spectral lines
(lamb0 and lamb1) on the detector's plane matches
a desired distance (dist01, in m)
Finally, a desired offset (translation) can be added
via (ddist, di, dj), in m
Similarly, an extra rotation can be added via (dtheta, dpsi, tilt)
Detector is described by center position
and (nout, ei, ej) unit vectors
By convention, nout = np.cross(ei, ej)
Vectors (ei, ej) define an orthogonal frame in the detector's plane
All coordinates are 3d (X, Y, Z in the tokamak's frame)
Return:
-------
det: dict
dict of detector geometrical characteristics:
'cent': np.ndarray
(3,) array of (x, y, z) coordinates of detector center
'nout': np.ndarray
(3,) array of (x, y, z) coordinates of unit vector
perpendicular to detector' surface
oriented towards crystal
'ei': np.ndarray
(3,) array of (x, y, z) coordinates of unit vector
defining first coordinate in detector's plane
'ej': np.ndarray
(3,) array of (x, y, z) coordinates of unit vector
defining second coordinate in detector's plane
                'outline': np.ndarray
(2, N) array to build detector's contour
where the last point is identical to the first.
(for example for WEST X2D spectrometer:
x*np.r_[-1,-1,1,1,-1], y*np.r_[-1,1,1,-1,-1])
"""
# ---------------------
# Check / format inputs
if rcurve is None:
rcurve = self._dgeom['rcurve']
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
if np.all(np.isnan(bragg)):
msg = ("There is no available bragg angle!\n"
+ " => Check the vlue of self.dmat['d'] vs lamb")
raise Exception(msg)
lc = [lamb0 is not None, lamb1 is not None, dist01 is not None]
if any(lc) and not all(lc):
msg = (
"Arg lamb0, lamb1 and dist01 must be provided together:\n"
+ "\t- lamb0: line0 wavelength ({})\n".format(lamb0)
+ "\t- lamb1: line1 wavelength ({})\n".format(lamb1)
+ "\t- dist01: distance (m) on detector between lines "
+ "({})".format(dist01)
)
raise Exception(msg)
bragg01 = None
if all(lc):
bragg01 = self._checkformat_bragglamb(
lamb=np.r_[lamb0, lamb1], n=n,
)
        # split into 2 different conditions because of dmat
lc = [rcurve is None, self._dgeom['summit'] is None]
if any(lc):
            msg = (
                "Some missing fields in dgeom for computation:"
                + "\n\t- " + "\n\t- ".join(['rcurve', 'summit'])
            )
raise Exception(msg)
nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
use_non_parallelism=use_non_parallelism,
)
lc = [cc is None for cc in [nout, e1, e2]]
if any(lc):
msg = (
"""
Field 'nout', 'e1', 'e2' missing!
"""
)
raise Exception(msg)
# Compute crystal-centered parameters in (nout, e1, e2)
(det_dist, n_crystdet_rel,
det_nout_rel, det_ei_rel) = _comp_optics.get_approx_detector_rel(
rcurve, bragg,
bragg01=bragg01, dist01=dist01,
tangent_to_rowland=tangent_to_rowland)
# Deduce absolute position in (x, y, z)
det_cent, det_nout, det_ei, det_ej = _comp_optics.get_det_abs_from_rel(
det_dist, n_crystdet_rel, det_nout_rel, det_ei_rel,
self._dgeom['summit'], nout, e1, e2,
ddist=ddist, di=di, dj=dj,
dtheta=dtheta, dpsi=dpsi, tilt=tilt)
if plot:
dax = self.plot()
            p0 = np.repeat(det_cent[:, None], 3, axis=1)
vv = np.vstack((det_nout, det_ei, det_ej)).T
dax['cross'].plot(np.hypot(det_cent[0], det_cent[1]),
det_cent[2], 'xb')
dax['hor'].plot(det_cent[0], det_cent[1], 'xb')
dax['cross'].quiver(np.hypot(p0[0, :], p0[1, :]), p0[2, :],
np.hypot(vv[0, :], vv[1, :]), vv[2, :],
units='xy', color='b')
dax['hor'].quiver(p0[0, :], p0[1, :], vv[0, :], vv[1, :],
units='xy', color='b')
return {'cent': det_cent, 'nout': det_nout,
'ei': det_ei, 'ej': det_ej}
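    # Illustrative usage (sketch, commented out; `cryst` is a hypothetical
    # CrystalBragg instance):
    #
    #     # ideal detector on the Rowland circle at the reference wavelength,
    #     # tangent to the Rowland circle, shifted 1 cm away from the crystal
    #     det = cryst.get_detector_ideal(
    #         lamb=None, tangent_to_rowland=True, ddist=0.01,
    #     )
    #     # det is a dict with keys 'cent', 'nout', 'ei', 'ej'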
def _checkformat_det(self, det=None):
lc = [det is None, det is False, isinstance(det, dict)]
msg = ("det must be:\n"
+ "\t- False: not det provided\n"
+ "\t- None: use default approx det from:\n"
+ "\t self.get_detector_ideal()\n"
+ "\t- dict: a dictionary of 3d (x,y,z) coordinates of a point"
+ " (local frame center) and 3 unit vectors forming a direct "
+ "orthonormal basis attached to the detector's frame\n"
+ "\t\t\t\t- 'cent': detector center\n"
+ "\t\t\t\t- 'nout': unit vector perpendicular to surface, "
+ "in direction of the crystal\n"
+ "\t\t\t\t- 'ei': unit vector, first coordinate on surface\n"
+ "\t\t\t\t- 'ej': unit vector, second coordinate on surfacei\n"
+ " You provided: {}".format(det))
if not any(lc):
raise Exception(msg)
if lc[0]:
det = self.get_detector_ideal(lamb=self._dbragg['lambref'])
elif lc[2]:
lk = ['cent', 'nout', 'ei', 'ej']
c0 = (isinstance(det, dict)
and all([(kk in det.keys()
and hasattr(det[kk], '__iter__')
and np.atleast_1d(det[kk]).size == 3
and not np.any(np.isnan(det[kk])))
for kk in lk]))
if not c0:
raise Exception(msg)
for k0 in lk:
det[k0] = np.atleast_1d(det[k0]).ravel()
return det
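    # A minimal valid det dict (sketch, commented out; coordinates made up):
    #
    #     det = {
    #         'cent': np.r_[8.2, 0.1, 0.0],   # detector center (X, Y, Z)
    #         'nout': np.r_[-1., 0., 0.],     # towards the crystal
    #         'ei': np.r_[0., 1., 0.],        # first in-plane unit vector
    #         'ej': np.r_[0., 0., 1.],        # second in-plane unit vector
    #     }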
def get_local_noute1e2(
self,
dtheta=None, psi=None,
ntheta=None, npsi=None,
use_non_parallelism=None,
include_summit=None,
):
""" Return (vout, ve1, ve2) associated to pts on the crystal's surface
All points on the spherical crystal's surface are identified
by (dtheta, psi) coordinates, where:
- theta = np.pi/2 + dtheta (dtheta=0 default) for the center
(for the diffracted beam), from frame's basis vector ez
- psi = 0 for the center, positive in direction of e1
They are the spherical coordinates from a sphere centered on the
crystal's center of curvature.
Args (dtheta, psi) can be:
- arbitrary: same shape and dimension up to 4
- 'envelop': will be computed to represent the crystal contour
will be returned as 2 1d arrays
Return the pts themselves and the 3 perpendicular local unit vectors
(nout, e1, e2), where nout is towards the outside of the sphere and
nout = np.cross(e1, e2)
In all cases, the output have shape (3, psi.shape)
Return:
-------
summ: np.ndarray
coordinates of the points on the surface
vout: np.ndarray
coordinates of outward unit vector
ve1: np.ndarray
coordinates of first tangential unit vector
ve2: np.ndarray
coordinates of second tangential unit vector
All are cartesian (X, Y, Z) coordinates in the tokamak's frame
"""
# Get local basis at crystal summit
nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
use_non_parallelism=use_non_parallelism,
)
nin = -nout
# Get vectors at any points from psi & dtheta
vout, ve1, ve2 = _comp_optics.CrystBragg_get_noute1e2_from_psitheta(
nout, e1, e2,
psi=psi, dtheta=dtheta,
e1e2=True, sameshape=False,
extenthalf_psi=self._dgeom['extenthalf'][0],
extenthalf_dtheta=self._dgeom['extenthalf'][1],
ntheta=ntheta, npsi=npsi,
include_summit=include_summit,
)
vin = -vout
# cent no longer dgeom['center'] because no longer a fixed point
cent = self._dgeom['summit'] + self._dgeom['rcurve']*nin
reshape = np.r_[3, [1 for ii in range(vout.ndim - 1)]]
cent = cent.reshape(reshape)
# Redefining summit according to nout at each point at crystal
summ = cent + self._dgeom['rcurve']*vout
return summ, vout, ve1, ve2
def calc_xixj_from_braggphi(
self,
phi=None,
bragg=None,
lamb=None,
n=None,
dtheta=None,
psi=None,
det=None,
use_non_parallelism=None,
strict=None,
return_strict=None,
data=None,
plot=True,
dax=None,
):
""" Assuming crystal's summit as frame origin
According to [1], this assumes a local frame centered on the crystal
These calculations are independent from the tokamak's frame:
The origin of the local frame is the crystal's summit
The (O, ez) axis is the crystal's normal
The crystal is tangent to (O, ex, ey)
[1] tofu/Notes_Upgrades/SpectroX2D/SpectroX2D_EllipsesOnPlane.pdf
Parameters:
-----------
Z: float
Detector's plane intersection with (O, ez) axis
n: np.ndarray
(3,) array containing local (x,y,z) coordinates of the plane's
normal vector
"""
if return_strict is None:
return_strict = False
# Check / format inputs
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
phi = np.atleast_1d(phi)
# Check / get det
det = self._checkformat_det(det)
# Get local summit nout, e1, e2 if non-centered
if dtheta is None:
dtheta = 0.
if psi is None:
psi = 0.
# Probably to update with use_non_parallelism?
# Get back summit & vectors at any point at the crystal surface,
# according to parallelism properties
summit, nout, e1, e2 = self.get_local_noute1e2(
dtheta=dtheta, psi=psi,
use_non_parallelism=use_non_parallelism,
ntheta=None, npsi=None,
include_summit=False,
)
# Compute
xi, xj, strict = _comp_optics.calc_xixj_from_braggphi(
det_cent=det['cent'],
det_nout=det['nout'], det_ei=det['ei'], det_ej=det['ej'],
det_outline=det.get('outline'),
summit=summit, nout=nout, e1=e1, e2=e2,
bragg=bragg, phi=phi, strict=strict,
)
if plot:
dax = _plot_optics.CrystalBragg_plot_approx_detector_params(
bragg, xi, xj, data, dax,
)
if return_strict is True:
return xi, xj, strict
else:
return xi, xj
def plot_line_on_det_tracing(
self, lamb=None, n=None,
nphi=None,
det=None, johann=None,
use_non_parallelism=None,
lpsi=None, ldtheta=None,
strict=None,
ax=None, dleg=None,
rocking=None, fs=None, dmargin=None,
wintit=None, tit=None,
):
""" Visualize the de-focusing by ray-tracing of chosen lamb
Possibility to plot few wavelength' arcs on the same plot.
Args:
- lamb: array of min size 1, in 1e-10 [m]
- det: dict
- xi_bounds: np.min & np.max of _XI
- xj_bounds: np.min & np.max of _XJ
(from "inputs_temp/XICS_allshots_C34.py" l.649)
- johann: True or False
"""
# Check / format inputs
if lamb is None:
lamb = self._dbragg['lambref']
lamb = np.atleast_1d(lamb).ravel()
nlamb = lamb.size
if johann is None:
johann = lpsi is not None or ldtheta is not None
if rocking is None:
rocking = False
if det is None or det.get('outline') is None:
msg = ("Please provide det as a dict with 'outline'!")
raise Exception(msg)
# Get local basis
nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
use_non_parallelism=use_non_parallelism,
)
nin = -nout
# Compute lamb / phi
_, phi = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=det['outline'][0, :], xj=det['outline'][1, :], det=det,
dtheta=0, psi=0,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=False,
)
phimin, phimax = np.nanmin(phi), np.nanmax(phi)
phimin, phimax = phimin-(phimax-phimin)/10, phimax+(phimax-phimin)/10
# Get reference ray-tracing
bragg = self._checkformat_bragglamb(lamb=lamb, n=n)
if nphi is None:
nphi = 100
phi = np.linspace(phimin, phimax, nphi)
xi = np.full((nlamb, nphi), np.nan)
xj = np.full((nlamb, nphi), np.nan)
for ll in range(nlamb):
xi[ll, :], xj[ll, :] = self.calc_xixj_from_braggphi(
bragg=np.full(phi.shape, bragg[ll]),
phi=phi,
dtheta=0.,
psi=0.,
n=n,
det=det,
use_non_parallelism=use_non_parallelism,
strict=strict,
plot=False,
)
# Get johann-error raytracing (multiple positions on crystal)
xi_er, xj_er = None, None
if johann and not rocking:
if lpsi is None:
lpsi = np.linspace(-1., 1., 15)
if ldtheta is None:
ldtheta = np.linspace(-1., 1., 15)
lpsi, ldtheta = np.meshgrid(lpsi, ldtheta)
lpsi = lpsi.ravel()
ldtheta = ldtheta.ravel()
lpsi = self._dgeom['extenthalf'][0]*np.r_[lpsi]
ldtheta = self._dgeom['extenthalf'][1]*np.r_[ldtheta]
npsi = lpsi.size
assert npsi == ldtheta.size
xi_er = np.full((nlamb, npsi*nphi), np.nan)
xj_er = np.full((nlamb, npsi*nphi), np.nan)
            for ll in range(nlamb):
                for ii in range(npsi):
                    i0 = np.arange(ii*nphi, (ii+1)*nphi)
                    xi_er[ll, i0], xj_er[ll, i0] = self.calc_xixj_from_braggphi(
                        phi=phi, bragg=bragg[ll], lamb=None, n=n,
                        dtheta=ldtheta[ii], psi=lpsi[ii],
                        det=det, plot=False,
                        use_non_parallelism=use_non_parallelism,
                        strict=strict,
                    )
# Get rocking curve error
if rocking:
pass
# Plot
return _plot_optics.CrystalBragg_plot_line_tracing_on_det(
lamb, xi, xj, xi_er, xj_er,
det=det, ax=ax, dleg=dleg,
johann=johann, rocking=rocking,
fs=fs, dmargin=dmargin, wintit=wintit, tit=tit)
def calc_johannerror(
self,
xi=None, xj=None, err=None,
det=None, n=None,
lpsi=None, ldtheta=None,
lambda_interval_min=None,
lambda_interval_max=None,
use_non_parallelism=None,
plot=True, fs=None, cmap=None,
vmin=None, vmax=None, tit=None, wintit=None,
):
""" Plot the johann error
The johann error is the error (scattering) induced by defocalization
due to finite crystal dimensions
There is a johann error on wavelength (lamb => loss of spectral
resolution) and on directionality (phi)
If provided, lpsi and ldtheta are taken as normalized variations with
respect to the crystal summit and to its extenthalf.
Typical values are:
- lpsi = [-1, 1, 1, -1]
- ldtheta = [-1, -1, 1, 1]
They must have the same len()
First affecting a reference lambda according to:
- pixel's position
- crystal's summit
Then, computing error on bragg and phi angles on each pixels by
computing lambda and phi from the crystal's outline
Provide lambda_interval_min/max to ensure the given wavelength interval
is detected over the whole surface area.
A True/False boolean is then returned.
"""
# Check xi, xj once before to avoid doing it twice
if err is None:
err = 'abs'
if lambda_interval_min is None:
lambda_interval_min = 3.93e-10
if lambda_interval_max is None:
lambda_interval_max = 4.00e-10
xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)
# Check / format inputs
bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xii, xj=xjj, det=det,
dtheta=0, psi=0,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=True,
)
# Only one summit was selected
bragg, phi, lamb = bragg[..., 0], phi[..., 0], lamb[..., 0]
# Check lambda interval into lamb array
        test_lambda_interv = bool(
            np.min(lamb) < lambda_interval_min
            and np.max(lamb) > lambda_interval_max
        )
# Get err from multiple ldtheta, lpsi
if lpsi is None:
lpsi = np.r_[-1., 0., 1., 1., 1., 0., -1, -1]
lpsi = self._dgeom['extenthalf'][0]*np.r_[lpsi]
if ldtheta is None:
ldtheta = np.r_[-1., -1., -1., 0., 1., 1., 1., 0.]
ldtheta = self._dgeom['extenthalf'][1]*np.r_[ldtheta]
npsi = lpsi.size
assert npsi == ldtheta.size
(
braggerr, phierr, lamberr,
) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xii, xj=xjj, det=det,
dtheta=ldtheta, psi=lpsi,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=True,
)
err_lamb = np.nanmax(np.abs(lamb[..., None] - lamberr), axis=-1)
err_phi = np.nanmax(np.abs(phi[..., None] - phierr), axis=-1)
# absolute vs relative error
if 'rel' in err:
if err == 'rel':
err_lamb = 100.*err_lamb / (np.nanmax(lamb) - np.nanmin(lamb))
err_phi = 100.*err_phi / (np.nanmax(phi) - np.nanmin(phi))
elif err == 'rel2':
err_lamb = 100.*err_lamb / np.mean(lamb)
err_phi = 100.*err_phi / np.mean(phi)
err_lamb_units = '%'
err_phi_units = '%'
else:
err_lamb_units = 'm'
err_phi_units = 'rad'
if plot is True:
ax = _plot_optics.CrystalBragg_plot_johannerror(
xi, xj, lamb, phi,
err_lamb, err_phi,
err_lamb_units=err_lamb_units,
err_phi_units=err_phi_units,
cmap=cmap, vmin=vmin, vmax=vmax,
fs=fs, tit=tit, wintit=wintit,
)
return (
err_lamb, err_phi, err_lamb_units, err_phi_units,
test_lambda_interv,
)
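    # Illustrative usage (sketch, commented out; `cryst`, `xi`, `xj` and
    # `det` are hypothetical):
    #
    #     # relative johann error (in %) over a pixelized detector
    #     err_lamb, err_phi, ul, up, ok = cryst.calc_johannerror(
    #         xi=xi, xj=xj, det=det, err='rel', plot=False,
    #     )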
def plot_focal_error_summed(
self,
dist_min=None, dist_max=None,
di_min=None, di_max=None,
ndist=None, ndi=None,
lamb=None, bragg=None,
xi=None, xj=None,
err=None,
use_non_parallelism=None,
tangent_to_rowland=None, n=None,
plot=None,
pts=None,
det_ref=None, plot_dets=None, nsort=None,
dcryst=None,
lambda_interval_min=None,
lambda_interval_max=None,
contour=None,
fs=None,
ax=None,
cmap=None,
vmin=None,
vmax=None,
return_ax=None,
):
"""
Using the calc_johannerror method, computing the sum of the
focalization error over the whole detector for different positions
characterized by the translations ddist and di in the equatorial plane
(dist_min, dist_max, ndist) (di_min, di_max, ndi).
Parameters:
-----------
- lamb/bragg : float
Automatically set to crystal's references
- xi, xj : np.ndarray
pixelization of the detector
(from "inputs_temp/XICS_allshots_C34.py" l.649)
- alpha, beta : float
Values of Non Parallelism references angles
- use_non_parallelism : str
- tangent_to_rowland : str
- plot_dets : str
Possibility to plot the nsort- detectors with the lowest
summed focalization error, next to the Best Approximate Real
detector
dict(np.load('det37_CTVD_incC4_New.npz', allow_pickle=True))
- nsort : float
Number of best detector's position to plot
- lambda_interv_min/max : float
To ensure the given wavelength interval is detected over the whole
surface area. A True/False boolean is then returned.
"""
# Check / format inputs
if dist_min is None:
dist_min = -0.15
if dist_max is None:
dist_max = 0.15
if di_min is None:
di_min = -0.40
if di_max is None:
di_max = 0.40
if ndist is None:
ndist = 21
if ndi is None:
ndi = 21
if err is None:
err = 'rel'
if plot is None:
plot = True
if plot_dets is None:
plot_dets = det_ref is not None
if nsort is None:
nsort = 5
if return_ax is None:
return_ax = True
if lambda_interval_min is None:
lambda_interval_min = 3.93e-10
if lambda_interval_max is None:
lambda_interval_max = 4.00e-10
l0 = [dist_min, dist_max, ndist, di_min, di_max, ndi]
c0 = any([l00 is not None for l00 in l0])
if not c0:
msg = (
"Please give the ranges of ddist and di translations\n"
"\t to compute the different detector's position\n"
"\t Provided:\n"
"\t\t- dist_min, dist_max, ndist: ({}, {}, {})\n".format(
dist_min, dist_max, ndist,
)
+ "\t\t- di_min, di_max, ndi: ({}, {}, {})\n".format(
di_min, di_max, ndi,
)
)
raise Exception(msg)
# ------------
# Compute local coordinates of det_ref
(
ddist0, di0, dj0,
dtheta0, dpsi0, tilt0,
) = self._get_local_coordinates_of_det(
bragg=bragg,
lamb=lamb,
det_ref=det_ref,
use_non_parallelism=use_non_parallelism,
)
# angle between nout vectors from get_det_approx() &
## get_det_approx(tangent=False)
det1 = self.get_detector_ideal(
lamb=lamb,
bragg=bragg,
use_non_parallelism=use_non_parallelism,
tangent_to_rowland=True,
)
det2 = self.get_detector_ideal(
lamb=lamb,
bragg=bragg,
use_non_parallelism=use_non_parallelism,
tangent_to_rowland=False,
)
        cos_angle_nout = np.sum(
            det1['nout'] * det2['nout']
        ) / (
            np.linalg.norm(det1['nout']) * np.linalg.norm(det2['nout'])
        )
angle_nout = np.arccos(cos_angle_nout)
# Compute
ddist = np.linspace(dist_min, dist_max, int(ndist))
di = np.linspace(di_min, di_max, int(ndi))
error_lambda = np.full((di.size, ddist.size), np.nan)
test_lamb_interv = np.zeros((di.size, ddist.size), dtype='bool')
end = '\r'
for ii in range(ddist.size):
for jj in range(di.size):
# print progression
if ii == ndist-1 and jj == ndi-1:
end = '\n'
msg = (
"Computing mean focal error for det "
f"({ii+1}, {jj+1})/({ndist}, {ndi})"
).ljust(60)
print(msg, end=end, flush=True)
# Get det
dpsi0bis = float(dpsi0)
if tangent_to_rowland:
dpsi0bis = dpsi0 - angle_nout
det = self.get_detector_ideal(
ddist=ddist[ii],
di=di[jj],
dj=dj0,
dtheta=dtheta0,
dpsi=dpsi0bis,
tilt=tilt0,
lamb=lamb,
bragg=bragg,
use_non_parallelism=use_non_parallelism,
tangent_to_rowland=False,
)
# Integrate error
                # calc_johannerror() returns a 5-tuple; [::4] keeps items
                # 0 and 4 only (err_lamb and test_lambda_interv)
                (
                    error_lambda_temp, test_lamb_interv[jj, ii],
                ) = self.calc_johannerror(
                    xi=xi, xj=xj,
                    det=det,
                    err=err,
                    lambda_interval_min=lambda_interval_min,
                    lambda_interval_max=lambda_interval_max,
                    plot=False,
                )[::4]
error_lambda[jj, ii] = np.nanmean(error_lambda_temp)
if 'rel' in err:
units = '%'
else:
units = 'm'
if plot:
ax = _plot_optics.CrystalBragg_plot_focal_error_summed(
cryst=self, dcryst=dcryst,
lamb=lamb, bragg=bragg,
error_lambda=error_lambda,
ddist=ddist, di=di,
ddist0=ddist0, di0=di0, dj0=dj0,
dtheta0=dtheta0, dpsi0=dpsi0, tilt0=tilt0,
angle_nout=angle_nout,
det_ref=det_ref,
units=units,
plot_dets=plot_dets, nsort=nsort,
tangent_to_rowland=tangent_to_rowland,
use_non_parallelism=use_non_parallelism,
pts=pts,
test_lamb_interv=test_lamb_interv,
contour=contour,
fs=fs,
ax=ax,
cmap=cmap,
vmin=vmin,
vmax=vmax,
)
if return_ax:
return error_lambda, ddist, di, test_lamb_interv, ax
else:
return error_lambda, ddist, di, test_lamb_interv
def _get_local_coordinates_of_det(
self,
bragg=None,
lamb=None,
det_ref=None,
use_non_parallelism=None,
):
"""
Computation of translation (ddist, di, dj) and angular
(dtheta, dpsi, tilt) properties of an arbitrary detector choosen by
the user.
"""
# ------------
# check inputs
        if det_ref is None:
            msg = (
                "You need to provide your arbitrary detector\n"
                + "\t in order to compute its spatial properties!\n"
                + "\t You provided: {}".format(det_ref)
            )
            raise Exception(msg)
# Checkformat det
det_ref = self._checkformat_det(det=det_ref)
# ------------
# get approx detect
det_approx = self.get_detector_ideal(
bragg=bragg, lamb=lamb,
tangent_to_rowland=False,
use_non_parallelism=use_non_parallelism,
)
# ------------
# get vector delta between centers
delta = det_ref['cent'] - det_approx['cent']
ddist = np.sum(delta * (-det_approx['nout']))
di = np.sum(delta * det_approx['ei'])
dj = np.sum(delta * det_approx['ej'])
# ---------------
# get angles from unit vectors
dtheta, dpsi, tilt = None, None, None
# use formulas in _comp_optics.get_det_abs_from_rel()
sindtheta = np.sum(det_approx['ej'] * det_ref['nout'])
costheta_cospsi = np.sum(det_approx['nout'] * det_ref['nout'])
costheta_sinpsi = np.sum(det_approx['ei'] * det_ref['nout'])
costheta = np.sqrt(costheta_cospsi**2 + costheta_sinpsi**2)
dtheta = np.arctan2(sindtheta, costheta)
dpsi = np.arctan2(
costheta_sinpsi / costheta,
costheta_cospsi / costheta,
)
# ---------
# tilt
det_ei2 = (
np.cos(dpsi)*det_approx['ei'] - np.sin(dpsi)*det_approx['nout']
)
det_ej2 = np.cross(det_ref['nout'], det_ei2)
costilt = np.sum(det_ref['ei']*det_ei2)
sintilt = np.sum(det_ref['ei']*det_ej2)
tilt = np.arctan2(sintilt, costilt)
return ddist, di, dj, dtheta, dpsi, tilt
def get_lambbraggphi_from_ptsxixj_dthetapsi(
self,
pts=None,
xi=None, xj=None, det=None,
dtheta=None, psi=None,
ntheta=None, npsi=None,
n=None,
use_non_parallelism=None,
grid=None,
return_lamb=None,
):
""" Return the lamb, bragg and phi for provided pts and dtheta/psi
if grid = True:
            compute all pts / dtheta/psi combinations
=> return (npts, ndtheta) arrays
else:
each pts is associated to a single dtheta/psi
=> assumes npts == ndtheta == npsi
=> return (npts,) arrays
"""
# Check / Format inputs
if return_lamb is None:
return_lamb = True
det = self._checkformat_det(det)
# Get local basis
summ, vout, ve1, ve2 = self.get_local_noute1e2(
dtheta=dtheta, psi=psi,
ntheta=ntheta, npsi=npsi,
use_non_parallelism=use_non_parallelism,
include_summit=True,
)
# Derive bragg, phi
bragg, phi = _comp_optics.calc_braggphi_from_xixjpts(
pts=pts,
xi=xi, xj=xj, det=det,
summit=summ, nin=-vout, e1=ve1, e2=ve2,
grid=grid,
)
# Derive lamb
if return_lamb is True:
lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)
return bragg, phi, lamb
else:
return bragg, phi
def get_lamb_avail_from_pts(
self,
pts=None,
n=None, ndtheta=None,
det=None, nlamb=None, klamb=None,
use_non_parallelism=None,
strict=None,
return_phidtheta=None,
return_xixj=None,
):
""" Return the wavelength accessible from plasma points on the crystal
For a given plasma point, only a certain lambda interval can be
bragg-diffracted on the crystal (due to bragg's law and the crystal's
dimensions)
Beware, for a given pts and lamb, there can be up to 2 sets of
solutions
All non-valid solutions are set to nans, such that most of the time
there is only one
For a set of given:
- pts (3, npts) array, (x, y, z) coordinates
Using:
- nlamb: sampling of the lamb interval (default: 100)
            - ndtheta: sampling of the dtheta interval (default: 20)
- det: (optional) a detector dict, for xi and xj
Returns:
- lamb: (npts, nlamb) array of sampled valid wavelength interval
- phi: (npts, nlamb, ndtheta, 2) array of phi
- dtheta: (npts, nlamb, ndtheta, 2) array of dtheta
- psi: (npts, nlamb, ndtheta, 2) array of psi
And optionally (return_xixj=True and det provided as dict):
- xi: (npts, nlamb, ndtheta, 2) array of xi
- xj: (npts, nlamb, ndtheta, 2) array of xj
The result is computed with or w/o taking into account non-parallelism
"""
# Check / format
if ndtheta is None:
ndtheta = 20
if nlamb is None:
nlamb = 100
assert nlamb >= 2, "nlamb must be >= 2"
if return_phidtheta is None:
return_phidtheta = True
if return_xixj is None:
return_xixj = det is not None
if det is None:
return_xixj = False
if det is None:
strict = False
# Get lamb min / max
bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
pts=pts,
dtheta='envelop', psi='envelop',
ntheta=None, npsi=None,
n=n, grid=True,
use_non_parallelism=use_non_parallelism,
return_lamb=True,
)
lambmin = np.nanmin(lamb, axis=1)
lambmax = np.nanmax(lamb, axis=1)
if klamb is None:
klamb = np.linspace(0, 1, nlamb)
elif not (isinstance(klamb, np.ndarray) and klamb.ndim == 1):
msg = "Please provide klamb as a 1d vector!"
raise Exception(msg)
nlamb = klamb.size
lamb = lambmin[:, None] + (lambmax-lambmin)[:, None]*klamb
return _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj(
cryst=self,
lamb=lamb,
n=n,
ndtheta=ndtheta,
pts=pts,
use_non_parallelism=use_non_parallelism,
return_phidtheta=return_phidtheta,
return_xixj=return_xixj,
strict=strict,
det=det,
)
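    # Illustrative usage (sketch, commented out; `cryst` is a hypothetical
    # CrystalBragg instance, the plasma point is made up):
    #
    #     pts = np.array([[6.0], [0.0], [0.0]])    # one (x, y, z) point
    #     lamb = cryst.get_lamb_avail_from_pts(
    #         pts=pts, nlamb=50, return_phidtheta=False, return_xixj=False,
    #     )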
def _calc_dthetapsiphi_from_lambpts(
self,
pts=None, bragg=None, lamb=None,
n=None, ndtheta=None,
use_non_parallelism=None,
grid=None,
):
# Check / Format inputs
pts = _comp_optics._checkformat_pts(pts)
npts = pts.shape[1]
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
# get nout, e1, e2
nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
use_non_parallelism=use_non_parallelism
)
# Compute dtheta, psi, indnan (nlamb, npts, ndtheta)
# In general there are 2 solutions! (only close to rowland in practice)
dtheta, psi, indok, grid = _comp_optics.calc_dthetapsiphi_from_lambpts(
pts,
bragg,
            summit=self._dgeom['summit'],  # To be updated (non-parallelism)?
rcurve=self._dgeom['rcurve'],
nout=nout, e1=e1, e2=e2,
extenthalf=self._dgeom['extenthalf'],
ndtheta=ndtheta,
grid=grid,
)
# reshape bragg for matching dtheta.shape
if grid is True:
bragg = np.repeat(
np.repeat(
np.repeat(bragg[:, None], npts, axis=-1)[..., None],
dtheta.shape[2],
axis=-1,
)[..., None],
2,
axis=-1,
)
pts = pts[:, None, :, None, None]
else:
bragg = np.repeat(
np.repeat(bragg[:, None], dtheta.shape[1], axis=1)[..., None],
2,
axis=-1,
)
pts = pts[..., None, None]
bragg[~indok] = np.nan
# Get corresponding phi and re-check bragg, for safety
bragg2, phi = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
pts=pts,
dtheta=dtheta, psi=psi,
grid=False,
use_non_parallelism=use_non_parallelism,
return_lamb=False,
)
c0 = (
bragg2.shape == bragg.shape
and np.allclose(bragg, bragg2, equal_nan=True)
)
if not c0:
try:
plt.figure()
plt.plot(bragg, bragg2, '.')
except Exception as err:
pass
msg = (
"Inconsistency detected in bragg angle computations:\n"
+ "\t- from the points and lamb\n"
+ "\t- from the points and (dtheta, psi)\n"
+ "\nContext:\n"
+ "\t- use_non_parallelism: {}\n".format(use_non_parallelism)
+ "\t- bragg.shape = {}\n".format(bragg.shape)
+ "\t- bragg2.shape = {}\n".format(bragg2.shape)
)
raise Exception(msg)
return dtheta, psi, phi, bragg
def calc_raytracing_from_lambpts(
self,
lamb=None, bragg=None, pts=None,
xi_bounds=None, xj_bounds=None, nphi=None,
det=None, n=None, ndtheta=None,
johann=False, lpsi=None, ldtheta=None,
rocking=False, strict=None, plot=None, fs=None,
dmargin=None, wintit=None,
tit=None, proj=None,
legend=None, draw=None, returnas=None,
):
""" Visualize the de-focusing by ray-tracing of chosen lamb
If plot, 3 different plots can be produced:
- det: plots the intersection of rays with detector plane
- '2d': plots the geometry of the rays in 2d cross and hor
- '3d': plots the geometry of the rays in 3d
Specify the plotting option by setting plot to any of these (or a list)
"""
# Check / format inputs
if returnas is None:
returnas = 'data'
if plot is None or plot is True:
plot = ['det', '3d']
if isinstance(plot, str):
plot = plot.split('+')
assert all([ss in ['det', '2d', '3d'] for ss in plot])
assert returnas in ['data', 'ax']
pts = _comp_optics._checkformat_pts(pts)
npts = pts.shape[1]
# Get dtheta, psi and phi from pts/lamb
dtheta, psi, phi, bragg = self._calc_dthetapsiphi_from_lambpts(
pts=pts, lamb=lamb, bragg=bragg, n=n, ndtheta=ndtheta,
)
ndtheta = dtheta.shape[-1]
# assert dtheta.shape == (nlamb, npts, ndtheta)
# Check / get det
det = self._checkformat_det(det)
# Compute xi, xj of reflexion (phi -> phi + np.pi)
xi, xj = self.calc_xixj_from_braggphi(
bragg=bragg, phi=phi+np.pi, n=n,
dtheta=dtheta, psi=psi,
det=det, strict=strict, plot=False,
)
        # Plot to be checked - currently force-disabled (unnecessary?)
        plot = False
if plot is not False:
ptscryst, ptsdet = None, None
if '2d' in plot or '3d' in plot:
ptscryst = self.get_local_noute1e2(dtheta, psi)[0]
ptsdet = (det['cent'][:, None, None, None]
+ xi[None, ...]*det['ei'][:, None, None, None]
+ xj[None, ...]*det['ej'][:, None, None, None])
ax = _plot_optics.CrystalBragg_plot_raytracing_from_lambpts(
xi=xi, xj=xj, lamb=lamb,
xi_bounds=xi_bounds, xj_bounds=xj_bounds,
pts=pts, ptscryst=ptscryst, ptsdet=ptsdet,
det_cent=det['cent'], det_nout=det['nout'],
det_ei=det['ei'], det_ej=det['ej'],
cryst=self, proj=plot, fs=fs, dmargin=dmargin,
wintit=wintit, tit=tit, legend=legend, draw=draw)
if returnas == 'ax':
return ax
return dtheta, psi, phi, bragg, xi, xj
def _calc_spect1d_from_data2d(self, data, lamb, phi,
nlambfit=None, nphifit=None,
nxi=None, nxj=None,
spect1d=None, mask=None, vertsum1d=None):
if nlambfit is None:
nlambfit = nxi
if nphifit is None:
nphifit = nxj
return _comp_optics._calc_spect1d_from_data2d(
data, lamb, phi,
nlambfit=nlambfit,
nphifit=nphifit,
spect1d=spect1d,
mask=mask,
vertsum1d=vertsum1d,
)
def plot_data_vs_lambphi(
self,
xi=None, xj=None, data=None, mask=None,
        det=None, dtheta=None, psi=None, n=None,
        use_non_parallelism=None,
nlambfit=None, nphifit=None,
magaxis=None, npaxis=None,
dlines=None, spect1d='mean',
lambmin=None, lambmax=None,
xjcut=None, dxj=None,
plot=True, fs=None, tit=None, wintit=None,
cmap=None, vmin=None, vmax=None,
returnas=None,
):
# Check / format inputs
assert data is not None
if returnas is None:
returnas = 'spect'
lreturn = ['ax', 'spect']
if returnas not in lreturn:
msg = ("Arg returnas must be in {}\n:".format(lreturn)
+ "\t- 'spect': return a 1d vertically averaged spectrum\n"
+ "\t- 'ax' : return a list of axes instances")
raise Exception(msg)
xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)
nxi = xi.size if xi is not None else np.unique(xii).size
nxj = xj.size if xj is not None else np.unique(xjj).size
# Compute lamb / phi
bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xii, xj=xjj, det=det,
dtheta=dtheta, psi=psi,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=True,
)
# Compute lambfit / phifit and spectrum1d
(spect1d, lambfit, phifit,
vertsum1d, phiminmax) = self._calc_spect1d_from_data2d(
data, lamb, phi,
nlambfit=nlambfit, nphifit=nphifit, nxi=nxi, nxj=nxj,
spect1d=spect1d, mask=mask, vertsum1d=True
)
# Get phiref from mag axis
lambax, phiax = None, None
if magaxis is not None:
if npaxis is None:
npaxis = 1000
thetacryst = np.arctan2(self._dgeom['summit'][1],
self._dgeom['summit'][0])
thetaax = thetacryst + np.pi/2*np.linspace(-1, 1, npaxis)
pts = np.array([magaxis[0]*np.cos(thetaax),
magaxis[0]*np.sin(thetaax),
np.full((npaxis,), magaxis[1])])
braggax, phiax = self.calc_braggphi_from_pts(pts)
lambax = self.get_lamb_from_bragg(braggax)
phiax = np.arctan2(np.sin(phiax-np.pi), np.cos(phiax-np.pi))
ind = ((lambax >= lambfit[0]) & (lambax <= lambfit[-1])
& (phiax >= phifit[0]) & (phiax <= phifit[-1]))
lambax, phiax = lambax[ind], phiax[ind]
ind = np.argsort(lambax)
lambax, phiax = lambax[ind], phiax[ind]
# Get lamb / phi for xj
lambcut, phicut, spectcut = None, None, None
if xjcut is not None:
if dxj is None:
dxj = 0.002
xjcut = np.sort(np.atleast_1d(xjcut).ravel())
xicutf = np.tile(xi, (xjcut.size, 1))
xjcutf = np.repeat(xjcut[:, None], nxi, axis=1)
(
braggcut, phicut, lambcut,
) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xicutf, xj=xjcutf, det=det,
dtheta=0, psi=0,
use_non_parallelism=use_non_parallelism,
n=1,
grid=True,
return_lamb=True,
)
indxj = [(np.abs(xj-xjc) <= dxj).nonzero()[0] for xjc in xjcut]
spectcut = np.array([np.nanmean(data[ixj, :], axis=0)
for ixj in indxj])
# plot
ax = None
if plot:
ax = _plot_optics.CrystalBragg_plot_data_vs_lambphi(
xi, xj, bragg, lamb, phi, data,
lambfit=lambfit, phifit=phifit, spect1d=spect1d,
vertsum1d=vertsum1d, lambax=lambax, phiax=phiax,
lambmin=lambmin, lambmax=lambmax, phiminmax=phiminmax,
xjcut=xjcut, lambcut=lambcut, phicut=phicut, spectcut=spectcut,
cmap=cmap, vmin=vmin, vmax=vmax, dlines=dlines,
tit=tit, wintit=wintit, fs=fs)
if returnas == 'spect':
return spect1d, lambfit
elif returnas == 'ax':
return ax
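# Usage sketch (illustrative, not part of the original file): collapse a 2d
# detector image into a vertically-averaged 1d spectrum.  'cryst', 'det',
# 'xi' (nxi,), 'xj' (nxj,) and 'data' (nxj, nxi) are assumptions.
#     spect1d, lambfit = cryst.plot_data_vs_lambphi(
#         xi=xi, xj=xj, data=data, det=det,
#         nlambfit=200, nphifit=100,
#         spect1d='mean', plot=True, returnas='spect',
#     )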
def get_plasmadomain_at_lamb(
self,
config=None,
struct=None,
domain=None,
res=None,
det=None,
xixj_lim=None,
strict=None,
bragg=None,
lamb=None,
# for available lamb determination
ndtheta=None,
nlamb=None,
n=None,
use_non_parallelism=None,
# plotting
plot=None,
dax=None,
plot_as=None,
lcolor=None,
return_dax=None,
):
""" Return pts in the plasma domain and a mask
The mask is True only for points for which the desired wavelength is
accessible from the crystal (and from the detector if strict=True and
det is provided)
More than one value of lamb can be provided (nlamb >= 1)
pts is returned as a (3, npts) array
lambok is returned as a (nlamb, npts) array
"""
# ------------
# check inputs
struct = _check_optics._check_config_get_Ves(
config=config, struct=struct,
)
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)
# To be refined if xjlim is narrow
if ndtheta is None:
ndtheta = 5
# To be refined if xilim is narrow
if nlamb is None:
nlamb = 11
if strict is None:
strict = True
if plot is None:
plot = True
if return_dax is None:
return_dax = plot is True
# -------------
# sample volume
(
pts, dV, ind, (resR, resZ, resPhi),
) = config.dStruct['dObj']['Ves'][struct].get_sampleV(
res=res,
domain=domain,
returnas='(R, Z, Phi)',
)
# ------------------------------
# check access from crystal only
ptsXYZ = np.array([
pts[0, :]*np.cos(pts[2, :]),
pts[0, :]*np.sin(pts[2, :]),
pts[1, :],
])
lamb_access = self.get_lamb_avail_from_pts(
pts=ptsXYZ,
nlamb=2,
use_non_parallelism=use_non_parallelism,
return_phidtheta=False,
return_xixj=False,
strict=False,
)
lambok = np.zeros((lamb.size, pts.shape[1]), dtype=bool)
for ii, ll in enumerate(lamb):
lambok[ii, :] = (
(lamb_access[:, 0] <= ll) & (ll <= lamb_access[:, 1])
)
# ---------------
# refactor pts and lambok
indok = np.any(lambok, axis=0)
pts = pts[:, indok]
ptsXYZ = ptsXYZ[:, indok]
lambok = lambok[:, indok]
# ---------------
# check strict
if strict is True:
# det vs detbis if xixj_lim
detbis = dict(det)
if xixj_lim is not None:
detbis['outline'] = np.array([
np.r_[
xixj_lim[0][0],
xixj_lim[0][1]*np.r_[1, 1],
xixj_lim[0][0],
],
np.r_[
xixj_lim[1][0]*np.r_[1, 1],
xixj_lim[1][1]*np.r_[1, 1],
],
])
detbis['outline'] = np.concatenate(
(detbis['outline'], detbis['outline'][:, 0:1]),
axis=1,
)
# intersection with detbis
for kk, ll in enumerate(lamb):
lambi = _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj(
cryst=self,
lamb=np.full((lambok[kk, :].sum(), 1), ll),
n=n,
ndtheta=ndtheta,
pts=ptsXYZ[:, lambok[kk, :]],
use_non_parallelism=use_non_parallelism,
return_phidtheta=False,
return_xixj=False,
strict=strict,
det=detbis,
)
lambok[kk, lambok[kk, :]] = ~np.isnan(lambi[:, 0])
# -------
# return
if plot:
dax = _plot_optics.CrystalBragg_plot_plasma_domain_at_lamb(
cryst=self,
det=det,
xixj_lim=xixj_lim,
config=config,
lamb=lamb,
pts=pts,
reseff=[resR, resZ, resPhi],
lambok=lambok,
dax=dax,
plot_as=plot_as,
lcolor=lcolor,
)
# ---------------
# return
if return_dax is True:
return pts, lambok, dax
else:
return pts, lambok
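# Usage sketch (illustrative, not part of the original file): find which part
# of the plasma volume can reach the crystal (and detector) at two
# wavelengths.  'cryst', 'config' (a tofu Config instance), 'det' and the
# structure name 'Ves' are assumptions.
#     pts, lambok = cryst.get_plasmadomain_at_lamb(
#         config=config, struct='Ves', res=0.05, det=det,
#         lamb=[3.94e-10, 3.96e-10], strict=True, plot=False,
#     )
#     # pts: (3, npts) sampled points in (R, Z, phi)
#     # lambok: (nlamb, npts) boolean mask of accessible points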
def calc_signal_from_emissivity(
self,
emis=None,
config=None,
struct=None,
domain=None,
res=None,
det=None,
xixj_lim=None,
strict=None,
bragg=None,
lamb=None,
binning=None,
# for available lamb determination
ndtheta=None,
nlamb=None,
n=None,
use_non_parallelism=None,
# plotting
plot=None,
vmin=None,
vmax=None,
vmin_bin=None,
vmax_bin=None,
cmap=None,
dax=None,
fs=None,
dmargin=None,
tit=None,
return_dax=None,
):
""" Return pts in the plasma domain and a mask
The mask is True only for points for which the desired wavelength is
accesible from the crystal (and from the detector if strict=True and
det is provided)
More than one value of lamb can be provided (nlamb >= 1)
pts is returned as a (3, npts) array
lambok is returned as a (nlamb, npts) array
"""
# ------------
# check inputs
(
struct, lamb, binning,
) = _check_optics._check_calc_signal_from_emissivity(
emis=emis, config=config, struct=struct,
lamb=lamb, det=det, binning=binning,
)
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)
# To be refined if xjlim is narrow
if ndtheta is None:
ndtheta = 5
# To be refined if xilim is narrow
if nlamb is None:
nlamb = 11
if strict is None:
strict = True
if plot is None:
plot = True
if return_dax is None:
return_dax = plot is True
# -------------
# sample volume
(
pts, dV, ind, (resR, resZ, resPhi),
) = config.dStruct['dObj']['Ves'][struct].get_sampleV(
res=res,
domain=domain,
returnas='(R, Z, Phi)',
)
# ------------------------------
# check access from crystal only
ptsXYZ = np.array([
pts[0, :]*np.cos(pts[2, :]),
pts[0, :]*np.sin(pts[2, :]),
pts[1, :],
])
lamb_access = self.get_lamb_avail_from_pts(
pts=ptsXYZ,
nlamb=2,
use_non_parallelism=use_non_parallelism,
return_phidtheta=False,
return_xixj=False,
strict=False,
)
lambok = np.zeros((lamb.size, pts.shape[1]), dtype=bool)
for ii, ll in enumerate(lamb):
lambok[ii, :] = (
(lamb_access[:, 0] <= ll) & (ll <= lamb_access[:, 1])
)
# ---------------
# refactor pts and lambok
indok = np.any(lambok, axis=0)
pts = pts[:, indok]
ptsXYZ = ptsXYZ[:, indok]
lambok = lambok[:, indok]
# ---------------
# check strict
# det vs detbis if xixj_lim
detbis = dict(det)
if xixj_lim is not None:
detbis['outline'] = np.array([
np.r_[
xixj_lim[0][0],
xixj_lim[0][1]*np.r_[1, 1],
xixj_lim[0][0],
],
np.r_[
xixj_lim[1][0]*np.r_[1, 1],
xixj_lim[1][1]*np.r_[1, 1],
],
])
detbis['outline'] = np.concatenate(
(detbis['outline'], detbis['outline'][:, 0:1]),
axis=1,
)
# intersection with detbis
shape = tuple(np.r_[pts.shape[1], lamb.size, ndtheta, 2])
xi = np.full(shape, np.nan)
xj = np.full(shape, np.nan)
val = np.full(shape, np.nan)
for kk, ll in enumerate(lamb):
(
lambi, xii, xji,
) = _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj(
cryst=self,
lamb=np.full((lambok[kk, :].sum(), 1), ll),
n=n,
ndtheta=ndtheta,
pts=ptsXYZ[:, lambok[kk, :]],
use_non_parallelism=use_non_parallelism,
return_phidtheta=False,
return_xixj=True,
strict=True,
det=detbis,
)
iok = ~np.isnan(lambi[:, 0])
iokf = lambok[kk, :].nonzero()[0][iok]
lambok[kk, lambok[kk, :]] = iok
xi[iokf, kk, :, :] = xii[iok, 0, :, :]
xj[iokf, kk, :, :] = xji[iok, 0, :, :]
val[iokf, kk, :, :] = emis(
r=pts[0, iokf],
z=pts[1, iokf],
phi=pts[2, iokf],
lamb=lamb[kk:kk+1],
t=None,
)[:, 0, None, None]
# -------
# Optional binning
binned = None
if binning is not False:
iok = np.isfinite(val)
binned = scpstats.binned_statistic_2d(
xi[iok].ravel(),
xj[iok].ravel(),
val[iok].ravel(),
statistic='mean',
bins=binning,
expand_binnumbers=False,
)[0]
# -------
# return
if plot:
dax = _plot_optics.CrystalBragg_plot_signal_from_emissivity(
cryst=self,
det=det,
xixj_lim=xixj_lim,
config=config,
lamb=lamb,
pts=pts,
reseff=[resR, resZ, resPhi],
xi=xi,
xj=xj,
val=val,
lambok=lambok,
binning=binning,
binned=binned,
# plotting
vmin=vmin,
vmax=vmax,
vmin_bin=vmin_bin,
vmax_bin=vmax_bin,
cmap=cmap,
dax=dax,
fs=fs,
dmargin=dmargin,
tit=tit,
)
# ---------------
# return
if return_dax is True:
return pts, val, xi, xj, binned, dax
else:
return pts, val, xi, xj, binned
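# Usage sketch (illustrative, not part of the original file): compute the
# signal collected on the detector from a toy emissivity profile.  'cryst',
# 'config', 'det' and the structure name 'Ves' are assumptions; emis() must
# accept (r, z, phi, lamb, t) and return a (npts, nlamb) array.
#     def emis(r=None, z=None, phi=None, lamb=None, t=None):
#         # toy emissivity: gaussian shell in R, flat in wavelength
#         return np.exp(-(r[:, None] - 2.4)**2 / 0.1**2) * np.ones((1, lamb.size))
#     pts, val, xi, xj, binned = cryst.calc_signal_from_emissivity(
#         emis=emis, config=config, struct='Ves', res=0.05, det=det,
#         lamb=[3.96e-10], binning=100, plot=False,
#     )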
@staticmethod
def fit1d_dinput(
dlines=None, dconstraints=None, dprepare=None,
data=None, lamb=None,
mask=None, domain=None, pos=None, subset=None,
same_spectrum=None, same_spectrum_dlamb=None,
focus=None, valid_fraction=None, valid_nsigma=None,
focus_half_width=None, valid_return_fract=None,
):
""" Return a formatted dict of lines and constraints
To be fed to _fit12d.multigausfit1d_from_dlines()
Provides a user-friendly way of defining constraints
"""
import tofu.spectro._fit12d as _fit12d
return _fit12d.fit1d_dinput(
dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,
data=data, lamb=lamb,
mask=mask, domain=domain, pos=pos, subset=subset,
same_spectrum=same_spectrum,
same_spectrum_dlamb=same_spectrum_dlamb,
focus=focus, valid_fraction=valid_fraction,
valid_nsigma=valid_nsigma, focus_half_width=focus_half_width,
valid_return_fract=valid_return_fract)
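# Usage sketch (illustrative, not part of the original file): build the input
# dict for a 1d spectral fit.  The exact content of 'dlines' and
# 'dconstraints' is defined in tofu.spectro._fit12d and is assumed to have
# been prepared beforehand; 'spect1d' and 'lambfit' are a 1d spectrum and its
# wavelength vector (e.g. from plot_data_vs_lambphi above).
#     dinput = cryst.fit1d_dinput(
#         dlines=dlines, dconstraints=dconstraints,
#         data=spect1d, lamb=lambfit,
#     )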
def fit1d(
self,
# Input data kwdargs
data=None, lamb=None,
dinput=None, dprepare=None, dlines=None, dconstraints=None,
mask=None, domain=None, subset=None, pos=None,
same_spectrum=None, same_spectrum_dlamb=None,
focus=None, valid_fraction=None, valid_nsigma=None,
focus_half_width=None,
# Optimization kwdargs
dx0=None, dscales=None, x0_scale=None, bounds_scale=None,
method=None, tr_solver=None, tr_options=None, max_nfev=None,
xtol=None, ftol=None, gtol=None,
loss=None, verbose=None, chain=None, jac=None, showonly=None,
# Results extraction kwdargs
amp=None, coefs=None, ratio=None,
Ti=None, width=None, vi=None, shift=None,
pts_lamb_total=None, pts_lamb_detail=None,
# Saving and plotting kwdargs
save=None, name=None, path=None,
plot=None, fs=None, dmargin=None,
tit=None, wintit=None, returnas=None,
):
# ----------------------
# Get dinput for 1d fitting from dlines, dconstraints, dprepare...
if dinput is None:
dinput = self.fit1d_dinput(
dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,
data=data, lamb=lamb,
mask=mask, domain=domain, pos=pos, subset=subset,
focus=focus, valid_fraction=valid_fraction,
valid_nsigma=valid_nsigma, focus_half_width=focus_half_width,
same_spectrum=same_spectrum,
same_spectrum_dlamb=same_spectrum_dlamb)
# ----------------------
# return
import tofu.spectro._fit12d as _fit12d
return _fit12d.fit1d(
# Input data kwdargs
data=data, lamb=lamb,
dinput=dinput, dprepare=dprepare,
dlines=dlines, dconstraints=dconstraints,
mask=mask, domain=domain, subset=subset, pos=pos,
# Optimization kwdargs
method=method, tr_solver=tr_solver, tr_options=tr_options,
xtol=xtol, ftol=ftol, gtol=gtol,
max_nfev=max_nfev, loss=loss, chain=chain,
dx0=dx0, x0_scale=x0_scale, bounds_scale=bounds_scale,
jac=jac, verbose=verbose,
save=save, name=name, path=path,
amp=amp, coefs=coefs, ratio=ratio,
Ti=Ti, width=width, vi=vi, shift=shift,
pts_lamb_total=pts_lamb_total,
pts_lamb_detail=pts_lamb_detail,
plot=plot, fs=fs, wintit=wintit, tit=tit)
@staticmethod
def fit1d_extract(
dfit1d=None,
amp=None, coefs=None, ratio=None,
Ti=None, width=None,
vi=None, shift=None,
pts_lamb_total=None, pts_lamb_detail=None,
):
import tofu.spectro._fit12d as _fit12d
return _fit12d.fit1d_extract(
dfit1d=dfit1d,
amp=amp, coefs=coefs, ratio=ratio,
Ti=Ti, width=width,
vi=vi, shift=shift,
pts_lamb_total=pts_lamb_total, pts_lamb_detail=pts_lamb_detail)
def fit1d_from2d(self):
""" Useful for optimizing detector or crystal position
Given a set of 2d images on a detector
Transform the 2d (xi, xj) image into (lamb, phi)
Slice nphi 1d spectra
Fit them using a dict of reference lines (dlines)
Optionally provide constraints for the fitting
Return the vertical profiles of the wavelength shift of each line
To be used as input for a cost function and optimization
1d fitting is used instead of 2d because:
- faster (for optimization)
- does not require a choice of nbsplines
- easier to understand and decide for user
"""
# Check / format inputs
# NB: this method is an unfinished draft (its body ends with 'pass'): the
# names used below (lphi, lphi_tol, dprepare, dinput, data, xi, xj, ...)
# are expected to become arguments once the method is implemented
if lphi is None:
msg = ("Arg lphi must be provided !")
raise Exception(msg)
# ----------------------
# Prepare input data
# (geometrical transform, domain, binning, subset, noise...)
if dprepare is None:
dprepare = self.fit2d_prepare(
data=data, xi=xi, xj=xj, n=n,
det=det, dtheta=dtheta, psi=psi,
mask=mask, domain=domain,
pos=pos, binning=binning,
nbsplines=False, subset=False,
lphi=lphi, lphi_tol=lphi_tol)
# ----------------------
# Get dinput for 2d fitting from dlines, and dconstraints
if dinput is None:
dinput = self.fit2d_dinput(
dlines=dlines, dconstraints=dconstraints,
deg=deg, knots=knots, nbsplines=nbsplines,
domain=dprepare['domain'],
dataphi1d=dprepare['dataphi1d'], phi1d=dprepare['phi1d'])
# ----------------------
# fit
out = self.fit1d(
xi=None, xj=None, data=None, mask=None,
det=None, dtheta=None, psi=None, n=None,
nlambfit=None, nphifit=None,
lambmin=None, lambmax=None,
dlines=None, spect1d=None,
dconstraints=None, dx0=None,
same_spectrum=None, dlamb=None,
double=None,
dscales=None, x0_scale=None, bounds_scale=None,
method=None, max_nfev=None,
xtol=None, ftol=None, gtol=None,
loss=None, verbose=0, chain=None,
jac=None, showonly=None,
plot=None, fs=None, dmargin=None,
tit=None, wintit=None, returnas=None,
)
pass
def fit2d_dinput(
self, dlines=None, dconstraints=None, dprepare=None,
data=None, xi=None, xj=None, n=None,
det=None, dtheta=None, psi=None,
use_non_parallelism=None,
mask=None, domain=None, pos=None, binning=None, subset=None,
# lphi=None, lphi_tol=None,
deg=None, knots=None, nbsplines=None,
focus=None, valid_fraction=None, valid_nsigma=None,
focus_half_width=None, valid_return_fract=None,
):
""" Return a formatted dict of lines and constraints
To be fed to _fit12d.multigausfit2d_from_dlines()
Provides a user-friendly way of defining constraints
"""
import tofu.spectro._fit12d as _fit12d
if dprepare is None:
# ----------------------
# Geometrical transform
xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)
nxi = xi.size if xi is not None else np.unique(xii).size
nxj = xj.size if xj is not None else np.unique(xjj).size
# Compute lamb / phi
bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xii, xj=xjj, det=det,
dtheta=dtheta, psi=psi,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=True,
)
# ----------------------
# Prepare input data (domain, binning, subset, noise...)
dprepare = _fit12d.multigausfit2d_from_dlines_prepare(
data, lamb, phi,
mask=mask, domain=domain,
pos=pos, binning=binning,
nbsplines=nbsplines, subset=subset,
nxi=nxi, nxj=nxj,
) # , lphi=lphi, lphi_tol=lphi_tol)
return _fit12d.fit2d_dinput(
dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,
deg=deg, knots=knots, nbsplines=nbsplines,
focus=focus, valid_fraction=valid_fraction,
valid_nsigma=valid_nsigma, focus_half_width=focus_half_width,
valid_return_fract=valid_return_fract)
def fit2d(
self,
# Input data kwdargs
data=None, xi=None, xj=None, lamb=None, phi=None,
det=None, dtheta=None, psi=None, n=None,
dinput=None, dprepare=None, dlines=None, dconstraints=None,
mask=None, domain=None, subset=None, pos=None, binning=None,
focus=None, valid_fraction=None, valid_nsigma=None,
focus_half_width=None,
deg=None, knots=None, nbsplines=None,
# Optimization kwdargs
dx0=None, dscales=None, x0_scale=None, bounds_scale=None,
method=None, tr_solver=None, tr_options=None, max_nfev=None,
xtol=None, ftol=None, gtol=None,
loss=None, verbose=None, chain=None, jac=None, showonly=None,
predeclare=None, debug=None,
# Results extraction kwdargs
amp=None, coefs=None, ratio=None,
Ti=None, width=None, vi=None, shift=None,
pts_lamb_total=None, pts_lamb_detail=None,
# Saving and plotting kwdargs
save=None, name=None, path=None,
plot=None, fs=None, dmargin=None,
tit=None, wintit=None, returnas=None,
):
# npts=None, dax=None,
# spect1d=None, nlambfit=None,
# plotmode=None, angunits=None, indspect=None,
# cmap=None, vmin=None, vmax=None):
""" Perform 2d fitting of a 2d spectrometre image
Fit the spectrum by a sum of gaussians
Modulate each gaussian parameters by bsplines in the spatial direction
data must be provided in shape (nt, nxi, nxj), where:
- nt is the number of time steps
- nxi is the nb. of pixels in the horizontal / spectral direction
- nxj is the nb. of pixels in the vertical / spatial direction
"""
# ----------------------
# Geometrical transform in dprepare
if dinput is None:
dinput = self.fit2d_dinput(
dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,
data=data, xi=xi, xj=xj, n=n,
det=det, dtheta=dtheta, psi=psi,
mask=mask, domain=domain,
pos=pos, binning=binning, subset=subset,
deg=deg, knots=knots, nbsplines=nbsplines,
focus=focus, valid_fraction=valid_fraction,
valid_nsigma=valid_nsigma, focus_half_width=focus_half_width)
# ----------------------
# return
import tofu.spectro._fit12d as _fit12d
return _fit12d.fit2d(
dinput=dinput, dprepare=dprepare,
dlines=dlines, dconstraints=dconstraints,
lamb=lamb, phi=phi, data=data, mask=mask,
nxi=dinput['dprepare']['nxi'], nxj=dinput['dprepare']['nxj'],
domain=domain, pos=pos, binning=binning, subset=subset,
deg=deg, knots=knots, nbsplines=nbsplines,
method=method, tr_solver=tr_solver, tr_options=tr_options,
xtol=xtol, ftol=ftol, gtol=gtol,
max_nfev=max_nfev, loss=loss, chain=chain,
dx0=dx0, x0_scale=x0_scale, bounds_scale=bounds_scale,
jac=jac, verbose=verbose,
save=save, name=name, path=path,
plot=plot)
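# Usage sketch (illustrative, not part of the original file): 2d fit of a
# series of detector images.  As documented above, 'data' is assumed of shape
# (nt, nxi, nxj); 'xi', 'xj', 'det', 'dlines' and 'dconstraints' are
# assumptions.
#     dfit2d = cryst.fit2d(
#         data=data, xi=xi, xj=xj, det=det,
#         dlines=dlines, dconstraints=dconstraints,
#         deg=2, nbsplines=13,
#         verbose=1, plot=False,
#     )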
@staticmethod
def fit2d_extract(dfit2d=None,
amp=None, Ti=None, vi=None,
pts_phi=None, npts_phi=None,
pts_lamb_phi_total=None,
pts_lamb_phi_detail=None):
import tofu.spectro._fit12d as _fit12d
return _fit12d.fit2d_extract_data(
dfit2d=dfit2d,
amp=amp, Ti=Ti, vi=vi,
pts_phi=pts_phi, npts_phi=npts_phi,
pts_lamb_phi_total=pts_lamb_phi_total,
pts_lamb_phi_detail=pts_lamb_phi_detail)
def fit2d_plot(self, dfit2d=None, ratio=None,
amp=None, Ti=None, vi=None,
pts_lamb_phi_total=None, pts_lamb_phi_detail=None,
dax=None, plotmode=None, angunits=None,
cmap=None, vmin=None, vmax=None,
dmargin=None, tit=None, wintit=None, fs=None):
dout = self.fit2d_extract(
dfit2d,
amp=amp, Ti=Ti, vi=vi,
pts_lamb_phi_total=pts_lamb_phi_total,
pts_lamb_phi_detail=pts_lamb_phi_detail)
return _plot_optics.CrystalBragg_plot_data_fit2d(
dfit2d=dfit2d, dout=dout, ratio=ratio,
dax=dax, plotmode=plotmode, angunits=angunits,
cmap=cmap, vmin=vmin, vmax=vmax,
dmargin=dmargin, tit=tit, wintit=wintit, fs=fs)
def noise_analysis(
self, data=None, xi=None, xj=None, n=None,
det=None, dtheta=None, psi=None,
use_non_parallelism=None,
mask=None, valid_fraction=None, nxerrbin=None,
margin=None, domain=None, nlamb=None,
deg=None, knots=None, nbsplines=None,
loss=None, max_nfev=None,
xtol=None, ftol=None, gtol=None,
method=None, tr_solver=None, tr_options=None,
verbose=None, plot=None,
ms=None, dcolor=None,
dax=None, fs=None, dmargin=None,
wintit=None, tit=None, sublab=None,
save_fig=None, name_fig=None, path_fig=None,
fmt=None, return_dax=None,
):
# ----------------------
# Geometrical transform
bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xi, xj=xj, det=det,
dtheta=dtheta, psi=psi,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=True,
)
import tofu.spectro._fit12d as _fit12d
return _fit12d.noise_analysis_2d(
data, lamb, phi,
mask=mask, valid_fraction=valid_fraction,
margin=margin, nxerrbin=nxerrbin,
nlamb=nlamb, deg=deg, knots=knots, nbsplines=nbsplines,
loss=loss, max_nfev=max_nfev,
xtol=xtol, ftol=ftol, gtol=gtol,
method=method, tr_solver=tr_solver, tr_options=tr_options,
verbose=verbose, plot=plot,
ms=ms, dcolor=dcolor,
dax=dax, fs=fs, dmargin=dmargin,
wintit=wintit, tit=tit, sublab=sublab,
save_fig=save_fig, name_fig=name_fig, path_fig=path_fig,
fmt=fmt, return_dax=return_dax)
@staticmethod
def noise_analysis_plot(
dnoise=None, margin=None, valid_fraction=None,
ms=None, dcolor=None,
dax=None, fs=None, dmargin=None,
wintit=None, tit=None, sublab=None,
save=None, name=None, path=None, fmt=None,
):
import tofu.spectro._plot as _plot_spectro
return _plot_spectro.plot_noise_analysis(
dnoise=dnoise, margin=margin, valid_fraction=valid_fraction,
ms=ms, dcolor=dcolor,
dax=dax, fs=fs, dmargin=dmargin,
wintit=wintit, tit=tit, sublab=sublab,
save=save, name=name, path=path, fmt=fmt)
def noise_analysis_scannbs(
self, data=None, xi=None, xj=None, n=None,
det=None, dtheta=None, psi=None,
use_non_parallelism=None,
mask=None, nxerrbin=None,
domain=None, nlamb=None,
deg=None, knots=None, nbsplines=None, lnbsplines=None,
loss=None, max_nfev=None,
xtol=None, ftol=None, gtol=None,
method=None, tr_solver=None, tr_options=None,
verbose=None, plot=None,
ms=None, dax=None, fs=None, dmargin=None,
wintit=None, tit=None, sublab=None,
save_fig=None, name_fig=None, path_fig=None,
fmt=None, return_dax=None,
):
# ----------------------
# Geometrical transform
bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xi, xj=xj, det=det,
dtheta=0, psi=0,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=True,
)
import tofu.spectro._fit12d as _fit12d
return _fit12d.noise_analysis_2d_scannbs(
data, lamb, phi,
mask=mask, nxerrbin=nxerrbin, nlamb=nlamb,
deg=deg, knots=knots, nbsplines=nbsplines, lnbsplines=lnbsplines,
loss=loss, max_nfev=max_nfev,
xtol=xtol, ftol=ftol, gtol=gtol,
method=method, tr_solver=tr_solver, tr_options=tr_options,
verbose=verbose, plot=plot,
ms=ms, dax=dax, fs=fs, dmargin=dmargin,
wintit=wintit, tit=tit, sublab=sublab,
save_fig=save_fig, name_fig=name_fig, path_fig=path_fig,
fmt=fmt, return_dax=return_dax)
@staticmethod
def noise_analysis_scannbs_plot(
dnoise_scan=None, ms=None,
dax=None, fs=None, dmargin=None,
wintit=None, tit=None, sublab=None,
save=None, name=None, path=None, fmt=None,
):
import tofu.spectro._plot as _plot_spectro
return _plot_spectro.plot_noise_analysis_scannbs(
dnoise=dnoise_scan, ms=ms,
dax=dax, fs=fs, dmargin=dmargin,
wintit=wintit, tit=tit, sublab=sublab,
save=save, name=name, path=path, fmt=fmt)
import sys
import os
import warnings
import copy
import numpy as np
import scipy.interpolate as scpinterp
import scipy.stats as scpstats
import datetime as dtm
import matplotlib.pyplot as plt
import matplotlib as mpl
from tofu import __version__ as __version__
import tofu.pathfile as tfpf
import tofu.utils as utils
from . import _def as _def
from . import _GG as _GG
from . import _core
from . import _check_optics
from . import _comp_optics as _comp_optics
from . import _plot_optics as _plot_optics
import tofu.spectro._rockingcurve as _rockingcurve
__all__ = ['CrystalBragg']
_Type = 'Tor'
_NTHREADS = 16
_RETURN_COPY = False
_USE_NON_PARALLELISM = True
class CrystalBragg(utils.ToFuObject):
_ddef = {
'Id': {
'shot': 0, 'Exp': 'dummy', 'Diag': 'dummy',
'include': [
'Mod', 'Cls', 'Exp', 'Diag', 'Name', 'shot', 'version',
],
},
'dgeom': {'Type': 'sph', 'Typeoutline': 'rect'},
'dmat': {},
'dbragg': {'braggref': np.pi/4.},
'dmisc': {'color': 'k'},
}
_dplot = {'cross':{'Elt':'P',
'dP':{'color':'k','lw':2},
'dI':{'color':'k','ls':'--','marker':'x','ms':8,'mew':2},
'dBs':{'color':'b','ls':'--','marker':'x','ms':8,'mew':2},
'dBv':{'color':'g','ls':'--','marker':'x','ms':8,'mew':2},
'dVect':{'color':'r','scale':10}},
'hor':{'Elt':'P',
'dP':{'color':'k','lw':2},
'dI':{'color':'k','ls':'--'},
'dBs':{'color':'b','ls':'--'},
'dBv':{'color':'g','ls':'--'},
'Nstep':50},
'3d':{}}
def __init_subclass__(cls, color='k', **kwdargs):
super(CrystalBragg,cls).__init_subclass__(**kwdargs)
cls._ddef = copy.deepcopy(CrystalBragg._ddef)
cls._dplot = copy.deepcopy(CrystalBragg._dplot)
cls._set_color_ddef(cls._color)
@classmethod
def _set_color_ddef(cls, color):
cls._ddef['dmisc']['color'] = mpl.colors.to_rgba(color)
def __init__(self, dgeom=None, dmat=None, dbragg=None,
Id=None, Name=None, Exp=None, Diag=None, shot=None,
fromdict=None, sep=None,
SavePath=os.path.abspath('./'),
SavePath_Include=tfpf.defInclude, color=None):
if sys.version[0]=='2':
self._dstrip = utils.ToFuObjectBase._dstrip.copy()
self.__class__._strip_init()
self._dplot = copy.deepcopy(self.__class__._dplot)
kwdargs = locals()
del kwdargs['self']
super(CrystalBragg,self).__init__(**kwdargs)
def _reset(self):
super(CrystalBragg,self)._reset()
self._dgeom = dict.fromkeys(self._get_keys_dgeom())
self._dmat = dict.fromkeys(self._get_keys_dmat())
self._dbragg = dict.fromkeys(self._get_keys_dbragg())
self._dmisc = dict.fromkeys(self._get_keys_dmisc())
@classmethod
def _checkformat_inputs_Id(cls, Id=None, Name=None,
Exp=None, Diag=None, shot=None, Type=None,
include=None,
**kwdargs):
if Id is not None:
assert isinstance(Id,utils.ID)
Name, Exp, Type = Id.Name, Id.Exp, Id.Type
if Type is None:
Type = cls._ddef['dgeom']['Type']
if Exp is None:
Exp = cls._ddef['Id']['Exp']
if Diag is None:
Diag = cls._ddef['Id']['Diag']
if shot is None:
shot = cls._ddef['Id']['shot']
if include is None:
include = cls._ddef['Id']['include']
dins = {'Name':{'var':Name, 'cls':str},
'Exp': {'var':Exp, 'cls':str},
'Diag': {'var':Diag, 'cls':str},
'shot': {'var':shot, 'cls':int},
'Type': {'var':Type, 'in':['sph']},
'include':{'var':include, 'listof':str}}
dins, err, msg = cls._check_InputsGeneric(dins)
if err:
raise Exception(msg)
kwdargs.update({'Name':Name, 'shot':shot,
'Exp':Exp, 'Diag':Diag, 'Type':Type,
'include':include})
return kwdargs
@staticmethod
def _get_largs_dgeom():
largs = ['dgeom']
return largs
@staticmethod
def _get_largs_dmat():
largs = ['dmat']
return largs
@staticmethod
def _get_largs_dbragg():
largs = ['dbragg']
return largs
@staticmethod
def _get_largs_dmisc():
largs = ['color']
return largs
@staticmethod
def _get_keys_dgeom():
lk = ['Type', 'Typeoutline', 'summit', 'center', 'extenthalf', 'surface',
'nin', 'nout', 'e1', 'e2', 'rcurve',
'move', 'move_param', 'move_kwdargs']
return lk
@staticmethod
def _get_keys_dmat():
lk = ['formula', 'density', 'symmetry',
'lengths', 'angles', 'cut', 'd',
'alpha', 'beta', 'nin', 'nout', 'e1', 'e2']
return lk
@staticmethod
def _get_keys_dbragg():
lk = ['rockingcurve', 'lambref', 'braggref']
return lk
@staticmethod
def _get_keys_dmisc():
lk = ['color']
return lk
def _init(self, dgeom=None, dmat=None, dbragg=None, color=None, **kwdargs):
allkwds = dict(locals(), **kwdargs)
largs = self._get_largs_dgeom()
kwds = self._extract_kwdargs(allkwds, largs)
self.set_dgeom(**kwds)
largs = self._get_largs_dmat()
kwds = self._extract_kwdargs(allkwds, largs)
self.set_dmat(**kwds)
largs = self._get_largs_dbragg()
kwds = self._extract_kwdargs(allkwds, largs)
self.set_dbragg(**kwds)
largs = self._get_largs_dmisc()
kwds = self._extract_kwdargs(allkwds, largs)
self._set_dmisc(**kwds)
self._dstrip['strip'] = 0
def set_dgeom(self, dgeom=None):
self._dgeom = _check_optics._checkformat_dgeom(
dgeom=dgeom, ddef=self._ddef['dgeom'],
valid_keys=self._get_keys_dgeom(),
)
if self._dgeom['move'] is not None:
self.set_move(
move=self._dgeom['move'],
param=self._dgeom['move_param'],
**self._dgeom['move_kwdargs'],
)
def set_dmat(self, dmat=None):
self._dmat = _check_optics._checkformat_dmat(
dmat=dmat, dgeom=self._dgeom,
ddef=self._ddef['dmat'],
valid_keys=self._get_keys_dmat()
)
def set_dbragg(self, dbragg=None):
self._dbragg = _check_optics._checkformat_dbragg(
dbragg=dbragg,
ddef=self._ddef['dbragg'],
valid_keys=self._get_keys_dbragg(),
dmat=self._dmat,
)
def _set_color(self, color=None):
color = _check_optics._checkformat_inputs_dmisc(
color=color, ddef=self._ddef,
)
self._dmisc['color'] = color
self._dplot['cross']['dP']['color'] = color
self._dplot['hor']['dP']['color'] = color
def _set_dmisc(self, color=None):
self._set_color(color)
def _strip_dgeom(self, lkeep=None):
lkeep = self._get_keys_dgeom()
utils.ToFuObject._strip_dict(self._dgeom, lkeep=lkeep)
def _strip_dmat(self, lkeep=None):
lkeep = self._get_keys_dmat()
utils.ToFuObject._strip_dict(self._dmat, lkeep=lkeep)
def _strip_dbragg(self, lkeep=None):
lkeep = self._get_keys_dbragg()
utils.ToFuObject._strip_dict(self._dbragg, lkeep=lkeep)
def _strip_dmisc(self, lkeep=['color']):
utils.ToFuObject._strip_dict(self._dmisc, lkeep=lkeep)
def _rebuild_dgeom(self, lkeep=None):
lkeep = self._get_keys_dgeom()
reset = utils.ToFuObject._test_Rebuild(self._dgeom, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(self._dgeom,
lkeep=lkeep, dname='dgeom')
self._set_dgeom(dgeom=self._dgeom)
def _rebuild_dmat(self, lkeep=None):
lkeep = self._get_keys_dmat()
reset = utils.ToFuObject._test_Rebuild(self._dmat, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(self._dmat,
lkeep=lkeep, dname='dmat')
self.set_dmat(self._dmat)
def _rebuild_dbragg(self, lkeep=None):
lkeep = self._get_keys_dbragg()
reset = utils.ToFuObject._test_Rebuild(self._dbragg, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(self._dbragg,
lkeep=lkeep, dname='dbragg')
self.set_dbragg(self._dbragg)
def _rebuild_dmisc(self, lkeep=['color']):
reset = utils.ToFuObject._test_Rebuild(self._dmisc, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(self._dmisc,
lkeep=lkeep, dname='dmisc')
self._set_dmisc(color=self.dmisc['color'])
@classmethod
def _strip_init(cls):
cls._dstrip['allowed'] = [0, 1]
nMax = max(cls._dstrip['allowed'])
doc = """
1: Remove nothing"""
doc = utils.ToFuObjectBase.strip.__doc__.format(doc,nMax)
if sys.version[0]=='2':
cls.strip.__func__.__doc__ = doc
else:
cls.strip.__doc__ = doc
def strip(self, strip=0):
super(CrystalBragg, self).strip(strip=strip)
def _strip(self, strip=0):
if strip==0:
self._rebuild_dgeom()
self._rebuild_dmat()
self._rebuild_dbragg()
self._rebuild_dmisc()
else:
self._strip_dgeom()
self._strip_dmat()
self._strip_dbragg()
self._strip_dmisc()
def _to_dict(self):
dout = {'dgeom':{'dict':self._dgeom, 'lexcept':None},
'dmat':{'dict':self._dmat, 'lexcept':None},
'dbragg':{'dict':self._dbragg, 'lexcept':None},
'dmisc':{'dict':self._dmisc, 'lexcept':None},
'dplot':{'dict':self._dplot, 'lexcept':None}}
return dout
def _from_dict(self, fd):
self._dgeom.update(**fd.get('dgeom', {}))
self._dmat.update(**fd.get('dmat', {}))
self._dbragg.update(**fd.get('dbragg', {}))
self._dmisc.update(**fd.get('dmisc', {}))
self._dplot.update(**fd.get('dplot', {}))
@property
def Type(self):
return self._Id.Type
@property
def dgeom(self):
return self._dgeom
@property
def dmat(self):
return self._dmat
@property
def dbragg(self):
return self._dbragg
@property
def dmisc(self):
return self._dmisc
@property
def summit(self):
return self._dgeom['summit']
@property
def center(self):
return self._dgeom['center']
@property
def ismobile(self):
return self._dgeom['move'] not in [None, False]
@property
def rockingcurve(self):
if self._dbragg.get('rockingcurve') is not None:
if self._dbragg['rockingcurve'].get('type') is not None:
return self._dbragg['rockingcurve']
raise Exception("rockingcurve was not set!")
def get_unit_vectors(self, use_non_parallelism=None):
if use_non_parallelism is None:
use_non_parallelism = _USE_NON_PARALLELISM
if use_non_parallelism is True:
nout = self._dmat['nout']
e1 = self._dmat['e1']
e2 = self._dmat['e2']
else:
nout = self._dgeom['nout']
e1 = self._dgeom['e1']
e2 = self._dgeom['e2']
return nout, e1, e2, use_non_parallelism
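# Usage sketch (illustrative, not part of the original file): compare the
# ideal basis (from dgeom) to the basis tilted by the non-parallelism angles
# alpha / beta (from dmat).
#     nout0, e10, e20, _ = cryst.get_unit_vectors(use_non_parallelism=False)
#     nout1, e11, e21, _ = cryst.get_unit_vectors(use_non_parallelism=True)
#     tilt = np.arccos(np.clip(np.sum(nout0*nout1), -1., 1.))  # angle between the two nout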
def set_color(self, col):
self._set_color(col)
def get_color(self):
return self._dmisc['color']
def get_summary(self, sep=' ', line='-', just='l',
table_sep=None, verb=True, return_=False):
col0 = [
'formula', 'symmetry', 'cut', 'density',
'd (A)',
'bragg({:9.6} A) (deg)'.format(self._dbragg['lambref']*1e10),
'Type', 'outline', 'surface (cm²)', 'rcurve', 'rocking curve',
]
ar0 = [self._dmat['formula'], self._dmat['symmetry'],
str(self._dmat['cut']), str(self._dmat['density']),
'{0:5.3f}'.format(self._dmat['d']*1.e10),
str(self._dbragg['braggref']*180./np.pi),
self._dgeom['Type'], self._dgeom['Typeoutline'],
'{0:5.1f}'.format(self._dgeom['surface']*1.e4),
'{0:6.3f}'.format(self._dgeom['rcurve'])]
try:
ar0.append(self.rockingcurve['type'])
except Exception as err:
ar0.append('None')
col1 = ['half-extent', 'summit', 'center', 'nout', 'e1',
'alpha', 'beta']
ar1 = [
str(np.round(self._dgeom['extenthalf'], decimals=3)),
str(np.round(self._dgeom['summit'], decimals=2)),
str(np.round(self._dgeom['center'], decimals=2)),
str(np.round(self._dmat['nout'], decimals=3)),
str(np.round(self._dmat['e1'], decimals=3)),
str(np.round(self._dmat['alpha'], decimals=6)),
str(np.round(self._dmat['beta'], decimals=6)),
]
if self._dgeom.get('move') not in [None, False]:
col1 += ['move', 'param']
ar1 += [self._dgeom['move'],
str(np.round(self._dgeom['move_param'], decimals=5))]
if self._dmisc.get('color') is not None:
col1.append('color')
ar1.append(str(self._dmisc['color']))
lcol = [col0, col1]
lar = [ar0, ar1]
return self._get_summary(lar, lcol,
sep=sep, line=line, table_sep=table_sep,
verb=verb, return_=return_)
def _update_or_copy(self, dgeom, pinhole=None,
return_copy=None,
name=None, diag=None, shot=None):
if return_copy is None:
return_copy = _RETURN_COPY
for kk, vv in self._dgeom.items():
if kk not in dgeom.keys():
dgeom[kk] = vv
if return_copy is True:
if name is None:
name = self.Id.Name + 'copy'
if diag is None:
diag = self.Id.Diag
if shot is None:
shot = self.Id.shot
return self.__class__(dgeom=dgeom,
dbragg=self._dbragg,
dmat=self._dmat,
color=self._dmisc['color'],
Exp=self.Id.Exp,
Diag=diag,
Name=name,
shot=shot,
SavePath=self.Id.SavePath)
else:
dgeom0 = self.dgeom
try:
self.set_dgeom(dgeom=dgeom)
self._dmat = _check_optics._checkformat_dmat(
dmat={
k0: v0 for k0, v0 in self._dmat.items()
if k0 not in ['nin', 'nout', 'e1', 'e2']
},
dgeom=self._dgeom,
ddef=self._ddef['dmat'],
valid_keys=self._get_keys_dmat()
)
except Exception as err:
self.set_dgeom(dgeom=dgeom0)
msg = (str(err)
+ "\nAn exception occured during updating\n"
+ " => instance unmoved")
raise Exception(msg)
def _rotate_or_translate(self, func, **kwdargs):
pts = np.array([self._dgeom['summit'], self._dgeom['center']]).T
if 'rotate' in func.__name__:
vect = np.array([
self._dgeom['nout'],
self._dgeom['e1'],
self._dgeom['e2']
]).T
pts, vect = func(pts=pts, vect=vect, **kwdargs)
return {'summit': pts[:, 0], 'center': pts[:, 1],
'nout': vect[:, 0], 'nin': -vect[:, 0],
'e1': vect[:, 1], 'e2': vect[:, 2]}
else:
pts = func(pts=pts, **kwdargs)
return {'summit': pts[:, 0], 'center': pts[:, 1]}
def translate_in_cross_section(self, distance=None, direction_rz=None,
phi=None,
return_copy=None,
diag=None, name=None, shot=None):
if phi is None:
phi = np.arctan2(*self.summit[1::-1])
msg = ("Poloidal plane was not explicitely specified\n"
+ " => phi set to self.summit's phi ({})".format(phi))
warnings.warn(msg)
dgeom = self._rotate_or_translate(
self._translate_pts_poloidal_plane,
phi=phi, direction_rz=direction_rz, distance=distance)
return self._update_or_copy(dgeom,
return_copy=return_copy,
diag=diag, name=name, shot=shot)
def translate_3d(self, distance=None, direction=None,
return_copy=None,
diag=None, name=None, shot=None):
dgeom = self._rotate_or_translate(
self._translate_pts_3d,
direction=direction, distance=distance)
return self._update_or_copy(dgeom,
return_copy=return_copy,
diag=diag, name=name, shot=shot)
def rotate_in_cross_section(self, angle=None, axis_rz=None,
phi=None,
return_copy=None,
diag=None, name=None, shot=None):
if phi is None:
phi = np.arctan2(*self.summit[1::-1])
msg = ("Poloidal plane was not explicitely specified\n"
+ " => phi set to self.summit's phi ({})".format(phi))
warnings.warn(msg)
dgeom = self._rotate_or_translate(
self._rotate_pts_vectors_in_poloidal_plane,
axis_rz=axis_rz, angle=angle, phi=phi)
return self._update_or_copy(dgeom,
return_copy=return_copy,
diag=diag, name=name, shot=shot)
def rotate_around_torusaxis(self, angle=None,
return_copy=None,
diag=None, name=None, shot=None):
dgeom = self._rotate_or_translate(
self._rotate_pts_vectors_around_torusaxis,
angle=angle)
return self._update_or_copy(dgeom,
return_copy=return_copy,
diag=diag, name=name, shot=shot)
def rotate_around_3daxis(self, angle=None, axis=None,
return_copy=None,
diag=None, name=None, shot=None):
dgeom = self._rotate_or_translate(
self._rotate_pts_vectors_around_3daxis,
axis=axis, angle=angle)
return self._update_or_copy(dgeom,
return_copy=return_copy,
diag=diag, name=name, shot=shot)
def set_move(self, move=None, param=None, **kwdargs):
move, param, kwdargs = self._checkformat_set_move(move, param, kwdargs)
self._dgeom['move'] = move
self._dgeom['move_param'] = param
if isinstance(kwdargs, dict) and len(kwdargs) == 0:
kwdargs = None
self._dgeom['move_kwdargs'] = kwdargs
def move(self, param):
param = self._move(param, dictname='_dgeom')
self._dgeom['move_param'] = param
def get_rockingcurve_func(self, lamb=None, n=None):
drock = self.rockingcurve
if drock['type'] == 'tabulated-1d':
if lamb is not None and lamb != drock['lamb']:
msg = ("rocking curve was tabulated only for:\n"
+ "\tlamb = {} m\n".format(lamb)
+ " => Please let lamb=None")
raise Exception(msg)
lamb = drock['lamb']
bragg = self._checkformat_bragglamb(lamb=lamb, n=n)
func = scpinterp.interp1d(drock['dangle'] + bragg, drock['value'],
kind='linear', bounds_error=False,
fill_value=0, assume_sorted=True)
elif drock['type'] == 'tabulated-2d':
lmin, lmax = drock['lamb'].min(), drock['lamb'].max()
if lamb is None:
lamb = drock['lamb']
if lamb < lmin or lamb > lmax:
msg = ("rocking curve was tabulated only in interval:\n"
+ "\tlamb in [{}; {}] m\n".format(lmin, lmax)
+ " => Please set lamb accordingly")
raise Exception(msg)
bragg = self._checkformat_bragglamb(lamb=lamb, n=n)
def func(angle, lamb=lamb, bragg=bragg, drock=drock):
return scpinterp.interp2d(drock['dangle']+bragg, drock['lamb'],
drock['value'], kind='linear',
bounds_error=False, fill_value=0,
assume_sorted=True)(angle, lamb)
else:
raise NotImplementedError
# NB: the func below is unreachable (placed after the raise above); kept as
# a draft of a Lorentzian rocking-curve model
def func(angle, d=d, delta_bragg=delta_bragg,
Rmax=drock['Rmax'], sigma=drock['sigma']):
core = sigma**2/((angle - (bragg+delta_bragg))**2 + sigma**2)
if Rmax is None:
return core/(sigma*np.pi)
else:
return Rmax*core
return func, lamb, bragg
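# Usage sketch (illustrative, not part of the original file): evaluate the
# tabulated rocking curve around the Bragg angle (requires a rocking curve to
# have been set in dbragg, e.g. of type 'tabulated-1d').
#     func, lamb, bragg = cryst.get_rockingcurve_func()
#     dangle = np.linspace(-5e-4, 5e-4, 201)      # rad, offsets around bragg
#     power_ratio = func(bragg + dangle)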
def plot_rockingcurve(self, lamb=None, n=None, sigma=None,
npts=None, color=None, ang_units=None,
dmargin=None, fs=None, ax=None, legend=None):
drock = self.rockingcurve
func, lamb, bragg = self.get_rockingcurve_func(lamb=lamb, n=n)
axtit = 'Rocking curve for ' + self.Id.Name
return _plot_optics.CrystalBragg_plot_rockingcurve(
func=func, bragg=bragg, lamb=lamb,
sigma=sigma, npts=npts,
ang_units=ang_units, axtit=axtit, color=color,
fs=fs, ax=ax, legend=legend)
def compute_rockingcurve(
self, ih=None, ik=None, il=None, lamb=None,
use_non_parallelism=None, na=None,
alpha_limits=None,
therm_exp=None, plot_therm_exp=None,
plot_asf=None, plot_power_ratio=None,
plot_asymmetry=None, plot_cmaps=None,
verb=None, returnas=None,
):
return _rockingcurve.compute_rockingcurve(
ih=ih, ik=ik, il=il, lamb=lamb,
use_non_parallelism=use_non_parallelism, na=na,
alpha_limits=alpha_limits,
therm_exp=therm_exp, plot_therm_exp=plot_therm_exp,
plot_asf=plot_asf, plot_power_ratio=plot_power_ratio,
plot_asymmetry=plot_asymmetry, plot_cmaps=plot_cmaps,
verb=None, returnas=None,
)
def plot_var_temp_changes_wavelengths(
self, ih=None, ik=None, il=None, lambdas=None,
use_non_parallelism=None, na=None,
alpha_limits=None,
therm_exp=None, plot_therm_exp=None,
plot_asf=None, plot_power_ratio=None,
plot_asymmetry=None, plot_cmaps=None,
quantity=None,
curv_radius=None, pixel_size=None,
):
return _rockingcurve.plot_var_temp_changes_wavelengths(
ih=ih, ik=ik, il=il, lambdas=lambdas,
use_non_parallelism=use_non_parallelism, na=na,
alpha_limits=alpha_limits,
therm_exp=therm_exp, plot_therm_exp=plot_therm_exp,
plot_asf=plot_asf, plot_power_ratio=plot_power_ratio,
plot_asymmetry=plot_asymmetry, plot_cmaps=plot_cmaps,
quantity=quantity,
curv_radius=curv_radius, pixel_size=pixel_size,
)
def sample_outline_plot(self, use_non_parallelism=None, res=None):
if self._dgeom['Type'] == 'sph':
if self._dgeom['Typeoutline'] == 'rect':
nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
use_non_parallelism=use_non_parallelism,
)
outline = _comp_optics.CrystBragg_sample_outline_plot_sphrect(
self._dgeom['summit'] - nout*self._dgeom['rcurve'],
nout,
e1,
e2,
self._dgeom['rcurve'],
self._dgeom['extenthalf'],
res,
)
else:
raise NotImplementedError
else:
raise NotImplementedError
return outline
def _checkformat_bragglamb(self, bragg=None, lamb=None, n=None):
lc = [lamb is not None, bragg is not None]
if not any(lc):
lamb = self._dbragg['lambref']
lc[0] = True
assert np.sum(lc) == 1, "Provide lamb xor bragg!"
if lc[0]:
bragg = self.get_bragg_from_lamb(
np.atleast_1d(lamb), n=n,
)
else:
bragg = np.atleast_1d(bragg)
return bragg
def _checkformat_get_Rays_from(self, phi=None, bragg=None):
assert phi is not None
assert bragg is not None
bragg = np.atleast_1d(bragg)
phi = np.atleast_1d(phi)
nrays = max(phi.size, bragg.size)
if not phi.shape == bragg.shape:
if phi.size == 1:
phi = np.full(bragg.shape, phi[0])
elif bragg.size == 1:
bragg = np.full(phi.shape, bragg[0])
else:
msg = "phi and bragg/lamb must have the same shape!\n"
msg += " phi.shape: %s\n"%str(phi.shape)
msg += " bragg/lamb.shape: %s\n"%str(bragg.shape)
raise Exception(msg)
return phi, bragg
def _get_rays_from_cryst(
self,
phi=None, bragg=None,
lamb=None, n=None,
dtheta=None, psi=None,
ntheta=None, npsi=None,
use_non_parallelism=None,
include_summit=None,
grid=None,
):
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb)
phi, bragg = self._checkformat_get_Rays_from(phi=phi, bragg=bragg)
pts_start, nout, e1, e2 = self.get_local_noute1e2(
dtheta=dtheta, psi=psi,
use_non_parallelism=use_non_parallelism,
ntheta=ntheta, npsi=npsi,
include_summit=include_summit,
)
nin = -nout
if grid is True:
nin = nin[..., None]
e1 = e1[..., None]
e2 = e2[..., None]
else:
assert bragg.shape == nin.shape[1:]
vect = (
np.sin(bragg)*nin
+ np.cos(bragg)*(np.cos(phi)*e1 + np.sin(phi)*e2)
)
return pts_start, vect
def get_rays_from_cryst(
self,
phi=None, bragg=None,
lamb=None, n=None,
dtheta=None, psi=None,
use_non_parallelism=None,
ntheta=None, npsi=None,
include_summit=None,
det=None, config=None, length=None,
returnas=None,
return_xixj=None,
grid=None,
):
if returnas is None:
returnas = 'pts'
if return_xixj is None:
return_xixj = False
lret = ['(pts, vect, length)', '(pts, vect)', 'pts']
if returnas not in lret:
msg = (
"Arg returnas must be in:\n"
+ "\t- '(pts, vect, length)': starting points, unit vector,"
+ " length\n"
+ "\t- 'pts': starting and ending points\n"
)
raise Exception(msg)
det = self._checkformat_det(det)
if length is None:
length = 10.
if grid is None:
try:
grid = bragg.shape != dtheta.shape
except Exception as err:
grid = True
pts_start, vect = self._get_rays_from_cryst(
phi=phi, bragg=bragg,
lamb=lamb, n=n,
dtheta=dtheta, psi=psi,
use_non_parallelism=use_non_parallelism,
ntheta=ntheta, npsi=npsi,
include_summit=include_summit,
grid=grid,
)
if returnas == '(pts, vect)':
return pts_start, vect
vshape = vect.shape
dk = {
k0: np.full(vshape[1:], np.nan)
for k0 in ['config', 'det', 'length']
}
xi, xj = None, None
if config is not None:
if vshape != pts_start.shape:
if len(vshape) == 3 and len(pts_start.shape) == 2:
D = np.reshape(
np.repeat(pts_start[..., None], vshape[-1], axis=-1),
(3, -1),
)
u = vect.reshape((3, -1))
else:
msg = (
"Not treated case!\n"
f"\t- pts_start.shape: {pts_start.shape}\n"
f"\t- vect.shape: {vshape}\n"
)
raise Exception(msg)
else:
if len(vshape) > 2:
D = pts_start.reshape((3, -1))
u = vect.reshape((3, -1))
else:
D = pts_start
u = vect
rays = _core.Rays(
dgeom=(D, u),
config=config,
strict=False,
Name='dummy',
Diag='dummy',
Exp='dummy',
)
if u.shape != vshape:
kout = rays.dgeom['kOut'].reshape(vshape[1:])
else:
kout = rays.dgeom['kOut']
dk['config'] = kout
if det is not None and det is not False:
shape = tuple([3] + [1 for ii in range(vect.ndim-1)])
cent = det['cent'].reshape(shape)
nout = det['nout'].reshape(shape)
if grid is True:
k = (
np.sum((cent-pts_start[..., None])*nout, axis=0)
/ np.sum(vect*nout, axis=0)
)
else:
k = (
np.sum((cent-pts_start)*nout, axis=0)
/ np.sum(vect*nout, axis=0)
)
dk['det'][k >= 0.] = k[k >= 0.]
if return_xixj is True:
if grid:
pts_end = pts_start[..., None] + dk['det'][None, ...]*vect
else:
pts_end = pts_start + dk['det'][None, ...]*vect
ei = det['ei'].reshape(shape)
ej = det['ej'].reshape(shape)
xi = np.sum((pts_end - cent)*ei, axis=0)
xj = np.sum((pts_end - cent)*ej, axis=0)
if length is not None:
dk['length'][:] = length
k = np.nanmin([vv for vv in dk.values() if vv is not None], axis=0)
if returnas == 'pts':
if grid:
pts_end = pts_start[..., None] + k[None, ...]*vect
if return_xixj:
return pts_start, pts_end, xi, xj
else:
return pts_start, pts_end
else:
pts_end = pts_start + k[None, ...]*vect
if return_xixj:
return pts_start, pts_end, xi, xj
else:
return pts_start, pts_end
elif returnas == '(pts, vect, length)':
if return_xixj:
return pts_start, vect, k, xi, xj
else:
return pts_start, vect, k
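# Usage sketch (illustrative, not part of the original file): start / end
# points of rays leaving the crystal at the reference wavelength for a few
# azimuthal angles phi, stopped on the detector plane.  'cryst' and 'det'
# are assumptions.
#     phi = np.pi + np.linspace(-0.05, 0.05, 5)
#     pts_start, pts_end = cryst.get_rays_from_cryst(
#         phi=phi, lamb=cryst.dbragg['lambref'],
#         det=det, returnas='pts',
#     )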
def split(self, direction=None, nb=None):
if direction is None:
direction = 'e1'
if direction not in ['e1', 'e2']:
msg = (
"Arg direction must be either:\n"
"\t- 'e1': split along vector 'e1' (~horizontally)\n"
"\t- 'e2': split along vector 'e2' (~vertically)\n"
f"You provided: {direction}"
)
raise Exception(msg)
if nb is None:
nb = 2
if not (isinstance(nb, int) and nb > 1):
msg = (
"Arg nb must be a int > 1 !\n"
"It specifies the number of equal parts desired\n"
f"You provided: {nb}"
)
raise Exception(msg)
edges = np.linspace(-1, 1, nb+1)
mid = 0.5*(edges[1:] + edges[:-1])[None, :]
if direction == 'e2':
dtheta = mid*self._dgeom['extenthalf'][1]
psi = np.zeros((1, nb), dtype=float)
extenthalf = [
self._dgeom['extenthalf'][0],
self._dgeom['extenthalf'][1]/nb,
]
else:
dtheta = np.zeros((1, nb), dtype=float)
psi = mid*self._dgeom['extenthalf'][0]
extenthalf = [
self._dgeom['extenthalf'][0]/nb,
self._dgeom['extenthalf'][1],
]
nouts = (
np.cos(dtheta)*(
self._dgeom['nout'][:, None]*np.cos(psi)
+ self._dgeom['e1'][:, None]*np.sin(psi)
)
+ np.sin(dtheta)*self._dgeom['e2'][:, None]
)
e1s = (
-self._dgeom['nout'][:, None]*np.sin(psi)
+ self._dgeom['e1'][:, None]*np.cos(psi)
)
e2s = np.array([
nouts[1, :]*e1s[2, :] - nouts[2, :]*e1s[1, :],
nouts[2, :]*e1s[0, :] - nouts[0, :]*e1s[2, :],
nouts[0, :]*e1s[1, :] - nouts[1, :]*e1s[0, :],
])
lobj = [
self.__class__(
dgeom={
'rcurve': self._dgeom['rcurve'],
'center': self._dgeom['center'],
'nout': nouts[:, ii],
'e1': e1s[:, ii],
'e2': e2s[:, ii],
'extenthalf': extenthalf,
},
dmat={
k0: v0 for k0, v0 in self._dmat.items()
if k0 not in ['nin', 'nout', 'e1', 'e2']
},
dbragg=dict(self._dbragg),
Name=f"{self.Id.Name}{ii}",
Exp=self.Id.Exp,
)
for ii in range(nb)
]
return lobj
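# Usage sketch (illustrative, not part of the original file): split the
# crystal into 3 equal sub-crystals along e1 (~horizontally) and overlay
# them on the same figure.
#     lcryst = cryst.split(direction='e1', nb=3)
#     dax = lcryst[0].plot()
#     for cc in lcryst[1:]:
#         dax = cc.plot(dax=dax)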
def plot(
self, dcryst=None,
phi=None, bragg=None, lamb=None, pts=None,
n=None, config=None, det=None, length=None,
dtheta=None, psi=None,
ntheta=None, npsi=None,
include_summit=None,
dax=None, proj=None, res=None, element=None,
color=None, ddet=None,
dleg=None, draw=True, dmargin=None,
use_non_parallelism=None, grid=None,
rays_npts=None, rays_color=None,
fs=None, wintit=None, tit=None,
):
if det is None:
det = False
det = self._checkformat_det(det)
lc = [
dtheta is not None or psi is not None or phi is not None,
pts is not None
]
if np.sum(lc) == 2:
msg = (
"For ray tracing, please provide either:\n"
+ "\t- dtheta, psi, phi, lamb/bragg\n"
+ "\t- pts, lamb/bragg\n"
)
raise Exception(msg)
if lc[0]:
pts_summit, pts1 = self.get_rays_from_cryst(
phi=phi, lamb=lamb, bragg=bragg,
n=n, use_non_parallelism=use_non_parallelism,
dtheta=dtheta, psi=psi,
ntheta=ntheta, npsi=npsi,
include_summit=include_summit,
config=config, det=det,
returnas='pts', return_xixj=False,
grid=grid,
)
pts2, xi, xj = self.get_rays_from_cryst(
phi=phi+np.pi, lamb=lamb, bragg=bragg,
n=n, use_non_parallelism=use_non_parallelism,
dtheta=dtheta, psi=psi,
ntheta=ntheta, npsi=npsi,
include_summit=include_summit,
config=config, det=det,
returnas='pts', return_xixj=True,
grid=grid,
)[1:]
elif lc[1]:
c0 = (
isinstance(pts, np.ndarray)
and pts.ndim == 2
and pts.shape[0] == 3
)
if not c0:
msg = ("Arg pts must be a (3, npts) np.array!")
raise Exception(msg)
dtheta, psi, phi, bragg, _, _ = self.calc_raytracing_from_lambpts(
pts=pts,
lamb=lamb,
ndtheta=ntheta,
)
pts_summit, pts2, xi, xj = self.get_rays_from_cryst(
phi=phi+np.pi, lamb=None, bragg=bragg,
n=n, use_non_parallelism=use_non_parallelism,
dtheta=dtheta, psi=psi,
ntheta=ntheta, npsi=npsi,
include_summit=include_summit,
config=config, det=det,
returnas='pts', return_xixj=True,
grid=grid,
)
pts1 = np.repeat(
np.repeat(
np.repeat(
pts[:, None, :], dtheta.shape[0], axis=1,
)[..., None],
dtheta.shape[2],
axis=-1,
)[..., None],
2,
axis=-1,
)
else:
pts_summit, pts1, pts2, xi, xj = None, None, None, None, None
return _plot_optics.CrystalBragg_plot(
cryst=self, dcryst=dcryst,
det=det, ddet=ddet,
dax=dax, proj=proj, res=res, element=element,
color=color,
pts_summit=pts_summit, pts1=pts1, pts2=pts2,
xi=xi, xj=xj,
rays_color=rays_color, rays_npts=rays_npts,
dleg=dleg, draw=draw, fs=fs, dmargin=dmargin,
use_non_parallelism=use_non_parallelism,
wintit=wintit, tit=tit,
)
def get_phi_from_magaxis_summit(
self,
axis_r,
axis_z,
axis_npts=None,
lamb=None,
lamb_tol=None,
bragg=None,
n=None,
use_non_parallelism=None,
):
if axis_npts is None:
axis_npts = 1000
axis_r = np.atleast_1d(axis_r)
axis_z = np.atleast_1d(axis_z)
assert axis_r.shape == axis_z.shape
if lamb_tol is None:
lamb_tol = 0.01e-10
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)
shaperz = axis_r.shape
phi_ax = np.full(shaperz, np.nan)
theta_cryst = np.arctan2(
self._dgeom['summit'][1],
self._dgeom['summit'][0],
)
theta_ax = theta_cryst + np.pi/2*np.linspace(-1, 1, axis_npts)
shapetheta = np.r_[[1 for ii in shaperz], axis_npts]
theta_ax = theta_ax.reshape(shapetheta)
axis_x = (axis_r[..., None] * np.cos(theta_ax)).ravel()
axis_y = (axis_r[..., None] * np.sin(theta_ax)).ravel()
axis_z = (np.repeat(axis_z[..., None], axis_npts, axis=-1)).ravel()
(
bragg_ax_full, phi_ax_full, lamb_ax_full,
) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
pts=np.array([axis_x, axis_y, axis_z]),
dtheta=None, psi=None,
ntheta=None, npsi=None,
n=None,
use_non_parallelism=use_non_parallelism,
grid=None,
return_lamb=True,
)
shape_full = tuple(np.r_[shaperz, axis_npts])
lamb_ax_full = lamb_ax_full.reshape(shape_full)
phi_ax_full = phi_ax_full.reshape(shape_full)
dlamb = np.abs(lamb_ax_full - lamb)
indok = np.any(dlamb <= lamb_tol, axis=-1)
indmin = np.nanargmin(dlamb[indok, :], axis=-1)
indtup = tuple([iii for iii in indok.nonzero()] + [indmin])
phi_ax[indok] = phi_ax_full[indtup]
return phi_ax
def get_bragg_from_lamb(self, lamb=None, n=None):
if self._dmat['d'] is None:
msg = "Interplane distance d no set !\n"
msg += " => self.set_dmat({'d':...})"
raise Exception(msg)
if lamb is None:
lamb = self._dbragg['lambref']
return _comp_optics.get_bragg_from_lamb(
np.atleast_1d(lamb), self._dmat['d'], n=n,
)
def get_lamb_from_bragg(self, bragg=None, n=None):
if self._dmat['d'] is None:
msg = "Interplane distance d no set !\n"
msg += " => self.set_dmat({'d':...})"
raise Exception(msg)
if bragg is None:
bragg = self._dbragg['braggref']
return _comp_optics.get_lamb_from_bragg(np.atleast_1d(bragg),
self._dmat['d'], n=n)
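# Consistency sketch (illustrative, not part of the original file): the two
# methods above implement Bragg's law, n*lamb = 2*d*sin(bragg), so a
# round-trip conversion should be the identity.
#     lamb0 = cryst.dbragg['lambref']
#     bragg0 = cryst.get_bragg_from_lamb(lamb=lamb0, n=1)
#     lamb_back = cryst.get_lamb_from_bragg(bragg=bragg0, n=1)
#     assert np.allclose(lamb_back, lamb0)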
def update_non_parallelism(self, alpha=None, beta=None):
if alpha is None:
alpha = 0
if beta is None:
beta = 0
(self._dmat['nin'], self._dmat['nout'], self._dmat['e1'],
self._dmat['e2']) = _comp_optics.get_vectors_from_angles(
alpha, beta,
self._dgeom['nout'], self._dgeom['e1'],
self._dgeom['e2'],
)
self._dmat['alpha'], self._dmat['beta'] = alpha, beta
def calc_meridional_sagital_focus(
self,
rcurve=None,
bragg=None,
alpha=None,
use_non_parallelism=None,
verb=None,
):
if rcurve is None:
rcurve = self._dgeom['rcurve']
if bragg is None:
bragg = self._dbragg['braggref']
if use_non_parallelism is True:
alpha = self._dmat['alpha']
if use_non_parallelism is False:
alpha = 0.0
return _comp_optics.calc_meridional_sagital_focus(
rcurve=rcurve,
bragg=bragg,
alpha=alpha,
use_non_parallelism=use_non_parallelism,
verb=verb,
)
def get_rowland_dist_from_lambbragg(self, bragg=None, lamb=None, n=None):
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
if np.all(np.isnan(bragg)):
msg = ("There is no available bragg angle!\n"
+ " => Check the vlue of self.dmat['d'] vs lamb")
raise Exception(msg)
return _comp_optics.get_rowland_dist_from_bragg(
bragg=bragg, rcurve=self._dgeom['rcurve'],
)
def get_detector_ideal(
self,
bragg=None, lamb=None,
rcurve=None, n=None,
ddist=None, di=None, dj=None,
dtheta=None, dpsi=None, tilt=None,
lamb0=None, lamb1=None, dist01=None,
use_non_parallelism=None,
tangent_to_rowland=None, plot=False,
):
if rcurve is None:
rcurve = self._dgeom['rcurve']
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
if np.all(np.isnan(bragg)):
msg = ("There is no available bragg angle!\n"
+ " => Check the vlue of self.dmat['d'] vs lamb")
raise Exception(msg)
lc = [lamb0 is not None, lamb1 is not None, dist01 is not None]
if any(lc) and not all(lc):
msg = (
"Arg lamb0, lamb1 and dist01 must be provided together:\n"
+ "\t- lamb0: line0 wavelength ({})\n".format(lamb0)
+ "\t- lamb1: line1 wavelength ({})\n".format(lamb1)
+ "\t- dist01: distance (m) on detector between lines "
+ "({})".format(dist01)
)
raise Exception(msg)
bragg01 = None
if all(lc):
bragg01 = self._checkformat_bragglamb(
lamb=np.r_[lamb0, lamb1], n=n,
)
lc = [rcurve is None, self._dgeom['summit'] is None]
if any(lc):
msg = (
"Some missing fields in dgeom for computation:"
+ "\n\t-" + "\n\t-".join(['rcurve'] + 'summit')
)
raise Exception(msg)
nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
use_non_parallelism=use_non_parallelism,
)
lc = [cc is None for cc in [nout, e1, e2]]
if any(lc):
msg = (
"""
Fields 'nout', 'e1' and 'e2' are missing!
"""
)
raise Exception(msg)
(det_dist, n_crystdet_rel,
det_nout_rel, det_ei_rel) = _comp_optics.get_approx_detector_rel(
rcurve, bragg,
bragg01=bragg01, dist01=dist01,
tangent_to_rowland=tangent_to_rowland)
det_cent, det_nout, det_ei, det_ej = _comp_optics.get_det_abs_from_rel(
det_dist, n_crystdet_rel, det_nout_rel, det_ei_rel,
self._dgeom['summit'], nout, e1, e2,
ddist=ddist, di=di, dj=dj,
dtheta=dtheta, dpsi=dpsi, tilt=tilt)
if plot:
dax = self.plot()
p0 = np.repeat(det_cent[:,None], 3, axis=1)
vv = np.vstack((det_nout, det_ei, det_ej)).T
dax['cross'].plot(np.hypot(det_cent[0], det_cent[1]),
det_cent[2], 'xb')
dax['hor'].plot(det_cent[0], det_cent[1], 'xb')
dax['cross'].quiver(np.hypot(p0[0, :], p0[1, :]), p0[2, :],
np.hypot(vv[0, :], vv[1, :]), vv[2, :],
units='xy', color='b')
dax['hor'].quiver(p0[0, :], p0[1, :], vv[0, :], vv[1, :],
units='xy', color='b')
return {'cent': det_cent, 'nout': det_nout,
'ei': det_ei, 'ej': det_ej}
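# Usage sketch (illustrative, not part of the original file): compute an
# ideal detector tangent to the Rowland circle at the reference wavelength,
# then reuse it for ray tracing or (xi, xj) computations.
#     det = cryst.get_detector_ideal(
#         lamb=cryst.dbragg['lambref'],
#         tangent_to_rowland=True,
#         plot=False,
#     )
#     # det is a dict with keys 'cent', 'nout', 'ei', 'ej'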
def _checkformat_det(self, det=None):
lc = [det is None, det is False, isinstance(det, dict)]
msg = ("det must be:\n"
+ "\t- False: not det provided\n"
+ "\t- None: use default approx det from:\n"
+ "\t self.get_detector_ideal()\n"
+ "\t- dict: a dictionary of 3d (x,y,z) coordinates of a point"
+ " (local frame center) and 3 unit vectors forming a direct "
+ "orthonormal basis attached to the detector's frame\n"
+ "\t\t\t\t- 'cent': detector center\n"
+ "\t\t\t\t- 'nout': unit vector perpendicular to surface, "
+ "in direction of the crystal\n"
+ "\t\t\t\t- 'ei': unit vector, first coordinate on surface\n"
+ "\t\t\t\t- 'ej': unit vector, second coordinate on surfacei\n"
+ " You provided: {}".format(det))
if not any(lc):
raise Exception(msg)
if lc[0]:
det = self.get_detector_ideal(lamb=self._dbragg['lambref'])
elif lc[2]:
lk = ['cent', 'nout', 'ei', 'ej']
c0 = (isinstance(det, dict)
and all([(kk in det.keys()
and hasattr(det[kk], '__iter__')
and np.atleast_1d(det[kk]).size == 3
and not np.any(np.isnan(det[kk])))
for kk in lk]))
if not c0:
raise Exception(msg)
for k0 in lk:
det[k0] = np.atleast_1d(det[k0]).ravel()
return det
def get_local_noute1e2(
self,
dtheta=None, psi=None,
ntheta=None, npsi=None,
use_non_parallelism=None,
include_summit=None,
):
# Get local basis at crystal summit
nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
use_non_parallelism=use_non_parallelism,
)
nin = -nout
# Get vectors at any points from psi & dtheta
vout, ve1, ve2 = _comp_optics.CrystBragg_get_noute1e2_from_psitheta(
nout, e1, e2,
psi=psi, dtheta=dtheta,
e1e2=True, sameshape=False,
extenthalf_psi=self._dgeom['extenthalf'][0],
extenthalf_dtheta=self._dgeom['extenthalf'][1],
ntheta=ntheta, npsi=npsi,
include_summit=include_summit,
)
vin = -vout
# cent no longer dgeom['center'] because no longer a fixed point
cent = self._dgeom['summit'] + self._dgeom['rcurve']*nin
reshape = np.r_[3, [1 for ii in range(vout.ndim - 1)]]
cent = cent.reshape(reshape)
# Redefining summit according to nout at each point at crystal
summ = cent + self._dgeom['rcurve']*vout
return summ, vout, ve1, ve2
def calc_xixj_from_braggphi(
self,
phi=None,
bragg=None,
lamb=None,
n=None,
dtheta=None,
psi=None,
det=None,
use_non_parallelism=None,
strict=None,
return_strict=None,
data=None,
plot=True,
dax=None,
):
if return_strict is None:
return_strict = False
# Check / format inputs
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
phi = np.atleast_1d(phi)
# Check / get det
det = self._checkformat_det(det)
# Get local summit nout, e1, e2 if non-centered
if dtheta is None:
dtheta = 0.
if psi is None:
psi = 0.
# Probably to update with use_non_parallelism?
# Get back summit & vectors at any point at the crystal surface,
# according to parallelism properties
summit, nout, e1, e2 = self.get_local_noute1e2(
dtheta=dtheta, psi=psi,
use_non_parallelism=use_non_parallelism,
ntheta=None, npsi=None,
include_summit=False,
)
# Compute
xi, xj, strict = _comp_optics.calc_xixj_from_braggphi(
det_cent=det['cent'],
det_nout=det['nout'], det_ei=det['ei'], det_ej=det['ej'],
det_outline=det.get('outline'),
summit=summit, nout=nout, e1=e1, e2=e2,
bragg=bragg, phi=phi, strict=strict,
)
if plot:
dax = _plot_optics.CrystalBragg_plot_approx_detector_params(
bragg, xi, xj, data, dax,
)
if return_strict is True:
return xi, xj, strict
else:
return xi, xj
def plot_line_on_det_tracing(
self, lamb=None, n=None,
nphi=None,
det=None, johann=None,
use_non_parallelism=None,
lpsi=None, ldtheta=None,
strict=None,
ax=None, dleg=None,
rocking=None, fs=None, dmargin=None,
wintit=None, tit=None,
):
# Check / format inputs
if lamb is None:
lamb = self._dbragg['lambref']
lamb = np.atleast_1d(lamb).ravel()
nlamb = lamb.size
if johann is None:
johann = lpsi is not None or ldtheta is not None
if rocking is None:
rocking = False
if det is None or det.get('outline') is None:
msg = ("Please provide det as a dict with 'outline'!")
raise Exception(msg)
# Get local basis
nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
use_non_parallelism=use_non_parallelism,
)
nin = -nout
# Compute lamb / phi
_, phi = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=det['outline'][0, :], xj=det['outline'][1, :], det=det,
dtheta=0, psi=0,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=False,
)
phimin, phimax = np.nanmin(phi), np.nanmax(phi)
phimin, phimax = phimin-(phimax-phimin)/10, phimax+(phimax-phimin)/10
# Get reference ray-tracing
bragg = self._checkformat_bragglamb(lamb=lamb, n=n)
if nphi is None:
nphi = 100
phi = np.linspace(phimin, phimax, nphi)
xi = np.full((nlamb, nphi), np.nan)
xj = np.full((nlamb, nphi), np.nan)
for ll in range(nlamb):
xi[ll, :], xj[ll, :] = self.calc_xixj_from_braggphi(
bragg=np.full(phi.shape, bragg[ll]),
phi=phi,
dtheta=0.,
psi=0.,
n=n,
det=det,
use_non_parallelism=use_non_parallelism,
strict=strict,
plot=False,
)
# Get johann-error raytracing (multiple positions on crystal)
xi_er, xj_er = None, None
if johann and not rocking:
if lpsi is None:
lpsi = np.linspace(-1., 1., 15)
if ldtheta is None:
ldtheta = np.linspace(-1., 1., 15)
lpsi, ldtheta = np.meshgrid(lpsi, ldtheta)
lpsi = lpsi.ravel()
ldtheta = ldtheta.ravel()
lpsi = self._dgeom['extenthalf'][0]*np.r_[lpsi]
ldtheta = self._dgeom['extenthalf'][1]*np.r_[ldtheta]
npsi = lpsi.size
assert npsi == ldtheta.size
xi_er = np.full((nlamb, npsi*nphi), np.nan)
xj_er = np.full((nlamb, npsi*nphi), np.nan)
for l in range(nlamb):
for ii in range(npsi):
i0 = np.arange(ii*nphi, (ii+1)*nphi)
xi_er[l, i0], xj_er[l, i0] = self.calc_xixj_from_braggphi(
phi=phi, bragg=bragg[l], lamb=None, n=n,
dtheta=ldtheta[ii], psi=lpsi[ii],
det=det, plot=False,
use_non_parallelism=use_non_parallelism,
strict=strict,
)
# Get rocking curve error
if rocking:
pass
# Plot
return _plot_optics.CrystalBragg_plot_line_tracing_on_det(
lamb, xi, xj, xi_er, xj_er,
det=det, ax=ax, dleg=dleg,
johann=johann, rocking=rocking,
fs=fs, dmargin=dmargin, wintit=wintit, tit=tit)
def calc_johannerror(
self,
xi=None, xj=None, err=None,
det=None, n=None,
lpsi=None, ldtheta=None,
lambda_interval_min=None,
lambda_interval_max=None,
use_non_parallelism=None,
plot=True, fs=None, cmap=None,
vmin=None, vmax=None, tit=None, wintit=None,
):
# Check xi, xj once before to avoid doing it twice
if err is None:
err = 'abs'
if lambda_interval_min is None:
lambda_interval_min = 3.93e-10
if lambda_interval_max is None:
lambda_interval_max = 4.00e-10
xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)
# Check / format inputs
bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xii, xj=xjj, det=det,
dtheta=0, psi=0,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=True,
)
# Only one summit was selected
bragg, phi, lamb = bragg[..., 0], phi[..., 0], lamb[..., 0]
# Check lambda interval into lamb array
c0 = (
np.min(lamb) < lambda_interval_min
and np.max(lamb) > lambda_interval_max
)
if c0:
test_lambda_interv = True
else:
test_lambda_interv = False
# Get err from multiple ldtheta, lpsi
if lpsi is None:
lpsi = np.r_[-1., 0., 1., 1., 1., 0., -1, -1]
lpsi = self._dgeom['extenthalf'][0]*np.r_[lpsi]
if ldtheta is None:
ldtheta = np.r_[-1., -1., -1., 0., 1., 1., 1., 0.]
ldtheta = self._dgeom['extenthalf'][1]*np.r_[ldtheta]
npsi = lpsi.size
assert npsi == ldtheta.size
(
braggerr, phierr, lamberr,
) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xii, xj=xjj, det=det,
dtheta=ldtheta, psi=lpsi,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=True,
)
err_lamb = np.nanmax(np.abs(lamb[..., None] - lamberr), axis=-1)
err_phi = np.nanmax(np.abs(phi[..., None] - phierr), axis=-1)
# absolute vs relative error
if 'rel' in err:
if err == 'rel':
err_lamb = 100.*err_lamb / (np.nanmax(lamb) - np.nanmin(lamb))
err_phi = 100.*err_phi / (np.nanmax(phi) - np.nanmin(phi))
elif err == 'rel2':
err_lamb = 100.*err_lamb / np.mean(lamb)
err_phi = 100.*err_phi / np.mean(phi)
err_lamb_units = '%'
err_phi_units = '%'
else:
err_lamb_units = 'm'
err_phi_units = 'rad'
if plot is True:
ax = _plot_optics.CrystalBragg_plot_johannerror(
xi, xj, lamb, phi,
err_lamb, err_phi,
err_lamb_units=err_lamb_units,
err_phi_units=err_phi_units,
cmap=cmap, vmin=vmin, vmax=vmax,
fs=fs, tit=tit, wintit=wintit,
)
return (
err_lamb, err_phi, err_lamb_units, err_phi_units,
test_lambda_interv,
)
def plot_focal_error_summed(
self,
dist_min=None, dist_max=None,
di_min=None, di_max=None,
ndist=None, ndi=None,
lamb=None, bragg=None,
xi=None, xj=None,
err=None,
use_non_parallelism=None,
tangent_to_rowland=None, n=None,
plot=None,
pts=None,
det_ref=None, plot_dets=None, nsort=None,
dcryst=None,
lambda_interval_min=None,
lambda_interval_max=None,
contour=None,
fs=None,
ax=None,
cmap=None,
vmin=None,
vmax=None,
return_ax=None,
):
# Check / format inputs
if dist_min is None:
dist_min = -0.15
if dist_max is None:
dist_max = 0.15
if di_min is None:
di_min = -0.40
if di_max is None:
di_max = 0.40
if ndist is None:
ndist = 21
if ndi is None:
ndi = 21
if err is None:
err = 'rel'
if plot is None:
plot = True
if plot_dets is None:
plot_dets = det_ref is not None
if nsort is None:
nsort = 5
if return_ax is None:
return_ax = True
if lambda_interval_min is None:
lambda_interval_min = 3.93e-10
if lambda_interval_max is None:
lambda_interval_max = 4.00e-10
l0 = [dist_min, dist_max, ndist, di_min, di_max, ndi]
c0 = any([l00 is not None for l00 in l0])
if not c0:
msg = (
"Please give the ranges of ddist and di translations\n"
"\t to compute the different detector's position\n"
"\t Provided:\n"
"\t\t- dist_min, dist_max, ndist: ({}, {}, {})\n".format(
dist_min, dist_max, ndist,
)
+ "\t\t- di_min, di_max, ndi: ({}, {}, {})\n".format(
di_min, di_max, ndi,
)
)
raise Exception(msg)
(
ddist0, di0, dj0,
dtheta0, dpsi0, tilt0,
) = self._get_local_coordinates_of_det(
bragg=bragg,
lamb=lamb,
det_ref=det_ref,
use_non_parallelism=use_non_parallelism,
)
        det1 = self.get_detector_ideal(
lamb=lamb,
bragg=bragg,
use_non_parallelism=use_non_parallelism,
tangent_to_rowland=True,
)
det2 = self.get_detector_ideal(
lamb=lamb,
bragg=bragg,
use_non_parallelism=use_non_parallelism,
tangent_to_rowland=False,
)
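        # Angle between the normals of the two ideal detectors (tangent vs.
        # not tangent to the Rowland circle); used below to correct dpsi
        # when tangent_to_rowland is True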
cos_angle_nout = np.sum(
det1['nout'] * det2['nout']
) / (
            np.linalg.norm(det1['nout']) * np.linalg.norm(det2['nout'])
)
angle_nout = np.arccos(cos_angle_nout)
ddist = np.linspace(dist_min, dist_max, int(ndist))
di = np.linspace(di_min, di_max, int(ndi))
error_lambda = np.full((di.size, ddist.size), np.nan)
test_lamb_interv = np.zeros((di.size, ddist.size), dtype='bool')
end = '\r'
for ii in range(ddist.size):
for jj in range(di.size):
if ii == ndist-1 and jj == ndi-1:
end = '\n'
msg = (
"Computing mean focal error for det "
f"({ii+1}, {jj+1})/({ndist}, {ndi})"
).ljust(60)
print(msg, end=end, flush=True)
dpsi0bis = float(dpsi0)
if tangent_to_rowland:
dpsi0bis = dpsi0 - angle_nout
det = self.get_detector_ideal(
ddist=ddist[ii],
di=di[jj],
dj=dj0,
dtheta=dtheta0,
dpsi=dpsi0bis,
tilt=tilt0,
lamb=lamb,
bragg=bragg,
use_non_parallelism=use_non_parallelism,
tangent_to_rowland=False,
)
(
error_lambda_temp, test_lamb_interv[jj, ii],
) = self.calc_johannerror(
xi=xi, xj=xj,
det=det,
err=err,
lambda_interval_min=lambda_interval_min,
lambda_interval_max=lambda_interval_max,
plot=False,
)[::4]
error_lambda[jj, ii] = np.nanmean(error_lambda_temp)
if 'rel' in err:
units = '%'
else:
units = 'm'
if plot:
ax = _plot_optics.CrystalBragg_plot_focal_error_summed(
cryst=self, dcryst=dcryst,
lamb=lamb, bragg=bragg,
error_lambda=error_lambda,
ddist=ddist, di=di,
ddist0=ddist0, di0=di0, dj0=dj0,
dtheta0=dtheta0, dpsi0=dpsi0, tilt0=tilt0,
angle_nout=angle_nout,
det_ref=det_ref,
units=units,
plot_dets=plot_dets, nsort=nsort,
tangent_to_rowland=tangent_to_rowland,
use_non_parallelism=use_non_parallelism,
pts=pts,
test_lamb_interv=test_lamb_interv,
contour=contour,
fs=fs,
ax=ax,
cmap=cmap,
vmin=vmin,
vmax=vmax,
)
if return_ax:
return error_lambda, ddist, di, test_lamb_interv, ax
else:
return error_lambda, ddist, di, test_lamb_interv
def _get_local_coordinates_of_det(
self,
bragg=None,
lamb=None,
det_ref=None,
use_non_parallelism=None,
):
if det_ref is None:
msg = (
"You need to provide your arbitrary detector\n"
+ "\t in order to compute its spatial properties !\n"
+ "\t You provided: {}".format(det)
)
raise Exception(msg)
det_ref = self._checkformat_det(det=det_ref)
det_approx = self.get_detector_ideal(
bragg=bragg, lamb=lamb,
tangent_to_rowland=False,
use_non_parallelism=use_non_parallelism,
)
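        # Translations of the reference detector relative to the ideal one,
        # projected onto the ideal detector's local basis (-nout, ei, ej)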
delta = det_ref['cent'] - det_approx['cent']
ddist = np.sum(delta * (-det_approx['nout']))
di = np.sum(delta * det_approx['ei'])
dj = np.sum(delta * det_approx['ej'])
dtheta, dpsi, tilt = None, None, None
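        # Rotation angles recovered from the projections of the reference
        # detector's unit vectors onto the ideal detector's basis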
sindtheta = np.sum(det_approx['ej'] * det_ref['nout'])
costheta_cospsi = np.sum(det_approx['nout'] * det_ref['nout'])
costheta_sinpsi = np.sum(det_approx['ei'] * det_ref['nout'])
costheta = np.sqrt(costheta_cospsi**2 + costheta_sinpsi**2)
dtheta = np.arctan2(sindtheta, costheta)
dpsi = np.arctan2(
costheta_sinpsi / costheta,
costheta_cospsi / costheta,
)
det_ei2 = (
np.cos(dpsi)*det_approx['ei'] - np.sin(dpsi)*det_approx['nout']
)
det_ej2 = np.cross(det_ref['nout'], det_ei2)
costilt = np.sum(det_ref['ei']*det_ei2)
sintilt = np.sum(det_ref['ei']*det_ej2)
tilt = np.arctan2(sintilt, costilt)
return ddist, di, dj, dtheta, dpsi, tilt
def get_lambbraggphi_from_ptsxixj_dthetapsi(
self,
pts=None,
xi=None, xj=None, det=None,
dtheta=None, psi=None,
ntheta=None, npsi=None,
n=None,
use_non_parallelism=None,
grid=None,
return_lamb=None,
):
if return_lamb is None:
return_lamb = True
det = self._checkformat_det(det)
summ, vout, ve1, ve2 = self.get_local_noute1e2(
dtheta=dtheta, psi=psi,
ntheta=ntheta, npsi=npsi,
use_non_parallelism=use_non_parallelism,
include_summit=True,
)
bragg, phi = _comp_optics.calc_braggphi_from_xixjpts(
pts=pts,
xi=xi, xj=xj, det=det,
summit=summ, nin=-vout, e1=ve1, e2=ve2,
grid=grid,
)
if return_lamb is True:
lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)
return bragg, phi, lamb
else:
return bragg, phi
def get_lamb_avail_from_pts(
self,
pts=None,
n=None, ndtheta=None,
det=None, nlamb=None, klamb=None,
use_non_parallelism=None,
strict=None,
return_phidtheta=None,
return_xixj=None,
):
if ndtheta is None:
ndtheta = 20
if nlamb is None:
nlamb = 100
assert nlamb >= 2, "nlamb must be >= 2"
if return_phidtheta is None:
return_phidtheta = True
if return_xixj is None:
return_xixj = det is not None
if det is None:
return_xixj = False
if det is None:
strict = False
bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
pts=pts,
dtheta='envelop', psi='envelop',
ntheta=None, npsi=None,
n=n, grid=True,
use_non_parallelism=use_non_parallelism,
return_lamb=True,
)
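        # Accessible wavelength interval at each point, obtained by sampling
        # the crystal envelope in (dtheta, psi)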
lambmin = np.nanmin(lamb, axis=1)
lambmax = np.nanmax(lamb, axis=1)
if klamb is None:
klamb = np.linspace(0, 1, nlamb)
elif not (isinstance(klamb, np.ndarray) and klamb.ndim == 1):
msg = "Please provide klamb as a 1d vector!"
raise Exception(msg)
nlamb = klamb.size
lamb = lambmin[:, None] + (lambmax-lambmin)[:, None]*klamb
return _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj(
cryst=self,
lamb=lamb,
n=n,
ndtheta=ndtheta,
pts=pts,
use_non_parallelism=use_non_parallelism,
return_phidtheta=return_phidtheta,
return_xixj=return_xixj,
strict=strict,
det=det,
)
def _calc_dthetapsiphi_from_lambpts(
self,
pts=None, bragg=None, lamb=None,
n=None, ndtheta=None,
use_non_parallelism=None,
grid=None,
):
pts = _comp_optics._checkformat_pts(pts)
npts = pts.shape[1]
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
use_non_parallelism=use_non_parallelism
)
dtheta, psi, indok, grid = _comp_optics.calc_dthetapsiphi_from_lambpts(
pts,
bragg,
summit=self._dgeom['summit'],
rcurve=self._dgeom['rcurve'],
nout=nout, e1=e1, e2=e2,
extenthalf=self._dgeom['extenthalf'],
ndtheta=ndtheta,
grid=grid,
)
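        # Broadcast bragg and pts to match the shape of the (dtheta, psi)
        # solutions (two solution branches per point and wavelength)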
if grid is True:
bragg = np.repeat(
np.repeat(
np.repeat(bragg[:, None], npts, axis=-1)[..., None],
dtheta.shape[2],
axis=-1,
)[..., None],
2,
axis=-1,
)
pts = pts[:, None, :, None, None]
else:
bragg = np.repeat(
np.repeat(bragg[:, None], dtheta.shape[1], axis=1)[..., None],
2,
axis=-1,
)
pts = pts[..., None, None]
bragg[~indok] = np.nan
bragg2, phi = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
pts=pts,
dtheta=dtheta, psi=psi,
grid=False,
use_non_parallelism=use_non_parallelism,
return_lamb=False,
)
c0 = (
bragg2.shape == bragg.shape
and np.allclose(bragg, bragg2, equal_nan=True)
)
if not c0:
try:
plt.figure()
plt.plot(bragg, bragg2, '.')
except Exception as err:
pass
msg = (
"Inconsistency detected in bragg angle computations:\n"
+ "\t- from the points and lamb\n"
+ "\t- from the points and (dtheta, psi)\n"
+ "\nContext:\n"
+ "\t- use_non_parallelism: {}\n".format(use_non_parallelism)
+ "\t- bragg.shape = {}\n".format(bragg.shape)
+ "\t- bragg2.shape = {}\n".format(bragg2.shape)
)
raise Exception(msg)
return dtheta, psi, phi, bragg
def calc_raytracing_from_lambpts(
self,
lamb=None, bragg=None, pts=None,
xi_bounds=None, xj_bounds=None, nphi=None,
det=None, n=None, ndtheta=None,
johann=False, lpsi=None, ldtheta=None,
rocking=False, strict=None, plot=None, fs=None,
dmargin=None, wintit=None,
tit=None, proj=None,
legend=None, draw=None, returnas=None,
):
if returnas is None:
returnas = 'data'
if plot is None or plot is True:
plot = ['det', '3d']
if isinstance(plot, str):
plot = plot.split('+')
assert all([ss in ['det', '2d', '3d'] for ss in plot])
assert returnas in ['data', 'ax']
pts = _comp_optics._checkformat_pts(pts)
npts = pts.shape[1]
dtheta, psi, phi, bragg = self._calc_dthetapsiphi_from_lambpts(
pts=pts, lamb=lamb, bragg=bragg, n=n, ndtheta=ndtheta,
)
ndtheta = dtheta.shape[-1]
det = self._checkformat_det(det)
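        # Detector impacts (xi, xj) of the rays traced from the points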
xi, xj = self.calc_xixj_from_braggphi(
bragg=bragg, phi=phi+np.pi, n=n,
dtheta=dtheta, psi=psi,
det=det, strict=strict, plot=False,
)
        plot = False    # plotting forced off here, overriding the 'plot' arg
if plot is not False:
ptscryst, ptsdet = None, None
if '2d' in plot or '3d' in plot:
ptscryst = self.get_local_noute1e2(dtheta, psi)[0]
ptsdet = (det['cent'][:, None, None, None]
+ xi[None, ...]*det['ei'][:, None, None, None]
+ xj[None, ...]*det['ej'][:, None, None, None])
ax = _plot_optics.CrystalBragg_plot_raytracing_from_lambpts(
xi=xi, xj=xj, lamb=lamb,
xi_bounds=xi_bounds, xj_bounds=xj_bounds,
pts=pts, ptscryst=ptscryst, ptsdet=ptsdet,
det_cent=det['cent'], det_nout=det['nout'],
det_ei=det['ei'], det_ej=det['ej'],
cryst=self, proj=plot, fs=fs, dmargin=dmargin,
wintit=wintit, tit=tit, legend=legend, draw=draw)
if returnas == 'ax':
return ax
return dtheta, psi, phi, bragg, xi, xj
def _calc_spect1d_from_data2d(self, data, lamb, phi,
nlambfit=None, nphifit=None,
nxi=None, nxj=None,
spect1d=None, mask=None, vertsum1d=None):
if nlambfit is None:
nlambfit = nxi
if nphifit is None:
nphifit = nxj
return _comp_optics._calc_spect1d_from_data2d(
data, lamb, phi,
nlambfit=nlambfit,
nphifit=nphifit,
spect1d=spect1d,
mask=mask,
vertsum1d=vertsum1d,
)
def plot_data_vs_lambphi(
self,
xi=None, xj=None, data=None, mask=None,
        det=None, dtheta=None, psi=None, n=None,
        use_non_parallelism=None,
nlambfit=None, nphifit=None,
magaxis=None, npaxis=None,
dlines=None, spect1d='mean',
lambmin=None, lambmax=None,
xjcut=None, dxj=None,
plot=True, fs=None, tit=None, wintit=None,
cmap=None, vmin=None, vmax=None,
returnas=None,
):
assert data is not None
if returnas is None:
returnas = 'spect'
lreturn = ['ax', 'spect']
if returnas not in lreturn:
msg = ("Arg returnas must be in {}\n:".format(lreturn)
+ "\t- 'spect': return a 1d vertically averaged spectrum\n"
+ "\t- 'ax' : return a list of axes instances")
raise Exception(msg)
xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)
nxi = xi.size if xi is not None else np.unique(xii).size
nxj = xj.size if xj is not None else np.unique(xjj).size
bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xii, xj=xjj, det=det,
dtheta=dtheta, psi=psi,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=True,
)
(spect1d, lambfit, phifit,
vertsum1d, phiminmax) = self._calc_spect1d_from_data2d(
data, lamb, phi,
nlambfit=nlambfit, nphifit=nphifit, nxi=nxi, nxj=nxj,
spect1d=spect1d, mask=mask, vertsum1d=True
)
lambax, phiax = None, None
if magaxis is not None:
if npaxis is None:
npaxis = 1000
thetacryst = np.arctan2(self._dgeom['summit'][1],
self._dgeom['summit'][0])
thetaax = thetacryst + np.pi/2*np.linspace(-1, 1, npaxis)
pts = np.array([magaxis[0]*np.cos(thetaax),
magaxis[0]*np.sin(thetaax),
np.full((npaxis,), magaxis[1])])
braggax, phiax = self.calc_braggphi_from_pts(pts)
lambax = self.get_lamb_from_bragg(braggax)
phiax = np.arctan2(np.sin(phiax-np.pi), np.cos(phiax-np.pi))
ind = ((lambax >= lambfit[0]) & (lambax <= lambfit[-1])
& (phiax >= phifit[0]) & (phiax <= phifit[-1]))
lambax, phiax = lambax[ind], phiax[ind]
ind = np.argsort(lambax)
lambax, phiax = lambax[ind], phiax[ind]
lambcut, phicut, spectcut = None, None, None
if xjcut is not None:
if dxj is None:
dxj = 0.002
xjcut = np.sort(np.atleast_1d(xjcut).ravel())
xicutf = np.tile(xi, (xjcut.size, 1))
xjcutf = np.repeat(xjcut[:, None], nxi, axis=1)
(
braggcut, phicut, lambcut,
) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xicutf, xj=xjcutf, det=det,
dtheta=0, psi=0,
use_non_parallelism=use_non_parallelism,
n=1,
grid=True,
return_lamb=True,
)
indxj = [(np.abs(xj-xjc) <= dxj).nonzero()[0] for xjc in xjcut]
spectcut = np.array([np.nanmean(data[ixj, :], axis=0)
for ixj in indxj])
ax = None
if plot:
ax = _plot_optics.CrystalBragg_plot_data_vs_lambphi(
xi, xj, bragg, lamb, phi, data,
lambfit=lambfit, phifit=phifit, spect1d=spect1d,
vertsum1d=vertsum1d, lambax=lambax, phiax=phiax,
lambmin=lambmin, lambmax=lambmax, phiminmax=phiminmax,
xjcut=xjcut, lambcut=lambcut, phicut=phicut, spectcut=spectcut,
cmap=cmap, vmin=vmin, vmax=vmax, dlines=dlines,
tit=tit, wintit=wintit, fs=fs)
if returnas == 'spect':
return spect1d, lambfit
elif returnas == 'ax':
return ax
def get_plasmadomain_at_lamb(
self,
config=None,
struct=None,
domain=None,
res=None,
det=None,
xixj_lim=None,
strict=None,
bragg=None,
lamb=None,
ndtheta=None,
nlamb=None,
n=None,
use_non_parallelism=None,
plot=None,
dax=None,
plot_as=None,
lcolor=None,
return_dax=None,
):
struct = _check_optics._check_config_get_Ves(
config=config, struct=struct,
)
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)
if ndtheta is None:
ndtheta = 5
if nlamb is None:
nlamb = 11
if strict is None:
strict = True
if plot is None:
plot = True
if return_dax is None:
return_dax = plot is True
(
pts, dV, ind, (resR, resZ, resPhi),
) = config.dStruct['dObj']['Ves'][struct].get_sampleV(
res=res,
domain=domain,
returnas='(R, Z, Phi)',
)
ptsXYZ = np.array([
pts[0, :]*np.cos(pts[2, :]),
pts[0, :]*np.sin(pts[2, :]),
pts[1, :],
])
lamb_access = self.get_lamb_avail_from_pts(
pts=ptsXYZ,
nlamb=2,
use_non_parallelism=use_non_parallelism,
return_phidtheta=False,
return_xixj=False,
strict=False,
)
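        # Flag, for each requested lamb, the sampled points whose accessible
        # wavelength interval contains it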
lambok = np.zeros((lamb.size, pts.shape[1]), dtype=bool)
for ii, ll in enumerate(lamb):
lambok[ii, :] = (
(lamb_access[:, 0] <= ll) & (ll <= lamb_access[:, 1])
)
indok = np.any(lambok, axis=0)
pts = pts[:, indok]
ptsXYZ = ptsXYZ[:, indok]
lambok = lambok[:, indok]
if strict is True:
detbis = dict(det)
if xixj_lim is not None:
detbis['outline'] = np.array([
np.r_[
xixj_lim[0][0],
xixj_lim[0][1]*np.r_[1, 1],
xixj_lim[0][0],
],
np.r_[
xixj_lim[1][0]*np.r_[1, 1],
xixj_lim[1][1]*np.r_[1, 1],
],
])
detbis['outline'] = np.concatenate(
(detbis['outline'], detbis['outline'][:, 0:1]),
axis=1,
)
for kk, ll in enumerate(lamb):
lambi = _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj(
cryst=self,
lamb=np.full((lambok[kk, :].sum(), 1), ll),
n=n,
ndtheta=ndtheta,
pts=ptsXYZ[:, lambok[kk, :]],
use_non_parallelism=use_non_parallelism,
return_phidtheta=False,
return_xixj=False,
strict=strict,
det=detbis,
)
lambok[kk, lambok[kk, :]] = ~np.isnan(lambi[:, 0])
if plot:
dax = _plot_optics.CrystalBragg_plot_plasma_domain_at_lamb(
cryst=self,
det=det,
xixj_lim=xixj_lim,
config=config,
lamb=lamb,
pts=pts,
reseff=[resR, resZ, resPhi],
lambok=lambok,
dax=dax,
plot_as=plot_as,
lcolor=lcolor,
)
if return_dax is True:
return pts, lambok, dax
else:
return pts, lambok
def calc_signal_from_emissivity(
self,
emis=None,
config=None,
struct=None,
domain=None,
res=None,
det=None,
xixj_lim=None,
strict=None,
bragg=None,
lamb=None,
binning=None,
ndtheta=None,
nlamb=None,
n=None,
use_non_parallelism=None,
plot=None,
vmin=None,
vmax=None,
vmin_bin=None,
vmax_bin=None,
cmap=None,
dax=None,
fs=None,
dmargin=None,
tit=None,
return_dax=None,
):
(
struct, lamb, binning,
) = _check_optics._check_calc_signal_from_emissivity(
emis=emis, config=config, struct=struct,
lamb=lamb, det=det, binning=binning,
)
bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)
if ndtheta is None:
ndtheta = 5
if nlamb is None:
nlamb = 11
if strict is None:
strict = True
if plot is None:
plot = True
if return_dax is None:
return_dax = plot is True
(
pts, dV, ind, (resR, resZ, resPhi),
) = config.dStruct['dObj']['Ves'][struct].get_sampleV(
res=res,
domain=domain,
returnas='(R, Z, Phi)',
)
ptsXYZ = np.array([
pts[0, :]*np.cos(pts[2, :]),
pts[0, :]*np.sin(pts[2, :]),
pts[1, :],
])
lamb_access = self.get_lamb_avail_from_pts(
pts=ptsXYZ,
nlamb=2,
use_non_parallelism=use_non_parallelism,
return_phidtheta=False,
return_xixj=False,
strict=False,
)
lambok = np.zeros((lamb.size, pts.shape[1]), dtype=bool)
for ii, ll in enumerate(lamb):
lambok[ii, :] = (
(lamb_access[:, 0] <= ll) & (ll <= lamb_access[:, 1])
)
indok = np.any(lambok, axis=0)
pts = pts[:, indok]
ptsXYZ = ptsXYZ[:, indok]
lambok = lambok[:, indok]
detbis = dict(det)
if xixj_lim is not None:
detbis['outline'] = np.array([
np.r_[
xixj_lim[0][0],
xixj_lim[0][1]*np.r_[1, 1],
xixj_lim[0][0],
],
np.r_[
xixj_lim[1][0]*np.r_[1, 1],
xixj_lim[1][1]*np.r_[1, 1],
],
])
detbis['outline'] = np.concatenate(
(detbis['outline'], detbis['outline'][:, 0:1]),
axis=1,
)
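        # For each sampled point and wavelength: compute the detector impacts
        # (xi, xj) and evaluate the emissivity there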
shape = tuple(np.r_[pts.shape[1], lamb.size, ndtheta, 2])
xi = np.full(shape, np.nan)
xj = np.full(shape, np.nan)
val = np.full(shape, np.nan)
for kk, ll in enumerate(lamb):
(
lambi, xii, xji,
) = _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj(
cryst=self,
lamb=np.full((lambok[kk, :].sum(), 1), ll),
n=n,
ndtheta=ndtheta,
pts=ptsXYZ[:, lambok[kk, :]],
use_non_parallelism=use_non_parallelism,
return_phidtheta=False,
return_xixj=True,
strict=True,
det=detbis,
)
iok = ~np.isnan(lambi[:, 0])
iokf = lambok[kk, :].nonzero()[0][iok]
lambok[kk, lambok[kk, :]] = iok
xi[iokf, kk, :, :] = xii[iok, 0, :, :]
xj[iokf, kk, :, :] = xji[iok, 0, :, :]
val[iokf, kk, :, :] = emis(
r=pts[0, iokf],
z=pts[1, iokf],
phi=pts[2, iokf],
lamb=lamb[kk:kk+1],
t=None,
)[:, 0, None, None]
binned = None
if binning is not False:
iok = np.isfinite(val)
binned = scpstats.binned_statistic_2d(
xi[iok].ravel(),
xj[iok].ravel(),
val[iok].ravel(),
statistic='mean',
bins=binning,
expand_binnumbers=False,
)[0]
if plot:
dax = _plot_optics.CrystalBragg_plot_signal_from_emissivity(
cryst=self,
det=det,
xixj_lim=xixj_lim,
config=config,
lamb=lamb,
pts=pts,
reseff=[resR, resZ, resPhi],
xi=xi,
xj=xj,
val=val,
lambok=lambok,
binning=binning,
binned=binned,
vmin=vmin,
vmax=vmax,
vmin_bin=vmin_bin,
vmax_bin=vmax_bin,
cmap=cmap,
dax=dax,
fs=fs,
dmargin=dmargin,
tit=tit,
)
if return_dax is True:
return pts, val, xi, xj, binned, dax
else:
return pts, val, xi, xj, binned
@staticmethod
def fit1d_dinput(
dlines=None, dconstraints=None, dprepare=None,
data=None, lamb=None,
mask=None, domain=None, pos=None, subset=None,
same_spectrum=None, same_spectrum_dlamb=None,
focus=None, valid_fraction=None, valid_nsigma=None,
focus_half_width=None, valid_return_fract=None,
):
import tofu.spectro._fit12d as _fit12d
return _fit12d.fit1d_dinput(
dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,
data=data, lamb=lamb,
mask=mask, domain=domain, pos=pos, subset=subset,
same_spectrum=same_spectrum,
same_spectrum_dlamb=same_spectrum_dlamb,
focus=focus, valid_fraction=valid_fraction,
valid_nsigma=valid_nsigma, focus_half_width=focus_half_width,
valid_return_fract=valid_return_fract)
def fit1d(
self,
data=None, lamb=None,
dinput=None, dprepare=None, dlines=None, dconstraints=None,
mask=None, domain=None, subset=None, pos=None,
same_spectrum=None, same_spectrum_dlamb=None,
focus=None, valid_fraction=None, valid_nsigma=None,
focus_half_width=None,
dx0=None, dscales=None, x0_scale=None, bounds_scale=None,
method=None, tr_solver=None, tr_options=None, max_nfev=None,
xtol=None, ftol=None, gtol=None,
loss=None, verbose=None, chain=None, jac=None, showonly=None,
amp=None, coefs=None, ratio=None,
Ti=None, width=None, vi=None, shift=None,
pts_lamb_total=None, pts_lamb_detail=None,
save=None, name=None, path=None,
plot=None, fs=None, dmargin=None,
tit=None, wintit=None, returnas=None,
):
if dinput is None:
dinput = self.fit1d_dinput(
dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,
data=data, lamb=lamb,
mask=mask, domain=domain, pos=pos, subset=subset,
focus=focus, valid_fraction=valid_fraction,
valid_nsigma=valid_nsigma, focus_half_width=focus_half_width,
same_spectrum=same_spectrum,
same_spectrum_dlamb=same_spectrum_dlamb)
import tofu.spectro._fit12d as _fit12d
return _fit12d.fit1d(
data=data, lamb=lamb,
dinput=dinput, dprepare=dprepare,
dlines=dlines, dconstraints=dconstraints,
mask=mask, domain=domain, subset=subset, pos=pos,
method=method, tr_solver=tr_solver, tr_options=tr_options,
xtol=xtol, ftol=ftol, gtol=gtol,
max_nfev=max_nfev, loss=loss, chain=chain,
dx0=dx0, x0_scale=x0_scale, bounds_scale=bounds_scale,
jac=jac, verbose=verbose,
save=save, name=name, path=path,
amp=amp, coefs=coefs, ratio=ratio,
Ti=Ti, width=width, vi=vi, shift=shift,
pts_lamb_total=pts_lamb_total,
pts_lamb_detail=pts_lamb_detail,
plot=plot, fs=fs, wintit=wintit, tit=tit)
@staticmethod
def fit1d_extract(
dfit1d=None,
amp=None, coefs=None, ratio=None,
Ti=None, width=None,
vi=None, shift=None,
pts_lamb_total=None, pts_lamb_detail=None,
):
import tofu.spectro._fit12d as _fit12d
return _fit12d.fit1d_extract(
            dfit1d=dfit1d,
amp=amp, coefs=coefs, ratio=ratio,
Ti=Ti, width=width,
vi=vi, shift=shift,
pts_lamb_total=pts_lamb_total, pts_lamb_detail=pts_lamb_detail)
def fit1d_from2d(self):
if lphi is None:
msg = ("Arg lphi must be provided !")
raise Exception(msg)
if dprepare is None:
dprepare = self.fit2d_prepare(
data=data, xi=xi, xj=xj, n=n,
det=det, dtheta=dtheta, psi=psi,
mask=mask, domain=domain,
pos=pos, binning=binning,
nbsplines=False, subset=False,
lphi=lphi, lphi_tol=lphi_tol)
if dinput is None:
dinput = self.fit2d_dinput(
dlines=dlines, dconstraints=dconstraints,
deg=deg, knots=knots, nbsplines=nbsplines,
domain=dprepare['domain'],
dataphi1d=dprepare['dataphi1d'], phi1d=dprepare['phi1d'])
out = self.fit1d(
xi=None, xj=None, data=None, mask=None,
det=None, dtheta=None, psi=None, n=None,
nlambfit=None, nphifit=None,
lambmin=None, lambmax=None,
dlines=None, spect1d=None,
dconstraints=None, dx0=None,
same_spectrum=None, dlamb=None,
double=None,
dscales=None, x0_scale=None, bounds_scale=None,
method=None, max_nfev=None,
xtol=None, ftol=None, gtol=None,
loss=None, verbose=0, chain=None,
jac=None, showonly=None,
plot=None, fs=None, dmargin=None,
tit=None, wintit=None, returnas=None,
)
pass
def fit2d_dinput(
self, dlines=None, dconstraints=None, dprepare=None,
data=None, xi=None, xj=None, n=None,
        det=None, dtheta=None, psi=None,
        use_non_parallelism=None,
mask=None, domain=None, pos=None, binning=None, subset=None,
deg=None, knots=None, nbsplines=None,
focus=None, valid_fraction=None, valid_nsigma=None,
focus_half_width=None, valid_return_fract=None,
):
import tofu.spectro._fit12d as _fit12d
if dprepare is None:
xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)
nxi = xi.size if xi is not None else np.unique(xii).size
nxj = xj.size if xj is not None else np.unique(xjj).size
bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xii, xj=xjj, det=det,
dtheta=dtheta, psi=psi,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=True,
)
dprepare = _fit12d.multigausfit2d_from_dlines_prepare(
data, lamb, phi,
mask=mask, domain=domain,
pos=pos, binning=binning,
nbsplines=nbsplines, subset=subset,
nxi=nxi, nxj=nxj,
)
return _fit12d.fit2d_dinput(
dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,
deg=deg, knots=knots, nbsplines=nbsplines,
focus=focus, valid_fraction=valid_fraction,
valid_nsigma=valid_nsigma, focus_half_width=focus_half_width,
valid_return_fract=valid_return_fract)
def fit2d(
self,
data=None, xi=None, xj=None,
det=None, dtheta=None, psi=None, n=None,
dinput=None, dprepare=None, dlines=None, dconstraints=None,
mask=None, domain=None, subset=None, pos=None, binning=None,
focus=None, valid_fraction=None, valid_nsigma=None,
focus_half_width=None,
deg=None, knots=None, nbsplines=None,
dx0=None, dscales=None, x0_scale=None, bounds_scale=None,
method=None, tr_solver=None, tr_options=None, max_nfev=None,
xtol=None, ftol=None, gtol=None,
loss=None, verbose=None, chain=None, jac=None, showonly=None,
predeclare=None, debug=None,
amp=None, coefs=None, ratio=None,
Ti=None, width=None, vi=None, shift=None,
pts_lamb_total=None, pts_lamb_detail=None,
save=None, name=None, path=None,
plot=None, fs=None, dmargin=None,
tit=None, wintit=None, returnas=None,
):
if dinput is None:
dinput = self.fit2d_dinput(
dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,
data=data, xi=xi, xj=xj, n=n,
det=det, dtheta=dtheta, psi=psi,
mask=mask, domain=domain,
pos=pos, binning=binning, subset=subset,
deg=deg, knots=knots, nbsplines=nbsplines,
focus=focus, valid_fraction=valid_fraction,
valid_nsigma=valid_nsigma, focus_half_width=focus_half_width)
import tofu.spectro._fit12d as _fit12d
return _fit12d.fit2d(
dinput=dinput, dprepare=dprepare,
dlines=dlines, dconstraints=dconstraints,
lamb=lamb, phi=phi, data=data, mask=mask,
nxi=dinput['dprepare']['nxi'], nxj=dinput['dprepare']['nxj'],
domain=domain, pos=pos, binning=binning, subset=subset,
deg=deg, knots=knots, nbsplines=nbsplines,
method=method, tr_solver=tr_solver, tr_options=tr_options,
xtol=xtol, ftol=ftol, gtol=gtol,
max_nfev=max_nfev, loss=loss, chain=chain,
dx0=dx0, x0_scale=x0_scale, bounds_scale=bounds_scale,
jac=jac, verbose=verbose,
save=save, name=name, path=path,
plot=plot)
@staticmethod
def fit2d_extract(dfit2d=None,
amp=None, Ti=None, vi=None,
pts_phi=None, npts_phi=None,
pts_lamb_phi_total=None,
pts_lamb_phi_detail=None):
import tofu.spectro._fit12d as _fit12d
return _fit12d.fit2d_extract_data(
dfit2d=dfit2d,
amp=amp, Ti=Ti, vi=vi,
pts_phi=pts_phi, npts_phi=npts_phi,
pts_lamb_phi_total=pts_lamb_phi_total,
pts_lamb_phi_detail=pts_lamb_phi_detail)
    def fit2d_plot(self, dfit2d=None, ratio=None,
                   amp=None, Ti=None, vi=None,
                   pts_lamb_phi_total=None, pts_lamb_phi_detail=None,
dax=None, plotmode=None, angunits=None,
cmap=None, vmin=None, vmax=None,
dmargin=None, tit=None, wintit=None, fs=None):
dout = self.fit2d_extract(
dfit2d,
amp=amp, Ti=Ti, vi=vi,
pts_lamb_phi_total=pts_lamb_phi_total,
pts_lamb_phi_detail=pts_lamb_phi_detail)
return _plot_optics.CrystalBragg_plot_data_fit2d(
dfit2d=dfit2d, dout=dout, ratio=ratio,
dax=dax, plotmode=plotmode, angunits=angunits,
cmap=cmap, vmin=vmin, vmax=vmax,
dmargin=dmargin, tit=tit, wintit=wintit, fs=fs)
def noise_analysis(
self, data=None, xi=None, xj=None, n=None,
        det=None, dtheta=None, psi=None,
        use_non_parallelism=None,
mask=None, valid_fraction=None, nxerrbin=None,
margin=None, domain=None, nlamb=None,
deg=None, knots=None, nbsplines=None,
loss=None, max_nfev=None,
xtol=None, ftol=None, gtol=None,
method=None, tr_solver=None, tr_options=None,
verbose=None, plot=None,
ms=None, dcolor=None,
dax=None, fs=None, dmargin=None,
wintit=None, tit=None, sublab=None,
save_fig=None, name_fig=None, path_fig=None,
fmt=None, return_dax=None,
):
bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xi, xj=xj, det=det,
dtheta=dtheta, psi=psi,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=True,
)
import tofu.spectro._fit12d as _fit12d
return _fit12d.noise_analysis_2d(
data, lamb, phi,
mask=mask, valid_fraction=valid_fraction,
margin=margin, nxerrbin=nxerrbin,
nlamb=nlamb, deg=deg, knots=knots, nbsplines=nbsplines,
loss=loss, max_nfev=max_nfev,
xtol=xtol, ftol=ftol, gtol=gtol,
method=method, tr_solver=tr_solver, tr_options=tr_options,
verbose=verbose, plot=plot,
ms=ms, dcolor=dcolor,
dax=dax, fs=fs, dmargin=dmargin,
wintit=wintit, tit=tit, sublab=sublab,
save_fig=save_fig, name_fig=name_fig, path_fig=path_fig,
fmt=fmt, return_dax=return_dax)
@staticmethod
def noise_analysis_plot(
dnoise=None, margin=None, valid_fraction=None,
ms=None, dcolor=None,
dax=None, fs=None, dmargin=None,
wintit=None, tit=None, sublab=None,
save=None, name=None, path=None, fmt=None,
):
import tofu.spectro._plot as _plot_spectro
return _plot_spectro.plot_noise_analysis(
dnoise=dnoise, margin=margin, valid_fraction=valid_fraction,
ms=ms, dcolor=dcolor,
dax=dax, fs=fs, dmargin=dmargin,
wintit=wintit, tit=tit, sublab=sublab,
save=save, name=name, path=path, fmt=fmt)
def noise_analysis_scannbs(
self, data=None, xi=None, xj=None, n=None,
        det=None, dtheta=None, psi=None,
        use_non_parallelism=None,
mask=None, nxerrbin=None,
domain=None, nlamb=None,
deg=None, knots=None, nbsplines=None, lnbsplines=None,
loss=None, max_nfev=None,
xtol=None, ftol=None, gtol=None,
method=None, tr_solver=None, tr_options=None,
verbose=None, plot=None,
ms=None, dax=None, fs=None, dmargin=None,
wintit=None, tit=None, sublab=None,
save_fig=None, name_fig=None, path_fig=None,
fmt=None, return_dax=None,
):
bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
xi=xi, xj=xj, det=det,
dtheta=0, psi=0,
use_non_parallelism=use_non_parallelism,
n=n,
grid=True,
return_lamb=True,
)
import tofu.spectro._fit12d as _fit12d
return _fit12d.noise_analysis_2d_scannbs(
data, lamb, phi,
mask=mask, nxerrbin=nxerrbin, nlamb=nlamb,
deg=deg, knots=knots, nbsplines=nbsplines, lnbsplines=lnbsplines,
loss=loss, max_nfev=max_nfev,
xtol=xtol, ftol=ftol, gtol=gtol,
method=method, tr_solver=tr_solver, tr_options=tr_options,
verbose=verbose, plot=plot,
ms=ms, dax=dax, fs=fs, dmargin=dmargin,
wintit=wintit, tit=tit, sublab=sublab,
save_fig=save_fig, name_fig=name_fig, path_fig=path_fig,
fmt=fmt, return_dax=return_dax)
@staticmethod
def noise_analysis_scannbs_plot(
dnoise_scan=None, ms=None,
dax=None, fs=None, dmargin=None,
wintit=None, tit=None, sublab=None,
save=None, name=None, path=None, fmt=None,
):
import tofu.spectro._plot as _plot_spectro
return _plot_spectro.plot_noise_analysis_scannbs(
dnoise=dnoise_scan, ms=ms,
dax=dax, fs=fs, dmargin=dmargin,
wintit=wintit, tit=tit, sublab=sublab,
save=save, name=name, path=path, fmt=fmt)
| true
| true
|
f71a0da9d68a3d4c9024e6fcb718688385715211
| 83
|
py
|
Python
|
buttonlist/src/buttonlist/__main__.py
|
pmfrank/beeware-tutorials
|
96274b0a735bd468e946111baf441a527ff0b0d5
|
[
"BSD-2-Clause"
] | 1
|
2021-06-04T05:51:39.000Z
|
2021-06-04T05:51:39.000Z
|
buttonlist/src/buttonlist/__main__.py
|
pmfrank/beeware-tutorials
|
96274b0a735bd468e946111baf441a527ff0b0d5
|
[
"BSD-2-Clause"
] | null | null | null |
buttonlist/src/buttonlist/__main__.py
|
pmfrank/beeware-tutorials
|
96274b0a735bd468e946111baf441a527ff0b0d5
|
[
"BSD-2-Clause"
] | null | null | null |
from buttonlist.app import main
if __name__ == '__main__':
main().main_loop()
| 16.6
| 31
| 0.698795
|
from buttonlist.app import main
if __name__ == '__main__':
main().main_loop()
| true
| true
|
f71a0f4dbef3bd901ce744bc93811b52faddf399
| 34,662
|
py
|
Python
|
anuvaad-etl/anuvaad-extractor/document-processor/evaluator/evaluator_string/src/notebooks/tesseract_ocr_evaluation_local.py
|
srihari-nagaraj/anuvaad
|
b09b01a033a033e97db6e404c088e0e6332053e4
|
[
"MIT"
] | null | null | null |
anuvaad-etl/anuvaad-extractor/document-processor/evaluator/evaluator_string/src/notebooks/tesseract_ocr_evaluation_local.py
|
srihari-nagaraj/anuvaad
|
b09b01a033a033e97db6e404c088e0e6332053e4
|
[
"MIT"
] | null | null | null |
anuvaad-etl/anuvaad-extractor/document-processor/evaluator/evaluator_string/src/notebooks/tesseract_ocr_evaluation_local.py
|
srihari-nagaraj/anuvaad
|
b09b01a033a033e97db6e404c088e0e6332053e4
|
[
"MIT"
] | null | null | null |
import glob
import uuid
import json
import requests
import copy,time
import os
import cv2
import numpy as np
from time import sleep
import pandas as pd
import logging
from collections import Counter
import pytesseract
from pytesseract import Output
#from pytesseract import pytesseract
from difflib import SequenceMatcher
from io import StringIO
from dynamic_adjustment import coord_adjustment
import ast
from leven import levenshtein
from horizontal_merging import horzontal_merging
ocr_level = "LINE"
text_processing = True
REJECT_FILTER = 2
#crop_factor= 5
#crop_factor_y= 4
crop_factor= 5
crop_factor_y= 0
crop_save = True
digitization = True
vis_thresh=0.90
LANG_MAPPING = {
"en" : ["Latin","eng"],
"kn" : ['Kannada',"kan"],
"gu": ["guj"],
"or": ["ori"],
"hi" : ["Devanagari","hin","eng"],
"bn" : ["Bengali","ben"],
"mr": ["Devanagari","hin","eng"],
"ta": ['Tamil',"tam"],
"te" : ["Telugu","tel"],
"ml" :["Malayalam"],
"ma" :["Marathi"]
}
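# First entry: tesseract script/language model passed to image_to_data;
# remaining entries: alternative language codes (used by image_to_string)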
#path = '/home/ubuntu/tesseract_evaluation/data/'
#output_path = '/home/ubuntu/tesseract_evaluation/result/'
#output_path_boxes= '/home/ubuntu/tesseract_evaluation/test_word_boxes/'
#base_path = '/home/ubuntu/tesseract_evaluation/test_word_boxes/'
path = '/home/naresh/Tarento/testing_document_processor/test_pipeline/data/'
output_path = '/home/naresh/Tarento/testing_document_processor/test_pipeline/result/'
output_path_boxes= '/home/naresh/Tarento/testing_document_processor/test_word_boxes/'
base_path= '/home/naresh/Tarento/testing_document_processor/test_word_boxes/'
psms = [6,7,8,9,10,11]
token = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyTmFtZSI6ImRoaXJhai5kYWdhQHRhcmVudG8uY29tIiwicGFzc3dvcmQiOiJiJyQyYiQxMiRuTXdNcHpCVlBXVVUvSlVLWXBKYWkuQUd2SUNJalJVcUdIbnBPenRzai5VRU55emlSZmk1TyciLCJleHAiOjE2MTk3Njg2NjN9.14IL5_kw83F5gxjUMSw6kCDLYQhjAg306AwJj0DsxWc'
word_url = "https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate"
google_url = "https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate"
layout_url = "https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate"
segmenter_url = "https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate"
bs_url ="https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/jobs/search/bulk"
evaluator_url = "https://auth.anuvaad.org/anuvaad-etl/document-processor/evaluator/v0/process"
#evaluator_url = 'http://0.0.0.0:5001/anuvaad-etl/document-processor/evaluator/v0/process'
download_url ="https://auth.anuvaad.org/download/"
upload_url = 'https://auth.anuvaad.org/anuvaad-api/file-uploader/v0/upload-file'
headers = {
'auth-token' :token }
class Draw:
def __init__(self,input_json,save_dir,regions,prefix='',color= (255,0,0),thickness=5):
self.json = input_json
self.save_dir = save_dir
self.regions = regions
self.prefix = prefix
self.color = color
self.thickness=thickness
if self.prefix == 'seg':
#print('drawing children')
self.draw_region_children()
else:
self.draw_region__sub_children()
def get_coords(self,page_index):
return self.json['outputs'][0]['pages'][page_index][self.regions]
def get_page_count(self):
return(self.json['outputs'][0]['page_info'])
def get_page(self,page_index):
page_path = self.json['outputs'][0]['page_info'][page_index]
page_path = page_path.split('upload')[1]#'/'.join(page_path.split('/')[1:])
#print(page_path)
return download_file(download_url,headers,page_path,f_type='image')
def draw_region(self):
font = cv2.FONT_HERSHEY_SIMPLEX
for page_index in range(len(self.get_page_count())) :
nparr = np.frombuffer(self.get_page(page_index), np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
for region in self.get_coords(page_index) :
ground = region['boundingBox']['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x']) ,int(pt['y'])])
cv2.polylines(image, [np.array(pts)],True, self.color, self.thickness)
if 'class' not in region.keys():
region['class'] = 'TEXT'
cv2.putText(image, str(region['class']), (pts[0][0],pts[0][1]), font,
2, (0,125,255), 3, cv2.LINE_AA)
image_path = os.path.join(self.save_dir , '{}_{}_{}.png'.format(self.regions,self.prefix,page_index))
cv2.imwrite(image_path , image)
def draw_region_children(self):
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 2
thickness =3
for page_index in range(len(self.get_page_count())) :
nparr = np.frombuffer(self.get_page(page_index), np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
for region_index,region in enumerate(self.get_coords(page_index)) :
try:
ground = region['boundingBox']['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x']) ,int(pt['y'])])
#print(pts)
region_color = (0 ,0,125+ 130*(region_index/ len(self.get_coords(page_index))))
cv2.polylines(image, [np.array(pts)],True, region_color, self.thickness)
cv2.putText(image, str(region_index), (pts[0][0],pts[0][1]), font,
fontScale, region_color, thickness, cv2.LINE_AA)
for line_index, line in enumerate(region['children']):
ground = line['boundingBox']['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x']) ,int(pt['y'])])
line_color = (125 + 130*(region_index/ len(self.get_coords(page_index))) ,0,0)
cv2.polylines(image, [np.array(pts)],True, line_color, self.thickness -2)
cv2.putText(image, str(line_index), (pts[0][0],pts[0][1]), font,
fontScale, line_color, thickness, cv2.LINE_AA)
except Exception as e:
print(str(e))
print(region)
image_path = os.path.join(self.save_dir , '{}_{}.png'.format(self.prefix,page_index))
cv2.imwrite(image_path , image)
def draw_region__sub_children(self):
for page_index in range(len(self.get_page_count())) :
nparr = np.frombuffer(self.get_page(page_index), np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 2
# Blue color in BGR
color = (0 ,255,0)
# Line thickness of 2 px
thickness = 3
# Using cv2.putText() method
for region_index,region in enumerate(self.get_coords(page_index)) :
try:
ground = region['boundingBox']['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x']) ,int(pt['y'])])
#print(pts)
region_color = (0,0,255)
cv2.polylines(image, [np.array(pts)],True, region_color, self.thickness)
for line_index, line in enumerate(region['regions']):
ground = line['boundingBox']['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x'])-1 ,int(pt['y']) -1 ])
line_color = (255,0,0)
cv2.polylines(image, [np.array(pts)],True, line_color, self.thickness -2)
cv2.putText(image, str(line_index), (pts[0][0],pts[0][1]), font,
fontScale, (255,0,0), thickness, cv2.LINE_AA)
for word_index, word in enumerate(line['regions']):
ground = word['boundingBox']['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x']) -3,int(pt['y'])-3])
word_color = (0,255,0)
cv2.polylines(image, [np.array(pts)],True, word_color, self.thickness -2)
cv2.putText(image, str(word_index), (pts[0][0],pts[0][1]), font,
fontScale-1,(0,255,0), thickness, cv2.LINE_AA)
except Exception as e:
print(str(e))
print(region)
#print(self.prefix)
image_path = os.path.join(self.save_dir , '{}_{}_{}.png'.format(self.prefix,self.regions,page_index))
cv2.imwrite(image_path , image)
# # google vision pipeline
def google_ocr_v15(url,headers,pdf_name):
file = {
"files": [
{
"locale": "hi",
"path": pdf_name,
"type": "pdf",
"config":{
"OCR": {
"option": "HIGH_ACCURACY",
"language": "hi",
"top_correction":"True",
"craft_word": "True",
"craft_line": "True",
}
}}
],
"workflowCode": "WF_A_FCWDLDBSOD15GV"
}
res = requests.post(url,json=file,headers=headers)
return res.json()
def upload_file(pdf_file,headers,url):
#url = 'https://auth.anuvaad.org/anuvaad-api/file-uploader/v0/upload-file'
files = [
('file',(open(pdf_file,'rb')))]
response = requests.post(url, headers=headers, files=files)
return response.json()
def download_file(download_url,headers,outputfile,f_type='json'):
download_url =download_url+str(outputfile)
res = requests.get(download_url,headers=headers)
if f_type == 'json':
return res.json()
else :
return res.content
def save_json(path,res):
with open(path, "w", encoding='utf8') as write_file:
json.dump(res, write_file,ensure_ascii=False )
def bulk_search(job_id,bs_url,headers):
bs_request = {
"jobIDs": [job_id],
"taskDetails":"true"
}
print(job_id)
res = requests.post(bs_url,json=bs_request,headers=headers, timeout = 10000)
print(res.json())
while(1):
in_progress = res.json()['jobs'][0]['status']
if in_progress == 'COMPLETED':
outputfile = res.json()['jobs'][0]['output'][0]['outputFile']
print(in_progress)
return outputfile
break
sleep(0.5)
print(in_progress)
res = requests.post(bs_url,json=bs_request,headers=headers, timeout = 10000)
def execute_module(module,url,input_file,module_code,pdf_dir,overwirte=True , draw=True):
output_path = os.path.join(pdf_dir,'{}.json'.format(module_code))
if os.path.exists(output_path) and not overwirte:
print(' loading *****************{}'.format(module_code ))
with open(output_path,'r') as wd_file :
response = json.load(wd_file)
wf_res = pdf_dir + '/{}_wf.json'.format(module_code)
with open(wf_res,'r') as wd_file :
json_file = json.load(wd_file)
#json_file = upload_file(output_path,headers,upload_url)['data']
else :
if module_code in ['wd','gv']:
res = upload_file(input_file,headers,upload_url)
print('upload response **********', res)
pdf_name = res['data']
response = module(url,headers,pdf_name)
else :
response = module(url,headers,input_file)
if 'eval' in module_code :
json_file = response['outputFile']
response = download_file(download_url,headers,json_file)
save_json(output_path,response)
return json_file,response
print(' response *****************{} {}'.format(module_code ,response ))
job_id = response['jobID']
json_file = bulk_search(job_id,bs_url,headers)
save_json(pdf_dir + '/{}_wf.json'.format(module_code),json_file)
print('bulk search response **************',json_file )
response = download_file(download_url,headers,json_file)
save_json(output_path,response)
if draw :
if module_code in ['wd','gv']:
Draw(response,pdf_dir,regions='lines',prefix=module_code)
else :
Draw(response,pdf_dir,regions='regions',prefix=module_code)
return json_file,response
def evaluate__and_save_input(pdf_files,output_dir,headers,word_url,layout_url,download_url,upload_url,bs_url):
word_responses = {}
layout_responses = {}
segmenter_responses = []
for pdf in pdf_files:
#try :
pdf_name = pdf.split('/')[-1].split('.')[0]
print(pdf , ' is being processed')
pdf_output_dir = os.path.join(output_dir,pdf_name)
os.system('mkdir -p "{}"'.format(pdf_output_dir))
wd_json,_ = execute_module(google_ocr_v15,word_url,input_file=pdf,module_code='gv',pdf_dir=pdf_output_dir,overwirte=False , draw=False)
def main(path,headers,word_url,layout_url,download_url,upload_url,bs_url):
pdf_names = glob.glob(path + '/*.pdf')
return evaluate__and_save_input(pdf_names,output_path,headers,word_url,layout_url,download_url,upload_url,bs_url)
if digitization:
main(path,headers,word_url,layout_url,download_url,upload_url,bs_url)
def bound_coordinate(coordinate, max_val):
    if coordinate < 0:
        coordinate = 0
    if coordinate > max_val:
        coordinate = max_val - 2
    return int(coordinate)
def get_image_from_box(image, box, height=140):
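    # Rectify the (possibly tilted) quadrilateral box into an axis-aligned
    # crop using a perspective transform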
#box = data['box']
#scale = np.sqrt((box[1, 1] - box[2, 1])**2 + (box[0, 1] - box[3, 1])**2) / height
#print("scale is ",scale)
#w = int(np.sqrt((box[0, 0] - box[1, 0])**2 + (box[2, 0] - box[3, 0])**2) / scale)
w = max(abs(box[0, 0] - box[1, 0]),abs(box[2, 0] - box[3, 0]))
height = max(abs(box[0, 1] - box[3, 1]),abs(box[1, 1] - box[2, 1]))
pts1 = np.float32(box)
#w=2266-376
pts2 = np.float32([[0, 0], [int(w), 0],[int(w),int(height)],[0,int(height)]])
M = cv2.getPerspectiveTransform(pts1, pts2)
result_img = cv2.warpPerspective(image,M,(int(w), int(height))) #flags=cv2.INTER_NEAREST
return result_img
def process_dfs(temp_df):
temp_df = temp_df[temp_df.text.notnull()]
text = ""
conf=0
temp_dict1 = []
for index, row in temp_df.iterrows():
temp_dict2 = {}
conf = conf + row["conf"]
temp_dict2["text"]=row['text']
temp_dict2["conf"]=row['conf']
text = text +" "+ str(row['text'])
temp_dict1.append(temp_dict2)
return text,temp_dict1
def process_dfs_updated(temp_df,language,psm_val,image):
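    # Re-run tesseract on low-confidence words (< 50) with several PSM modes
    # and keep the highest-confidence result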
temp_df = temp_df[temp_df.text.notnull()]
text = ""
conf=0
temp_dict1 = []
if len(temp_df)>0:
for index, row in temp_df.iterrows():
temp_dict2 = {}
org_conf = row["conf"]
org_text = row['text']
flag = True
if row["conf"]<50:
print(row["top"],row["height"],row["left"],row["width"])
crop_image = image[ int(row["top"]):int(row["top"]+row["height"]), int(row["left"]):int(row["left"]+row["width"])]
for psm in psms:
df2 = pytesseract.image_to_data(crop_image,config='--psm '+str(psm), lang=LANG_MAPPING[language][0],output_type=Output.DATAFRAME)
temp_df2 = df2[df2.text.notnull()]
if len(temp_df2)>0:
new_conf = temp_df2.iloc[0].conf
if org_conf<new_conf:
org_conf = new_conf
org_text = temp_df2.iloc[0].text
if flag:
print("old text", row['text'])
print("new text", org_text)
conf = conf + org_conf
temp_dict2["text"]=org_text
temp_dict2["conf"]=org_conf
text = text +" "+ str(org_text)
temp_dict1.append(temp_dict2)
return text,temp_dict1
def check_psm(path,coord,language,mode_height,save_base_path,psm_val,org_score,org_text,line_text,org_conf):
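    # Try several PSM modes and keep the OCR output that best matches the
    # reference (google vision) line text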
for psm in psms:
text,conf_dict = get_text(path,coord,language,mode_height,save_base_path,psm)
if text_processing:
text_list = text.split()
text = " ".join(text_list)
score,message,match_count = seq_matcher(text,line_text)
if score==1.0 or score==1:
org_score = score
org_text = text
org_conf = conf_dict
break
elif score>org_score:
org_score =score
org_text = text
org_conf = conf_dict
return org_text, org_conf,org_score
def get_text(path,coord,language,mode_height,save_base_path,psm_val):
#try:
path = path.split('upload')[1]
image = download_file(download_url,headers,path,f_type='image')
nparr = np.frombuffer(image, np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
#image = cv2.imread("/home/naresh/crop.jpeg",0)
height, width,channel = image.shape
# left = bound_coordinate(coord[0] , width)
# top = bound_coordinate(coord[1],height )
# right = bound_coordinate(coord[2] ,width)
# bottom = bound_coordinate(coord[3], height)
# region_width = abs(right-left)
# region_height = abs(bottom-top)
# if left==right==top==bottom==0 or region_width==0 or region_height==0:
# return ""
crop_image = get_image_from_box(image, coord, height=abs(coord[0,1]-coord[2,1]))
#crop_image = image[ top:bottom, left:right]
#crop_image_cv = image[ coord[0,1]:coord[2,1], coord[0,0]:coord[1,0]]
save_path = save_base_path+"/"+"_psm_pers"+str(psm_val)+"--"+str(uuid.uuid4()) + '.jpg'
if crop_save:
cv2.imwrite(save_path,crop_image)
#if abs(bottom-top) > 3*mode_height:
#print(LANG_MAPPING[language][0])
if abs(coord[1,1]-coord[2,1])>mode_height:
#text = pytesseract.image_to_string(crop_image,config='--psm 6', lang=LANG_MAPPING[language][1])
dfs = pytesseract.image_to_data(crop_image,config='--psm 6', lang=LANG_MAPPING[language][0],output_type=Output.DATAFRAME)
#text,conf_dict = process_dfs(dfs)
text,conf_dict = process_dfs_updated(dfs,language,6,crop_image)
else:
#text = pytesseract.image_to_string(crop_image,config='--psm '+str(psm_val), lang=LANG_MAPPING[language][1])
dfs = pytesseract.image_to_data(crop_image,config='--psm '+str(psm_val), lang=LANG_MAPPING[language][0],output_type=Output.DATAFRAME)
#text,conf_dict = process_dfs(dfs)
text,conf_dict = process_dfs_updated(dfs,language,psm_val,crop_image)
return text,conf_dict
#except:
#print("xxxxxxxxxxxxxxxxxxxxxxxxxx",coord)
#print([0.0])
#return "",[0.0]
def merger_text(line):
text = ""
word_count=0
for word_idx, word in enumerate(line['regions']):
if "text" in word.keys() and word["text"].replace(" ", "") != "":
text = text+" "+ word["text"]
word_count=word_count+1
return text, word_count
def get_coord(bbox):
temp_box = []
temp_box_cv = []
temp_box.append([bbox["boundingBox"]['vertices'][0]['x'],bbox["boundingBox"]['vertices'][0]['y']])
temp_box.append([bbox["boundingBox"]['vertices'][1]['x'],bbox["boundingBox"]['vertices'][1]['y']])
temp_box.append([bbox["boundingBox"]['vertices'][2]['x'],bbox["boundingBox"]['vertices'][2]['y']])
temp_box.append([bbox["boundingBox"]['vertices'][3]['x'],bbox["boundingBox"]['vertices'][3]['y']])
temp_box_cv.append(bbox["boundingBox"]['vertices'][0]['x'])
temp_box_cv.append(bbox["boundingBox"]['vertices'][0]['y'])
temp_box_cv.append(bbox["boundingBox"]['vertices'][2]['x'])
temp_box_cv.append(bbox["boundingBox"]['vertices'][2]['y'])
temp_box = np.array(temp_box)
return temp_box,temp_box_cv
def frequent_height(page_info):
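    # Most frequent (modal) text height among the given regions, used as a
    # threshold when choosing the tesseract PSM mode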
text_height = []
if len(page_info) > 0 :
for idx, level in enumerate(page_info):
coord_crop,coord = get_coord(level)
if len(coord)!=0:
text_height.append(abs(coord[3]-coord[1]))
occurence_count = Counter(text_height)
return occurence_count.most_common(1)[0][0]
else :
return 0
def remove_space(a):
return a.replace(" ", "")
def seq_matcher(tgt_text,gt_text):
tgt_text = remove_space(tgt_text)
gt_text = remove_space(gt_text)
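    # Character-level agreement: levenshtein gives the edit distance, and the
    # score is the fraction of ground-truth characters that match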
score = SequenceMatcher(None, gt_text, tgt_text).ratio()
mismatch_count = levenshtein(tgt_text, gt_text)
match_count = abs(len(gt_text)-mismatch_count)
score = match_count/len(gt_text)
# matchs = list(SequenceMatcher(None, gt_text, tgt_text).get_matching_blocks())
# match_count=0
## match_lis = []
# for match in matchs:
# match_count = match_count + match.size
message = {"ground":True,"input":True}
if score==0.0:
if len(gt_text)>0 and len(tgt_text)==0:
message['input'] = "text missing in tesseract"
if len(gt_text)==0 and len(tgt_text)>0:
message['ground'] = "text missing in google vision"
if score==1.0 and len(gt_text)==0 and len(tgt_text)==0:
message['ground'] = "text missing in google vision"
message['input'] = "text missing in tesseract"
return score,message,match_count
def count_mismatch_char(gt ,tgt) :
count=0
gt_count = len(gt)
for i,j in zip(gt,tgt):
if i==j:
count=count+1
mismatch_char = abs(gt_count-count)
return mismatch_char
def correct_region(region):
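    # Expand the region's bounding box horizontally by crop_factor and
    # vertically by crop_factor_y pixels before cropping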
box = region['boundingBox']['vertices']
tmp=0
region['boundingBox']= {'vertices' : [{'x':box[0]['x']-crop_factor,'y':box[0]['y']-crop_factor_y},\
{'x':box[1]['x']+crop_factor+tmp,'y':box[1]['y']-crop_factor_y},\
{'x':box[2]['x']+crop_factor+tmp,'y':box[2]['y']+crop_factor_y},\
{'x':box[3]['x']-crop_factor,'y': box[3]['y']+crop_factor_y}]}
return region
def sort_line(line):
line['regions'].sort(key=lambda x: x['boundingBox']['vertices'][0]['x'],reverse=False)
return line
def cell_ocr_word(lang, page_path, line,save_base_path,mode_height):
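    # OCR a table cell word by word, after dynamic coordinate adjustment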
cell_text =""
conf_dicts=[]
#updated_lines = horzontal_merging(line['regions'])
dynamic_line = coord_adjustment(page_path,line['regions'] ,save_base_path)
for word_idx, word in enumerate(dynamic_line):
word = correct_region(word)
coord_crop, coord = get_coord(word)
if len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :
text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,8)
cell_text = cell_text +" " +text
conf_dicts.extend(conf_dict)
return cell_text,conf_dicts
def cell_text_ocr(lang, page_path, line,save_base_path,mode_height):
cell_text =""
cell_regions = []
#updated_lines = horzontal_merging(line['regions'])
for word_idx, word in enumerate(line['regions']):
word = correct_region(word)
coord_crop, coord = get_coord(word)
if len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :
text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,8)
cell_text = cell_text +" " +text
return cell_text
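# cell_ocr (below) handles table cells: it merges word boxes into full lines via
# horzontal_merging, re-fits the line boxes on the page image via coord_adjustment,
# then runs tesseract on each merged line, returning the google-vision line text,
# the tesseract text and the word-level confidence entries.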
def cell_ocr(lang, page_path, line,save_base_path,mode_height,psm):
text =""
cell_google_text = ""
conf_dicts = []
updated_lines = horzontal_merging(line['regions'])
dynamic_line = coord_adjustment(page_path,updated_lines ,save_base_path)
for updated_line in dynamic_line:
line_text = updated_line['text']
cell_google_text= cell_google_text + " "+line_text
corrected_line = correct_region(updated_line)
coord_crop, coord = get_coord(corrected_line)
if len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :
tess_text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,psm)
text = text + " " + tess_text
conf_dicts.extend(conf_dict)
return cell_google_text,text,conf_dicts
def text_extraction(df,lang, page_path, regions,save_base_path):
final_score = 0
total_words = 0
total_lines = 0
total_chars = 0
total_match_chars = 0
for idx, level in enumerate(regions):
mode_height = frequent_height(level['regions'])
if ocr_level=="WORD":
for line_idx, line in enumerate(level['regions']):
#word_regions = coord_adjustment(page_path, line['regions'],save_base_path)
for word_idx, word in enumerate(line['regions']):
word = correct_region(word)
coord_crop, coord = get_coord(word)
word_text = word['text']
if len(word_text)>0 and len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :
text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,8)
if text_processing:
text_list = text.split()
text = " ".join(text_list)
score,message,match_count = seq_matcher(text,word['text'])
final_score = final_score+score
total_words = total_words+1
total_chars = total_chars+len(remove_space(word['text']))
total_match_chars= total_match_chars+match_count
word['char_match'] = match_count
word['tess_text'] = text
word['conf_dict'] = conf_dict
word['score'] = score
word['message'] = message
columns = word.keys()
df2 = pd.DataFrame([word],columns=columns)
df = df.append(df2, ignore_index=True)
elif len(word_text)>0:
score,message,match_count = seq_matcher("",word['text'])
word['char_match'] = match_count
word['tess_text'] = " "
word['conf_dict'] = None
word['score'] = score
word['message'] = message
columns = word.keys()
df2 = pd.DataFrame([word],columns=columns)
df = df.append(df2, ignore_index=True)
if ocr_level=="LINE":
lines_adjusted = coord_adjustment(page_path, level['regions'],save_base_path)
for line_idx, line_org in enumerate(lines_adjusted):
line_sorted = copy.deepcopy(sort_line(line_org))
line_text,total_word = merger_text(line_sorted)
line = copy.deepcopy(correct_region(line_sorted))
psm = 7
if total_word<2:
#print(line_text)
psm=8
coord_crop, coord = get_coord(line)
print("line text",line_text)
if len(remove_space(line_text))>0 and len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :
if 'class' in line.keys() and line['class']=="CELL":
line_text,text,conf_dict = cell_ocr(lang, page_path, line,save_base_path,mode_height,psm)
elif 'class' in line.keys() and line['class']=="CELL_TEXT":
text,conf_dict = cell_ocr_word(lang, page_path, line,save_base_path,mode_height)
else:
text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,psm)
if text_processing:
text_list = text.split()
text = " ".join(text_list)
score,message,match_count = seq_matcher(text,line_text)
#if score < 1.0:
#text, conf_dict,score = check_psm(page_path,coord_crop,lang,mode_height,save_base_path,psm,score,text,line_text,conf_dict)
final_score = final_score+score
total_lines = total_lines+1
total_chars = total_chars+len(remove_space(line_text))
total_match_chars= total_match_chars+match_count
line['char_match'] = match_count
line['tess_text'] = text
line['text'] = line_text
line['conf_dict'] = conf_dict
line['score'] = score
line['message'] = message
columns = line.keys()
df2 = pd.DataFrame([line],columns=columns)
df = df.append(df2, ignore_index=True)
elif len(remove_space(line_text))>0:
score,message,match_count = seq_matcher("",line_text)
line['char_match'] = match_count
line['tess_text'] = " "
line['conf_dict'] = None
line['text'] = line_text
line['score'] = score
line['message'] = message
columns = line.keys()
df2 = pd.DataFrame([line],columns=columns)
df = df.append(df2, ignore_index=True)
#return regions,final_score/total_words,df,total_chars,total_match_chars
return regions,final_score/total_lines,df,total_chars,total_match_chars
json_files_path = glob.glob(output_path+"/*/gv.json")
def tesseract(json_files):
output = []
dfs =[]
for json_file in json_files:
file_name = json_file.split('/')[-1].split('.json')[0]
pdf_name = json_file.split('/')[-2]
print("file name--------------------->>>>>>>>>>>>>>>>>>",pdf_name)
if not os.path.exists(base_path+pdf_name):
os.mkdir(base_path+pdf_name)
save_base_path = base_path+pdf_name
with open(json_file,'r+') as f:
data = json.load(f)
columns = ["page_path","page_data","file_eval_info"]
final_df = pd.DataFrame(columns=columns)
Draw(data,save_base_path,regions='regions')
lang = data['outputs'][0]['config']['OCR']['language']
total_page = len(data['outputs'][0]['pages'])
file_score = 0; total_chars_file = 0
file_data = []; total_match_chars_file = 0
page_paths = []
page_data_counts = []
for idx,page_data in enumerate(data['outputs'][0]['pages']):
t1 = time.time()
print("processing started for page no. ",idx)
page_path = page_data['path']
regions = page_data['regions'][1:]
df = pd.DataFrame()
regions,score,df,total_chars,total_match_chars = text_extraction(df,lang, page_path, regions,save_base_path)
file_score = file_score + score
total_chars_file =total_chars_file +total_chars
total_match_chars_file = total_match_chars_file+total_match_chars
file_data.append(df.to_csv())
page_paths.append(page_path)
char_details = {"total_chars":total_chars,"total_match_chars":total_match_chars}
page_data_counts.append(char_details)
data['outputs'][0]['pages'][idx]["regions"][1:] = copy.deepcopy(regions)
t2 = time.time()-t1
print("processing completed for page in {}".format(t2))
file_eval_info = {"total_chars":total_chars_file,"total_match_chars":total_match_chars_file,"score":total_match_chars_file/total_chars_file}
print(file_eval_info)
final_df["page_path"] = page_paths
final_df["page_data"] = file_data
final_df["file_eval_info"] = [file_eval_info]*len(page_paths)
print("file level evaluation result------------------->>>>>>>>>>>>>>>>>>>>>>>>>>>",file_eval_info)
data['outputs'][0]['score'] = file_score/total_page
with open(save_base_path+"/"+file_name+".json", 'w') as outfile:
json.dump(data, outfile)
final_df.to_csv(save_base_path+"/"+file_name+'.csv')
return output,final_df
output,dfs = tesseract(json_files_path)
def draw_thresh_box(df,path,page_index,save_path):
path = path.split('upload')[1]
image = download_file(download_url,headers,path,f_type='image')
nparr = np.frombuffer(image, np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
font = cv2.FONT_HERSHEY_SIMPLEX
color= (255,0,0);thickness=5
df =df.reset_index()
for row in df.iterrows():
row2 = row[1].to_dict()
boxes = row2['boundingBox']
boxes2 = ast.literal_eval(boxes)
ground = boxes2['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x']) ,int(pt['y'])])
cv2.polylines(image, [np.array(pts)],True, color, thickness)
cv2.putText(image, str(row2['text']), (pts[0][0],pts[0][1]), font,
2, (0,0,255), 2, cv2.LINE_AA)
cv2.putText(image, str(row2['tess_text']), (pts[1][0],pts[1][1]), font,
2, (0,255,0), 2, cv2.LINE_AA)
image_path = os.path.join(save_path , '{}.png'.format(page_index))
cv2.imwrite(image_path , image)
def visualize_results(df_paths,thresh):
for df_path in glob.glob(df_paths+"*/*.csv"):
save_path = base_path + df_path.split('/')[-2]+"/"
df = pd.read_csv(df_path)
for idx,(page_path,page_data) in enumerate(zip(df['page_path'],df['page_data'])):
df_string = StringIO(page_data)
page_df = pd.read_csv(df_string, sep=",")
filtered_df = page_df[page_df['score']<thresh]
draw_thresh_box(filtered_df,page_path,idx,save_path)
visualize_results(base_path,vis_thresh)
| 39.523375
| 267
| 0.585454
|
import glob
import uuid
import json
import requests
import copy,time
import os
import cv2
import numpy as np
from time import sleep
import pandas as pd
import logging
from collections import Counter
import pytesseract
from pytesseract import Output
from difflib import SequenceMatcher
from io import StringIO
from dynamic_adjustment import coord_adjustment
import ast
from leven import levenshtein
from horizontal_merging import horzontal_merging
ocr_level = "LINE"
text_processing = True
REJECT_FILTER = 2
crop_factor= 5
crop_factor_y= 0
crop_save = True
digitization = True
vis_thresh=0.90
LANG_MAPPING = {
"en" : ["Latin","eng"],
"kn" : ['Kannada',"kan"],
"gu": ["guj"],
"or": ["ori"],
"hi" : ["Devanagari","hin","eng"],
"bn" : ["Bengali","ben"],
"mr": ["Devanagari","hin","eng"],
"ta": ['Tamil',"tam"],
"te" : ["Telugu","tel"],
"ml" :["Malayalam"],
"ma" :["Marathi"]
}
path = '/home/naresh/Tarento/testing_document_processor/test_pipeline/data/'
output_path = '/home/naresh/Tarento/testing_document_processor/test_pipeline/result/'
output_path_boxes= '/home/naresh/Tarento/testing_document_processor/test_word_boxes/'
base_path= '/home/naresh/Tarento/testing_document_processor/test_word_boxes/'
psms = [6,7,8,9,10,11]
token = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyTmFtZSI6ImRoaXJhai5kYWdhQHRhcmVudG8uY29tIiwicGFzc3dvcmQiOiJiJyQyYiQxMiRuTXdNcHpCVlBXVVUvSlVLWXBKYWkuQUd2SUNJalJVcUdIbnBPenRzai5VRU55emlSZmk1TyciLCJleHAiOjE2MTk3Njg2NjN9.14IL5_kw83F5gxjUMSw6kCDLYQhjAg306AwJj0DsxWc'
word_url = "https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate"
google_url = "https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate"
layout_url = "https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate"
segmenter_url = "https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate"
bs_url ="https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/jobs/search/bulk"
evaluator_url = "https://auth.anuvaad.org/anuvaad-etl/document-processor/evaluator/v0/process"
download_url ="https://auth.anuvaad.org/download/"
upload_url = 'https://auth.anuvaad.org/anuvaad-api/file-uploader/v0/upload-file'
headers = {
'auth-token' :token }
class Draw:
def __init__(self,input_json,save_dir,regions,prefix='',color= (255,0,0),thickness=5):
self.json = input_json
self.save_dir = save_dir
self.regions = regions
self.prefix = prefix
self.color = color
self.thickness=thickness
if self.prefix == 'seg':
self.draw_region_children()
else:
self.draw_region__sub_children()
def get_coords(self,page_index):
return self.json['outputs'][0]['pages'][page_index][self.regions]
def get_page_count(self):
return(self.json['outputs'][0]['page_info'])
def get_page(self,page_index):
page_path = self.json['outputs'][0]['page_info'][page_index]
page_path = page_path.split('upload')[1]
return download_file(download_url,headers,page_path,f_type='image')
def draw_region(self):
font = cv2.FONT_HERSHEY_SIMPLEX
for page_index in range(len(self.get_page_count())) :
nparr = np.frombuffer(self.get_page(page_index), np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
for region in self.get_coords(page_index) :
ground = region['boundingBox']['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x']) ,int(pt['y'])])
cv2.polylines(image, [np.array(pts)],True, self.color, self.thickness)
if 'class' not in region.keys():
region['class'] = 'TEXT'
cv2.putText(image, str(region['class']), (pts[0][0],pts[0][1]), font,
2, (0,125,255), 3, cv2.LINE_AA)
image_path = os.path.join(self.save_dir , '{}_{}_{}.png'.format(self.regions,self.prefix,page_index))
cv2.imwrite(image_path , image)
def draw_region_children(self):
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 2
thickness =3
for page_index in range(len(self.get_page_count())) :
nparr = np.frombuffer(self.get_page(page_index), np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
for region_index,region in enumerate(self.get_coords(page_index)) :
try:
ground = region['boundingBox']['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x']) ,int(pt['y'])])
region_color = (0 ,0,125+ 130*(region_index/ len(self.get_coords(page_index))))
cv2.polylines(image, [np.array(pts)],True, region_color, self.thickness)
cv2.putText(image, str(region_index), (pts[0][0],pts[0][1]), font,
fontScale, region_color, thickness, cv2.LINE_AA)
for line_index, line in enumerate(region['children']):
ground = line['boundingBox']['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x']) ,int(pt['y'])])
line_color = (125 + 130*(region_index/ len(self.get_coords(page_index))) ,0,0)
cv2.polylines(image, [np.array(pts)],True, line_color, self.thickness -2)
cv2.putText(image, str(line_index), (pts[0][0],pts[0][1]), font,
fontScale, line_color, thickness, cv2.LINE_AA)
except Exception as e:
print(str(e))
print(region)
image_path = os.path.join(self.save_dir , '{}_{}.png'.format(self.prefix,page_index))
cv2.imwrite(image_path , image)
def draw_region__sub_children(self):
for page_index in range(len(self.get_page_count())) :
nparr = np.frombuffer(self.get_page(page_index), np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 2
color = (0 ,255,0)
thickness = 3
for region_index,region in enumerate(self.get_coords(page_index)) :
try:
ground = region['boundingBox']['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x']) ,int(pt['y'])])
region_color = (0,0,255)
cv2.polylines(image, [np.array(pts)],True, region_color, self.thickness)
for line_index, line in enumerate(region['regions']):
ground = line['boundingBox']['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x'])-1 ,int(pt['y']) -1 ])
line_color = (255,0,0)
cv2.polylines(image, [np.array(pts)],True, line_color, self.thickness -2)
cv2.putText(image, str(line_index), (pts[0][0],pts[0][1]), font,
fontScale, (255,0,0), thickness, cv2.LINE_AA)
for word_index, word in enumerate(line['regions']):
ground = word['boundingBox']['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x']) -3,int(pt['y'])-3])
word_color = (0,255,0)
cv2.polylines(image, [np.array(pts)],True, word_color, self.thickness -2)
cv2.putText(image, str(word_index), (pts[0][0],pts[0][1]), font,
fontScale-1,(0,255,0), thickness, cv2.LINE_AA)
except Exception as e:
print(str(e))
print(region)
image_path = os.path.join(self.save_dir , '{}_{}_{}.png'.format(self.prefix,self.regions,page_index))
cv2.imwrite(image_path , image)
def google_ocr_v15(url,headers,pdf_name):
file = {
"files": [
{
"locale": "hi",
"path": pdf_name,
"type": "pdf",
"config":{
"OCR": {
"option": "HIGH_ACCURACY",
"language": "hi",
"top_correction":"True",
"craft_word": "True",
"craft_line": "True",
}
}}
],
"workflowCode": "WF_A_FCWDLDBSOD15GV"
}
res = requests.post(url,json=file,headers=headers)
return res.json()
def upload_file(pdf_file,headers,url):
files = [
('file',(open(pdf_file,'rb')))]
response = requests.post(url, headers=headers, files=files)
return response.json()
def download_file(download_url,headers,outputfile,f_type='json'):
download_url =download_url+str(outputfile)
res = requests.get(download_url,headers=headers)
if f_type == 'json':
return res.json()
else :
return res.content
def save_json(path,res):
with open(path, "w", encoding='utf8') as write_file:
json.dump(res, write_file,ensure_ascii=False )
def bulk_search(job_id,bs_url,headers):
bs_request = {
"jobIDs": [job_id],
"taskDetails":"true"
}
print(job_id)
res = requests.post(bs_url,json=bs_request,headers=headers, timeout = 10000)
print(res.json())
while(1):
in_progress = res.json()['jobs'][0]['status']
if in_progress == 'COMPLETED':
outputfile = res.json()['jobs'][0]['output'][0]['outputFile']
print(in_progress)
return outputfile
break
sleep(0.5)
print(in_progress)
res = requests.post(bs_url,json=bs_request,headers=headers, timeout = 10000)
def execute_module(module,url,input_file,module_code,pdf_dir,overwirte=True , draw=True):
output_path = os.path.join(pdf_dir,'{}.json'.format(module_code))
if os.path.exists(output_path) and not overwirte:
print(' loading *****************{}'.format(module_code ))
with open(output_path,'r') as wd_file :
response = json.load(wd_file)
wf_res = pdf_dir + '/{}_wf.json'.format(module_code)
with open(wf_res,'r') as wd_file :
json_file = json.load(wd_file)
else :
if module_code in ['wd','gv']:
res = upload_file(input_file,headers,upload_url)
print('upload response **********', res)
pdf_name = res['data']
response = module(url,headers,pdf_name)
else :
response = module(url,headers,input_file)
if 'eval' in module_code :
json_file = response['outputFile']
response = download_file(download_url,headers,json_file)
save_json(output_path,response)
return json_file,response
print(' response *****************{} {}'.format(module_code ,response ))
job_id = response['jobID']
json_file = bulk_search(job_id,bs_url,headers)
save_json(pdf_dir + '/{}_wf.json'.format(module_code),json_file)
print('bulk search response **************',json_file )
response = download_file(download_url,headers,json_file)
save_json(output_path,response)
if draw :
if module_code in ['wd','gv']:
Draw(response,pdf_dir,regions='lines',prefix=module_code)
else :
Draw(response,pdf_dir,regions='regions',prefix=module_code)
return json_file,response
def evaluate__and_save_input(pdf_files,output_dir,headers,word_url,layout_url,download_url,upload_url,bs_url):
word_responses = {}
layout_responses = {}
segmenter_responses = []
for pdf in pdf_files:
pdf_name = pdf.split('/')[-1].split('.')[0]
print(pdf , ' is being processed')
pdf_output_dir = os.path.join(output_dir,pdf_name)
os.system('mkdir -p "{}"'.format(pdf_output_dir))
wd_json,_ = execute_module(google_ocr_v15,word_url,input_file=pdf,module_code='gv',pdf_dir=pdf_output_dir,overwirte=False , draw=False)
def main(path,headers,word_url,layout_url,download_url,upload_url,bs_url):
pdf_names = glob.glob(path + '/*.pdf')
return evaluate__and_save_input(pdf_names,output_path,headers,word_url,layout_url,download_url,upload_url,bs_url)
if digitization:
main(path,headers,word_url,layout_url,download_url,upload_url,bs_url)
def bound_coordinate(corrdinate,max):
if corrdinate < 0 :
corrdinate = 0
if corrdinate > max:
corrdinate = max - 2
return int(corrdinate)
def get_image_from_box(image, box, height=140):
w = max(abs(box[0, 0] - box[1, 0]),abs(box[2, 0] - box[3, 0]))
height = max(abs(box[0, 1] - box[3, 1]),abs(box[1, 1] - box[2, 1]))
pts1 = np.float32(box)
pts2 = np.float32([[0, 0], [int(w), 0],[int(w),int(height)],[0,int(height)]])
M = cv2.getPerspectiveTransform(pts1, pts2)
result_img = cv2.warpPerspective(image,M,(int(w), int(height)))
return result_img
def process_dfs(temp_df):
temp_df = temp_df[temp_df.text.notnull()]
text = ""
conf=0
temp_dict1 = []
for index, row in temp_df.iterrows():
temp_dict2 = {}
conf = conf + row["conf"]
temp_dict2["text"]=row['text']
temp_dict2["conf"]=row['conf']
text = text +" "+ str(row['text'])
temp_dict1.append(temp_dict2)
return text,temp_dict1
def process_dfs_updated(temp_df,language,psm_val,image):
temp_df = temp_df[temp_df.text.notnull()]
text = ""
conf=0
temp_dict1 = []
if len(temp_df)>0:
for index, row in temp_df.iterrows():
temp_dict2 = {}
org_conf = row["conf"]
org_text = row['text']
flag = True
if row["conf"]<50:
print(row["top"],row["height"],row["left"],row["width"])
crop_image = image[ int(row["top"]):int(row["top"]+row["height"]), int(row["left"]):int(row["left"]+row["width"])]
for psm in psms:
df2 = pytesseract.image_to_data(crop_image,config='--psm '+str(psm), lang=LANG_MAPPING[language][0],output_type=Output.DATAFRAME)
temp_df2 = df2[df2.text.notnull()]
if len(temp_df2)>0:
new_conf = temp_df2.iloc[0].conf
if org_conf<new_conf:
org_conf = new_conf
org_text = temp_df2.iloc[0].text
if flag:
print("old text", row['text'])
print("new text", org_text)
conf = conf + org_conf
temp_dict2["text"]=org_text
temp_dict2["conf"]=org_conf
text = text +" "+ str(org_text)
temp_dict1.append(temp_dict2)
return text,temp_dict1
def check_psm(path,coord,language,mode_height,save_base_path,psm_val,org_score,org_text,line_text,org_conf):
for psm in psms:
text,conf_dict = get_text(path,coord,language,mode_height,save_base_path,psm)
if text_processing:
text_list = text.split()
text = " ".join(text_list)
score,message,match_count = seq_matcher(text,line_text)
if score==1.0 or score==1:
org_score = score
org_text = text
org_conf = conf_dict
break
elif score>org_score:
org_score =score
org_text = text
org_conf = conf_dict
return org_text, org_conf,org_score
def get_text(path,coord,language,mode_height,save_base_path,psm_val):
path = path.split('upload')[1]
image = download_file(download_url,headers,path,f_type='image')
nparr = np.frombuffer(image, np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
height, width,channel = image.shape
crop_image = get_image_from_box(image, coord, height=abs(coord[0,1]-coord[2,1]))
save_path = save_base_path+"/"+"_psm_pers"+str(psm_val)+"--"+str(uuid.uuid4()) + '.jpg'
if crop_save:
cv2.imwrite(save_path,crop_image)
if abs(coord[1,1]-coord[2,1])>mode_height:
dfs = pytesseract.image_to_data(crop_image,config='--psm 6', lang=LANG_MAPPING[language][0],output_type=Output.DATAFRAME)
text,conf_dict = process_dfs_updated(dfs,language,6,crop_image)
else:
dfs = pytesseract.image_to_data(crop_image,config='--psm '+str(psm_val), lang=LANG_MAPPING[language][0],output_type=Output.DATAFRAME)
text,conf_dict = process_dfs_updated(dfs,language,psm_val,crop_image)
return text,conf_dict
def merger_text(line):
text = ""
word_count=0
for word_idx, word in enumerate(line['regions']):
if "text" in word.keys() and word["text"].replace(" ", "") != "":
text = text+" "+ word["text"]
word_count=word_count+1
return text, word_count
def get_coord(bbox):
temp_box = []
temp_box_cv = []
temp_box.append([bbox["boundingBox"]['vertices'][0]['x'],bbox["boundingBox"]['vertices'][0]['y']])
temp_box.append([bbox["boundingBox"]['vertices'][1]['x'],bbox["boundingBox"]['vertices'][1]['y']])
temp_box.append([bbox["boundingBox"]['vertices'][2]['x'],bbox["boundingBox"]['vertices'][2]['y']])
temp_box.append([bbox["boundingBox"]['vertices'][3]['x'],bbox["boundingBox"]['vertices'][3]['y']])
temp_box_cv.append(bbox["boundingBox"]['vertices'][0]['x'])
temp_box_cv.append(bbox["boundingBox"]['vertices'][0]['y'])
temp_box_cv.append(bbox["boundingBox"]['vertices'][2]['x'])
temp_box_cv.append(bbox["boundingBox"]['vertices'][2]['y'])
temp_box = np.array(temp_box)
return temp_box,temp_box_cv
def frequent_height(page_info):
text_height = []
if len(page_info) > 0 :
for idx, level in enumerate(page_info):
coord_crop,coord = get_coord(level)
if len(coord)!=0:
text_height.append(abs(coord[3]-coord[1]))
occurence_count = Counter(text_height)
return occurence_count.most_common(1)[0][0]
else :
return 0
def remove_space(a):
return a.replace(" ", "")
def seq_matcher(tgt_text,gt_text):
tgt_text = remove_space(tgt_text)
gt_text = remove_space(gt_text)
score = SequenceMatcher(None, gt_text, tgt_text).ratio()
mismatch_count = levenshtein(tgt_text, gt_text)
match_count = abs(len(gt_text)-mismatch_count)
score = match_count/len(gt_text)
{"ground":True,"input":True}
if score==0.0:
if len(gt_text)>0 and len(tgt_text)==0:
message['input'] = "text missing in tesseract"
if len(gt_text)==0 and len(tgt_text)>0:
message['ground'] = "text missing in google vision"
if score==1.0 and len(gt_text)==0 and len(tgt_text)==0:
message['ground'] = "text missing in google vision"
message['input'] = "text missing in tesseract"
return score,message,match_count
def count_mismatch_char(gt ,tgt) :
count=0
gt_count = len(gt)
for i,j in zip(gt,tgt):
if i==j:
count=count+1
mismatch_char = abs(gt_count-count)
return mismatch_char
def correct_region(region):
box = region['boundingBox']['vertices']
tmp=0
region['boundingBox']= {'vertices' : [{'x':box[0]['x']-crop_factor,'y':box[0]['y']-crop_factor_y},\
{'x':box[1]['x']+crop_factor+tmp,'y':box[1]['y']-crop_factor_y},\
{'x':box[2]['x']+crop_factor+tmp,'y':box[2]['y']+crop_factor_y},\
{'x':box[3]['x']-crop_factor,'y': box[3]['y']+crop_factor_y}]}
return region
def sort_line(line):
line['regions'].sort(key=lambda x: x['boundingBox']['vertices'][0]['x'],reverse=False)
return line
def cell_ocr_word(lang, page_path, line,save_base_path,mode_height):
cell_text =""
conf_dicts=[]
dynamic_line = coord_adjustment(page_path,line['regions'] ,save_base_path)
for word_idx, word in enumerate(dynamic_line):
word = correct_region(word)
coord_crop, coord = get_coord(word)
if len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :
text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,8)
cell_text = cell_text +" " +text
conf_dicts.extend(conf_dict)
return cell_text,conf_dicts
def cell_text_ocr(lang, page_path, line,save_base_path,mode_height):
cell_text =""
cell_regions = []
for word_idx, word in enumerate(line['regions']):
word = correct_region(word)
coord_crop, coord = get_coord(word)
if len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :
text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,8)
cell_text = cell_text +" " +text
return cell_text
def cell_ocr(lang, page_path, line,save_base_path,mode_height,psm):
text =""
cell_google_text = ""
conf_dicts = []
updated_lines = horzontal_merging(line['regions'])
dynamic_line = coord_adjustment(page_path,updated_lines ,save_base_path)
for updated_line in dynamic_line:
line_text = updated_line['text']
cell_google_text= cell_google_text + " "+line_text
corrected_line = correct_region(updated_line)
coord_crop, coord = get_coord(corrected_line)
if len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :
tess_text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,psm)
text = text + " " + tess_text
conf_dicts.extend(conf_dict)
return cell_google_text,text,conf_dicts
def text_extraction(df,lang, page_path, regions,save_base_path):
final_score = 0
total_words = 0
total_lines = 0
total_chars = 0
total_match_chars = 0
for idx, level in enumerate(regions):
mode_height = frequent_height(level['regions'])
if ocr_level=="WORD":
for line_idx, line in enumerate(level['regions']):
for word_idx, word in enumerate(line['regions']):
word = correct_region(word)
coord_crop, coord = get_coord(word)
word_text = word['text']
if len(word_text)>0 and len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :
text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,8)
if text_processing:
text_list = text.split()
text = " ".join(text_list)
score,message,match_count = seq_matcher(text,word['text'])
final_score = final_score+score
total_words = total_words+1
total_chars = total_chars+len(remove_space(word['text']))
total_match_chars= total_match_chars+match_count
word['char_match'] = match_count
word['tess_text'] = text
word['conf_dict'] = conf_dict
word['score'] = score
word['message'] = message
columns = word.keys()
df2 = pd.DataFrame([word],columns=columns)
df = df.append(df2, ignore_index=True)
elif len(word_text)>0:
score,message,match_count = seq_matcher("",word['text'])
word['char_match'] = match_count
word['tess_text'] = " "
word['conf_dict'] = None
word['score'] = score
word['message'] = message
columns = word.keys()
df2 = pd.DataFrame([word],columns=columns)
df = df.append(df2, ignore_index=True)
if ocr_level=="LINE":
lines_adjusted = coord_adjustment(page_path, level['regions'],save_base_path)
for line_idx, line_org in enumerate(lines_adjusted):
line_sorted = copy.deepcopy(sort_line(line_org))
line_text,total_word = merger_text(line_sorted)
line = copy.deepcopy(correct_region(line_sorted))
psm = 7
if total_word<2:
psm=8
coord_crop, coord = get_coord(line)
print("line text",line_text)
if len(remove_space(line_text))>0 and len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :
if 'class' in line.keys() and line['class']=="CELL":
line_text,text,conf_dict = cell_ocr(lang, page_path, line,save_base_path,mode_height,psm)
elif 'class' in line.keys() and line['class']=="CELL_TEXT":
text,conf_dict = cell_ocr_word(lang, page_path, line,save_base_path,mode_height)
else:
text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,psm)
if text_processing:
text_list = text.split()
text = " ".join(text_list)
score,message,match_count = seq_matcher(text,line_text)
final_score = final_score+score
total_lines = total_lines+1
total_chars = total_chars+len(remove_space(line_text))
total_match_chars= total_match_chars+match_count
line['char_match'] = match_count
line['tess_text'] = text
line['text'] = line_text
line['conf_dict'] = conf_dict
line['score'] = score
line['message'] = message
columns = line.keys()
df2 = pd.DataFrame([line],columns=columns)
df = df.append(df2, ignore_index=True)
elif len(remove_space(line_text))>0:
score,message,match_count = seq_matcher("",line_text)
line['char_match'] = match_count
line['tess_text'] = " "
line['conf_dict'] = None
line['text'] = line_text
line['score'] = score
line['message'] = message
columns = line.keys()
df2 = pd.DataFrame([line],columns=columns)
df = df.append(df2, ignore_index=True)
return regions,final_score/total_lines,df,total_chars,total_match_chars
json_files_path = glob.glob(output_path+"/*/gv.json")
def tesseract(json_files):
output = []
dfs =[]
for json_file in json_files:
file_name = json_file.split('/')[-1].split('.json')[0]
pdf_name = json_file.split('/')[-2]
print("file name--------------------->>>>>>>>>>>>>>>>>>",pdf_name)
if not os.path.exists(base_path+pdf_name):
os.mkdir(base_path+pdf_name)
save_base_path = base_path+pdf_name
with open(json_file,'r+') as f:
data = json.load(f)
columns = ["page_path","page_data","file_eval_info"]
final_df = pd.DataFrame(columns=columns)
Draw(data,save_base_path,regions='regions')
lang = data['outputs'][0]['config']['OCR']['language']
total_page = len(data['outputs'][0]['pages'])
file_score = 0; total_chars_file = 0
file_data = []; total_match_chars_file = 0
page_paths = []
page_data_counts = []
for idx,page_data in enumerate(data['outputs'][0]['pages']):
t1 = time.time()
print("processing started for page no. ",idx)
page_path = page_data['path']
regions = page_data['regions'][1:]
df = pd.DataFrame()
regions,score,df,total_chars,total_match_chars = text_extraction(df,lang, page_path, regions,save_base_path)
file_score = file_score + score
total_chars_file =total_chars_file +total_chars
total_match_chars_file = total_match_chars_file+total_match_chars
file_data.append(df.to_csv())
page_paths.append(page_path)
char_details = {"total_chars":total_chars,"total_match_chars":total_match_chars}
page_data_counts.append(char_details)
data['outputs'][0]['pages'][idx]["regions"][1:] = copy.deepcopy(regions)
t2 = time.time()-t1
print("processing completed for page in {}".format(t2))
file_eval_info = {"total_chars":total_chars_file,"total_match_chars":total_match_chars_file,"score":total_match_chars_file/total_chars_file}
print(file_eval_info)
final_df["page_path"] = page_paths
final_df["page_data"] = file_data
final_df["file_eval_info"] = [file_eval_info]*len(page_paths)
print("file level evaluation result------------------->>>>>>>>>>>>>>>>>>>>>>>>>>>",file_eval_info)
data['outputs'][0]['score'] = file_score/total_page
with open(save_base_path+"/"+file_name+".json", 'w') as outfile:
json.dump(data, outfile)
final_df.to_csv(save_base_path+"/"+file_name+'.csv')
return output,final_df
output,dfs = tesseract(json_files_path)
def draw_thresh_box(df,path,page_index,save_path):
path = path.split('upload')[1]
image = download_file(download_url,headers,path,f_type='image')
nparr = np.frombuffer(image, np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
font = cv2.FONT_HERSHEY_SIMPLEX
color= (255,0,0);thickness=5
df =df.reset_index()
for row in df.iterrows():
row2 = row[1].to_dict()
boxes = row2['boundingBox']
boxes2 = ast.literal_eval(boxes)
ground = boxes2['vertices']
pts = []
for pt in ground:
pts.append([int(pt['x']) ,int(pt['y'])])
cv2.polylines(image, [np.array(pts)],True, color, thickness)
cv2.putText(image, str(row2['text']), (pts[0][0],pts[0][1]), font,
2, (0,0,255), 2, cv2.LINE_AA)
cv2.putText(image, str(row2['tess_text']), (pts[1][0],pts[1][1]), font,
2, (0,255,0), 2, cv2.LINE_AA)
image_path = os.path.join(save_path , '{}.png'.format(page_index))
cv2.imwrite(image_path , image)
def visualize_results(df_paths,thresh):
for df_path in glob.glob(df_paths+"*/*.csv"):
save_path = base_path + df_path.split('/')[-2]+"/"
df = pd.read_csv(df_path)
for idx,(page_path,page_data) in enumerate(zip(df['page_path'],df['page_data'])):
df_string = StringIO(page_data)
page_df = pd.read_csv(df_string, sep=",")
filtered_df = page_df[page_df['score']<thresh]
draw_thresh_box(filtered_df,page_path,idx,save_path)
visualize_results(base_path,vis_thresh)
| true
| true
|
f71a0fa1a2c43932c97418939b1e8e7d6e4bf79a
| 4,010
|
py
|
Python
|
tools/bitmap_converter.py
|
AlexShiLucky/nuttx-apps
|
2bafb70ce1e7af96640c501d3ce3d2a2bf29c9e5
|
[
"Apache-2.0"
] | 10
|
2021-03-15T03:58:06.000Z
|
2021-12-30T15:33:38.000Z
|
tools/bitmap_converter.py
|
AlexShiLucky/nuttx-apps
|
2bafb70ce1e7af96640c501d3ce3d2a2bf29c9e5
|
[
"Apache-2.0"
] | 1
|
2021-02-24T12:30:54.000Z
|
2021-02-24T12:30:54.000Z
|
tools/bitmap_converter.py
|
AlexShiLucky/nuttx-apps
|
2bafb70ce1e7af96640c501d3ce3d2a2bf29c9e5
|
[
"Apache-2.0"
] | 4
|
2021-03-06T09:35:58.000Z
|
2021-05-24T14:34:11.000Z
|
#!/usr/bin/env python
'''This script converts from any image type supported by
Python imaging library to the RLE-encoded format used by
NxWidgets.
'''
from PIL import Image
def get_palette(img, maxcolors = 255):
'''Returns a list of colors. If there are too many colors in the image,
the least used are removed.
'''
img = img.convert("RGB")
colors = img.getcolors(65536)
colors.sort(key = lambda c: -c[0])
return [c[1] for c in colors[:maxcolors]]
def write_palette(outfile, palette):
'''Write the palette (normal and highlight) to the output file.'''
outfile.write('static const NXWidgets::nxwidget_pixel_t palette[BITMAP_PALETTESIZE] =\n');
outfile.write('{\n')
for i in range(0, len(palette), 4):
outfile.write(' ');
for r, g, b in palette[i:i+4]:
outfile.write('MKRGB(%3d,%3d,%3d), ' % (r, g, b))
outfile.write('\n');
outfile.write('};\n\n')
outfile.write('static const NXWidgets::nxwidget_pixel_t hilight_palette[BITMAP_PALETTESIZE] =\n');
outfile.write('{\n')
for i in range(0, len(palette), 4):
outfile.write(' ');
for r, g, b in palette[i:i+4]:
r = min(255, r + 50)
g = min(255, g + 50)
b = min(255, b + 50)
outfile.write('MKRGB(%3d,%3d,%3d), ' % (r, g, b))
outfile.write('\n');
outfile.write('};\n\n')
def quantize(color, palette):
'''Return the color index to closest match in the palette.'''
try:
return palette.index(color)
except ValueError:
# No exact match, search for the closest
def distance(color2):
return sum([(a - b)**2 for a, b in zip(color, color2)])
return palette.index(min(palette, key = distance));
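# Example: with palette [(0, 0, 0), (255, 255, 255)], quantize((10, 12, 8), palette)
# finds no exact match and falls back to the nearest color by squared RGB distance,
# returning index 0.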
def encode_row(img, palette, y):
'''RLE-encode one row of image data.'''
entries = []
color = None
repeats = 0
for x in range(0, img.size[0]):
c = quantize(img.getpixel((x, y)), palette)
if c == color and repeats < 255:
repeats += 1
else:
if color is not None:
entries.append((repeats, color))
repeats = 1
color = c
if color is not None:
entries.append((repeats, color))
return entries
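# Example: a row whose pixels quantize to palette indices [3, 3, 3, 7, 7] is encoded
# as [(3, 3), (2, 7)], i.e. (repeat count, palette index) pairs, with each run capped
# at 255 repeats.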
def write_image(outfile, img, palette):
'''Write the image contents to the output file.'''
outfile.write('static const NXWidgets::SRlePaletteBitmapEntry bitmap[] =\n');
outfile.write('{\n');
for y in range(0, img.size[1]):
entries = encode_row(img, palette, y)
row = ""
for r, c in entries:
if len(row) > 60:
outfile.write(' ' + row + '\n')
row = ""
row += '{%3d, %3d}, ' % (r, c)
row += ' ' * (73 - len(row))
outfile.write(' ' + row + '/* Row %d */\n' % y)
outfile.write('};\n\n');
def write_descriptor(outfile, name):
'''Write the public descriptor structure for the image.'''
outfile.write('extern const struct NXWidgets::SRlePaletteBitmap g_%s =\n' % name)
outfile.write('{\n')
outfile.write(' CONFIG_NXWIDGETS_BPP,\n')
outfile.write(' CONFIG_NXWIDGETS_FMT,\n')
outfile.write(' BITMAP_PALETTESIZE,\n')
outfile.write(' BITMAP_WIDTH,\n')
outfile.write(' BITMAP_HEIGHT,\n')
outfile.write(' {palette, hilight_palette},\n')
outfile.write(' bitmap\n')
outfile.write('};\n')
if __name__ == '__main__':
import sys
import os.path
if len(sys.argv) != 3:
print "Usage: bitmap_converter.py source.png output.cxx"
sys.exit(1)
img = Image.open(sys.argv[1]).convert("RGB")
outfile = open(sys.argv[2], 'w')
palette = get_palette(img)
outfile.write(
'''
/* Automatically generated NuttX bitmap file. */
/* Generated from %(src)s by bitmap_converter.py. */
#include <nxconfig.hxx>
#include <crlepalettebitmap.hxx>
#define BITMAP_WIDTH %(width)s
#define BITMAP_HEIGHT %(height)s
#define BITMAP_PALETTESIZE %(palettesize)s
''' % {'src': sys.argv[1], 'width': img.size[0], 'height': img.size[1],
'palettesize': len(palette)}
)
name = os.path.splitext(os.path.basename(sys.argv[1]))[0]
write_palette(outfile, palette)
write_image(outfile, img, palette)
write_descriptor(outfile, name)
| 26.912752
| 100
| 0.635162
|
'''This script converts from any image type supported by
Python imaging library to the RLE-encoded format used by
NxWidgets.
'''
from PIL import Image
def get_palette(img, maxcolors = 255):
'''Returns a list of colors. If there are too many colors in the image,
the least used are removed.
'''
img = img.convert("RGB")
colors = img.getcolors(65536)
colors.sort(key = lambda c: -c[0])
return [c[1] for c in colors[:maxcolors]]
def write_palette(outfile, palette):
'''Write the palette (normal and highlight) to the output file.'''
outfile.write('static const NXWidgets::nxwidget_pixel_t palette[BITMAP_PALETTESIZE] =\n');
outfile.write('{\n')
for i in range(0, len(palette), 4):
outfile.write(' ');
for r, g, b in palette[i:i+4]:
outfile.write('MKRGB(%3d,%3d,%3d), ' % (r, g, b))
outfile.write('\n');
outfile.write('};\n\n')
outfile.write('static const NXWidgets::nxwidget_pixel_t hilight_palette[BITMAP_PALETTESIZE] =\n');
outfile.write('{\n')
for i in range(0, len(palette), 4):
outfile.write(' ');
for r, g, b in palette[i:i+4]:
r = min(255, r + 50)
g = min(255, g + 50)
b = min(255, b + 50)
outfile.write('MKRGB(%3d,%3d,%3d), ' % (r, g, b))
outfile.write('\n');
outfile.write('};\n\n')
def quantize(color, palette):
'''Return the color index to closest match in the palette.'''
try:
return palette.index(color)
except ValueError:
def distance(color2):
return sum([(a - b)**2 for a, b in zip(color, color2)])
return palette.index(min(palette, key = distance));
def encode_row(img, palette, y):
'''RLE-encode one row of image data.'''
entries = []
color = None
repeats = 0
for x in range(0, img.size[0]):
c = quantize(img.getpixel((x, y)), palette)
if c == color and repeats < 255:
repeats += 1
else:
if color is not None:
entries.append((repeats, color))
repeats = 1
color = c
if color is not None:
entries.append((repeats, color))
return entries
def write_image(outfile, img, palette):
'''Write the image contents to the output file.'''
outfile.write('static const NXWidgets::SRlePaletteBitmapEntry bitmap[] =\n');
outfile.write('{\n');
for y in range(0, img.size[1]):
entries = encode_row(img, palette, y)
row = ""
for r, c in entries:
if len(row) > 60:
outfile.write(' ' + row + '\n')
row = ""
row += '{%3d, %3d}, ' % (r, c)
row += ' ' * (73 - len(row))
outfile.write(' ' + row + '/* Row %d */\n' % y)
outfile.write('};\n\n');
def write_descriptor(outfile, name):
'''Write the public descriptor structure for the image.'''
outfile.write('extern const struct NXWidgets::SRlePaletteBitmap g_%s =\n' % name)
outfile.write('{\n')
outfile.write(' CONFIG_NXWIDGETS_BPP,\n')
outfile.write(' CONFIG_NXWIDGETS_FMT,\n')
outfile.write(' BITMAP_PALETTESIZE,\n')
outfile.write(' BITMAP_WIDTH,\n')
outfile.write(' BITMAP_HEIGHT,\n')
outfile.write(' {palette, hilight_palette},\n')
outfile.write(' bitmap\n')
outfile.write('};\n')
if __name__ == '__main__':
import sys
import os.path
if len(sys.argv) != 3:
print "Usage: bitmap_converter.py source.png output.cxx"
sys.exit(1)
img = Image.open(sys.argv[1]).convert("RGB")
outfile = open(sys.argv[2], 'w')
palette = get_palette(img)
outfile.write(
'''
/* Automatically generated NuttX bitmap file. */
/* Generated from %(src)s by bitmap_converter.py. */
#include <nxconfig.hxx>
#include <crlepalettebitmap.hxx>
#define BITMAP_WIDTH %(width)s
#define BITMAP_HEIGHT %(height)s
#define BITMAP_PALETTESIZE %(palettesize)s
''' % {'src': sys.argv[1], 'width': img.size[0], 'height': img.size[1],
'palettesize': len(palette)}
)
name = os.path.splitext(os.path.basename(sys.argv[1]))[0]
write_palette(outfile, palette)
write_image(outfile, img, palette)
write_descriptor(outfile, name)
| false
| true
|
f71a1006eb8da62d4f7fca2700df5904cd0816c1
| 12,567
|
py
|
Python
|
keras/wrappers/scikit_learn.py
|
phanvanthinh98/keras_LSTM
|
b22cff1e9fd762226ec3dc9d3af3e300484dd833
|
[
"Apache-2.0"
] | 1
|
2021-05-03T05:10:03.000Z
|
2021-05-03T05:10:03.000Z
|
keras/wrappers/scikit_learn.py
|
phanvanthinh98/keras_LSTM
|
b22cff1e9fd762226ec3dc9d3af3e300484dd833
|
[
"Apache-2.0"
] | null | null | null |
keras/wrappers/scikit_learn.py
|
phanvanthinh98/keras_LSTM
|
b22cff1e9fd762226ec3dc9d3af3e300484dd833
|
[
"Apache-2.0"
] | 1
|
2021-11-25T00:17:16.000Z
|
2021-11-25T00:17:16.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for using the Scikit-Learn API with Keras models."""
# pylint: disable=g-classes-have-attributes
import copy
import types
import numpy as np
from keras import losses
from keras.models import Sequential
from keras.utils.generic_utils import has_arg
from keras.utils.np_utils import to_categorical
from tensorflow.python.util.tf_export import keras_export
class BaseWrapper(object):
"""Base class for the Keras scikit-learn wrapper.
Warning: This class should not be used directly.
Use descendant classes instead.
Args:
build_fn: callable function or class instance
**sk_params: model parameters & fitting parameters
The `build_fn` should construct, compile and return a Keras model, which
will then be used to fit/predict. One of the following
three values could be passed to `build_fn`:
1. A function
2. An instance of a class that implements the `__call__` method
3. None. This means you implement a class that inherits from either
`KerasClassifier` or `KerasRegressor`. The `__call__` method of the
present class will then be treated as the default `build_fn`.
`sk_params` takes both model parameters and fitting parameters. Legal model
parameters are the arguments of `build_fn`. Note that like all other
estimators in scikit-learn, `build_fn` should provide default values for
its arguments, so that you could create the estimator without passing any
values to `sk_params`.
`sk_params` could also accept parameters for calling `fit`, `predict`,
`predict_proba`, and `score` methods (e.g., `epochs`, `batch_size`).
fitting (predicting) parameters are selected in the following order:
1. Values passed to the dictionary arguments of
`fit`, `predict`, `predict_proba`, and `score` methods
2. Values passed to `sk_params`
3. The default values of the `keras.models.Sequential`
`fit`, `predict`, `predict_proba` and `score` methods
When using scikit-learn's `grid_search` API, legal tunable parameters are
those you could pass to `sk_params`, including fitting parameters.
In other words, you could use `grid_search` to search for the best
`batch_size` or `epochs` as well as the model parameters.
"""
def __init__(self, build_fn=None, **sk_params):
self.build_fn = build_fn
self.sk_params = sk_params
self.check_params(sk_params)
def check_params(self, params):
"""Checks for user typos in `params`.
Args:
params: dictionary; the parameters to be checked
Raises:
ValueError: if any member of `params` is not a valid argument.
"""
legal_params_fns = [
Sequential.fit, Sequential.predict, Sequential.predict_classes,
Sequential.evaluate
]
if self.build_fn is None:
legal_params_fns.append(self.__call__)
elif (not isinstance(self.build_fn, types.FunctionType) and
not isinstance(self.build_fn, types.MethodType)):
legal_params_fns.append(self.build_fn.__call__)
else:
legal_params_fns.append(self.build_fn)
for params_name in params:
for fn in legal_params_fns:
if has_arg(fn, params_name):
break
else:
if params_name != 'nb_epoch':
raise ValueError('{} is not a legal parameter'.format(params_name))
def get_params(self, **params): # pylint: disable=unused-argument
"""Gets parameters for this estimator.
Args:
**params: ignored (exists for API compatibility).
Returns:
Dictionary of parameter names mapped to their values.
"""
res = self.sk_params.copy()
res.update({'build_fn': self.build_fn})
return res
def set_params(self, **params):
"""Sets the parameters of this estimator.
Args:
**params: Dictionary of parameter names mapped to their values.
Returns:
self
"""
self.check_params(params)
self.sk_params.update(params)
return self
def fit(self, x, y, **kwargs):
"""Constructs a new model with `build_fn` & fit the model to `(x, y)`.
Args:
x : array-like, shape `(n_samples, n_features)`
Training samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.fit`
Returns:
history : object
details about the training history at each epoch.
"""
if self.build_fn is None:
self.model = self.__call__(**self.filter_sk_params(self.__call__))
elif (not isinstance(self.build_fn, types.FunctionType) and
not isinstance(self.build_fn, types.MethodType)):
self.model = self.build_fn(
**self.filter_sk_params(self.build_fn.__call__))
else:
self.model = self.build_fn(**self.filter_sk_params(self.build_fn))
if (losses.is_categorical_crossentropy(self.model.loss) and
len(y.shape) != 2):
y = to_categorical(y)
fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit))
fit_args.update(kwargs)
history = self.model.fit(x, y, **fit_args)
return history
def filter_sk_params(self, fn, override=None):
"""Filters `sk_params` and returns those in `fn`'s arguments.
Args:
fn : arbitrary function
override: dictionary, values to override `sk_params`
Returns:
res : dictionary containing variables
in both `sk_params` and `fn`'s arguments.
"""
override = override or {}
res = {}
for name, value in self.sk_params.items():
if has_arg(fn, name):
res.update({name: value})
res.update(override)
return res
@keras_export('keras.wrappers.scikit_learn.KerasClassifier')
class KerasClassifier(BaseWrapper):
"""Implementation of the scikit-learn classifier API for Keras.
"""
def fit(self, x, y, **kwargs):
"""Constructs a new model with `build_fn` & fit the model to `(x, y)`.
Args:
x : array-like, shape `(n_samples, n_features)`
Training samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.fit`
Returns:
history : object
details about the training history at each epoch.
Raises:
ValueError: In case of invalid shape for `y` argument.
"""
y = np.array(y)
if len(y.shape) == 2 and y.shape[1] > 1:
self.classes_ = np.arange(y.shape[1])
elif (len(y.shape) == 2 and y.shape[1] == 1) or len(y.shape) == 1:
self.classes_ = np.unique(y)
y = np.searchsorted(self.classes_, y)
else:
raise ValueError('Invalid shape for y: ' + str(y.shape))
self.n_classes_ = len(self.classes_)
return super(KerasClassifier, self).fit(x, y, **kwargs)
def predict(self, x, **kwargs):
"""Returns the class predictions for the given test data.
Args:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
**kwargs: dictionary arguments
Legal arguments are the arguments
of `Sequential.predict_classes`.
Returns:
preds: array-like, shape `(n_samples,)`
Class predictions.
"""
kwargs = self.filter_sk_params(Sequential.predict_classes, kwargs)
classes = self.model.predict_classes(x, **kwargs)
return self.classes_[classes]
def predict_proba(self, x, **kwargs):
"""Returns class probability estimates for the given test data.
Args:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
**kwargs: dictionary arguments
Legal arguments are the arguments
of `Sequential.predict_classes`.
Returns:
proba: array-like, shape `(n_samples, n_outputs)`
Class probability estimates.
In the case of binary classification,
to match the scikit-learn API,
will return an array of shape `(n_samples, 2)`
(instead of `(n_sample, 1)` as in Keras).
"""
kwargs = self.filter_sk_params(Sequential.predict_proba, kwargs)
probs = self.model.predict(x, **kwargs)
# check if binary classification
if probs.shape[1] == 1:
# first column is probability of class 0 and second is of class 1
probs = np.hstack([1 - probs, probs])
return probs
def score(self, x, y, **kwargs):
"""Returns the mean accuracy on the given test data and labels.
Args:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y: array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.evaluate`.
Returns:
score: float
Mean accuracy of predictions on `x` wrt. `y`.
Raises:
ValueError: If the underlying model isn't configured to
compute accuracy. You should pass `metrics=["accuracy"]` to
the `.compile()` method of the model.
"""
y = np.searchsorted(self.classes_, y)
kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)
loss_name = self.model.loss
if hasattr(loss_name, '__name__'):
loss_name = loss_name.__name__
if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:
y = to_categorical(y)
outputs = self.model.evaluate(x, y, **kwargs)
if not isinstance(outputs, list):
outputs = [outputs]
for name, output in zip(self.model.metrics_names, outputs):
if name in ['accuracy', 'acc']:
return output
raise ValueError('The model is not configured to compute accuracy. '
'You should pass `metrics=["accuracy"]` to '
'the `model.compile()` method.')
@keras_export('keras.wrappers.scikit_learn.KerasRegressor')
class KerasRegressor(BaseWrapper):
"""Implementation of the scikit-learn regressor API for Keras.
"""
def predict(self, x, **kwargs):
"""Returns predictions for the given test data.
Args:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.predict`.
Returns:
preds: array-like, shape `(n_samples,)`
Predictions.
"""
kwargs = self.filter_sk_params(Sequential.predict, kwargs)
return np.squeeze(self.model.predict(x, **kwargs))
def score(self, x, y, **kwargs):
"""Returns the mean loss on the given test data and labels.
Args:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y: array-like, shape `(n_samples,)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.evaluate`.
Returns:
score: float
Mean accuracy of predictions on `x` wrt. `y`.
"""
kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)
loss = self.model.evaluate(x, y, **kwargs)
if isinstance(loss, list):
return -loss[0]
return -loss
| 35.600567
| 80
| 0.659585
|
import copy
import types
import numpy as np
from keras import losses
from keras.models import Sequential
from keras.utils.generic_utils import has_arg
from keras.utils.np_utils import to_categorical
from tensorflow.python.util.tf_export import keras_export
class BaseWrapper(object):
def __init__(self, build_fn=None, **sk_params):
self.build_fn = build_fn
self.sk_params = sk_params
self.check_params(sk_params)
def check_params(self, params):
legal_params_fns = [
Sequential.fit, Sequential.predict, Sequential.predict_classes,
Sequential.evaluate
]
if self.build_fn is None:
legal_params_fns.append(self.__call__)
elif (not isinstance(self.build_fn, types.FunctionType) and
not isinstance(self.build_fn, types.MethodType)):
legal_params_fns.append(self.build_fn.__call__)
else:
legal_params_fns.append(self.build_fn)
for params_name in params:
for fn in legal_params_fns:
if has_arg(fn, params_name):
break
else:
if params_name != 'nb_epoch':
raise ValueError('{} is not a legal parameter'.format(params_name))
def get_params(self, **params):
res = self.sk_params.copy()
res.update({'build_fn': self.build_fn})
return res
def set_params(self, **params):
self.check_params(params)
self.sk_params.update(params)
return self
def fit(self, x, y, **kwargs):
if self.build_fn is None:
self.model = self.__call__(**self.filter_sk_params(self.__call__))
elif (not isinstance(self.build_fn, types.FunctionType) and
not isinstance(self.build_fn, types.MethodType)):
self.model = self.build_fn(
**self.filter_sk_params(self.build_fn.__call__))
else:
self.model = self.build_fn(**self.filter_sk_params(self.build_fn))
if (losses.is_categorical_crossentropy(self.model.loss) and
len(y.shape) != 2):
y = to_categorical(y)
fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit))
fit_args.update(kwargs)
history = self.model.fit(x, y, **fit_args)
return history
def filter_sk_params(self, fn, override=None):
override = override or {}
res = {}
for name, value in self.sk_params.items():
if has_arg(fn, name):
res.update({name: value})
res.update(override)
return res
@keras_export('keras.wrappers.scikit_learn.KerasClassifier')
class KerasClassifier(BaseWrapper):
def fit(self, x, y, **kwargs):
y = np.array(y)
if len(y.shape) == 2 and y.shape[1] > 1:
self.classes_ = np.arange(y.shape[1])
elif (len(y.shape) == 2 and y.shape[1] == 1) or len(y.shape) == 1:
self.classes_ = np.unique(y)
y = np.searchsorted(self.classes_, y)
else:
raise ValueError('Invalid shape for y: ' + str(y.shape))
self.n_classes_ = len(self.classes_)
return super(KerasClassifier, self).fit(x, y, **kwargs)
def predict(self, x, **kwargs):
kwargs = self.filter_sk_params(Sequential.predict_classes, kwargs)
classes = self.model.predict_classes(x, **kwargs)
return self.classes_[classes]
def predict_proba(self, x, **kwargs):
kwargs = self.filter_sk_params(Sequential.predict_proba, kwargs)
probs = self.model.predict(x, **kwargs)
if probs.shape[1] == 1:
probs = np.hstack([1 - probs, probs])
return probs
def score(self, x, y, **kwargs):
y = np.searchsorted(self.classes_, y)
kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)
loss_name = self.model.loss
if hasattr(loss_name, '__name__'):
loss_name = loss_name.__name__
if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:
y = to_categorical(y)
outputs = self.model.evaluate(x, y, **kwargs)
if not isinstance(outputs, list):
outputs = [outputs]
for name, output in zip(self.model.metrics_names, outputs):
if name in ['accuracy', 'acc']:
return output
raise ValueError('The model is not configured to compute accuracy. '
'You should pass `metrics=["accuracy"]` to '
'the `model.compile()` method.')
@keras_export('keras.wrappers.scikit_learn.KerasRegressor')
class KerasRegressor(BaseWrapper):
def predict(self, x, **kwargs):
kwargs = self.filter_sk_params(Sequential.predict, kwargs)
return np.squeeze(self.model.predict(x, **kwargs))
def score(self, x, y, **kwargs):
kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)
loss = self.model.evaluate(x, y, **kwargs)
if isinstance(loss, list):
return -loss[0]
return -loss
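# Usage sketch for the wrappers above (illustrative, not part of the original
# file; assumes a Keras/TensorFlow version that shipped these wrappers, and
# `make_model`, the data shapes and the sk_params below are hypothetical).
# sk_params such as `units` and `epochs` are routed to the matching callables
# by filter_sk_params().
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import numpy as np

def make_model(units=32):
    # build_fn: returns a compiled model; `units` arrives via sk_params
    model = Sequential([Dense(units, activation='relu', input_shape=(4,)),
                        Dense(1)])
    model.compile(loss='mse', optimizer='adam')
    return model

x, y = np.random.rand(16, 4), np.random.rand(16)
reg = KerasRegressor(build_fn=make_model, units=64, epochs=2, verbose=0)
reg.fit(x, y)                # builds via build_fn, then calls Sequential.fit
print(reg.predict(x).shape)  # -> (16,)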
| true
| true
|
f71a12030f0c487777bd6c37ee0b866b3054ef36
| 1,894
|
py
|
Python
|
backend/user/tests/test_models.py
|
Ssents/stonewell_tech
|
2466dbd26105f630bccd87146253ac8adfc4e0bb
|
[
"MIT"
] | 1
|
2022-03-25T07:44:19.000Z
|
2022-03-25T07:44:19.000Z
|
backend/user/tests/test_models.py
|
Ssents/stonewell_tech
|
2466dbd26105f630bccd87146253ac8adfc4e0bb
|
[
"MIT"
] | null | null | null |
backend/user/tests/test_models.py
|
Ssents/stonewell_tech
|
2466dbd26105f630bccd87146253ac8adfc4e0bb
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
'''
Test that creating a user with an email is successful
'''
email = 'test@gmail.com'
password = '456@3'
username = 'test1'
user = get_user_model().objects.create_user(
email = email,
username = username
)
user.set_password(password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_user_email_is_normalised(self):
'''
Test that user email used to sign in is normalized
'''
email = 'test@STONEWELLTECH.com'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
def test_create_user_invalid_email(self):
'''
Test creating user with no email raises an error
'''
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_super_user(self):
'''Test creating a superuser'''
user = get_user_model().objects.create_superuser(
'test@stonewelltech.com',
'test123'
)
self.assertTrue(user.is_superuser) # is_superuser is added by PermissionsMixin
self.assertTrue(user.is_staff)
class UserModelTests(TestCase):
'''
Test whether the user characteristics are saved well
'''
    def setUp(self):
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            'admin@stonewelltech.com',
            'test123'
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email = 'user@stonewelltech.com',
            username = 'Test username'
        )
        self.user.set_password('test123')
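# Illustration of the normalisation exercised above (assumption: the custom
# manager delegates to Django's BaseUserManager.normalize_email, which
# lowercases only the domain part of the address):
#   from django.contrib.auth.models import BaseUserManager
#   BaseUserManager.normalize_email('Test@STONEWELLTECH.com')
#   # -> 'Test@stonewelltech.com'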
| 32.101695
| 87
| 0.621964
|
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
email = 'test@gmail.com'
password = '456@3'
username = 'test1'
user = get_user_model().objects.create_user(
email = email,
username = username
)
user.set_password(password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_user_email_is_normalised(self):
email = 'test@STONEWELLTECH.com'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
def test_create_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_super_user(self):
user = get_user_model().objects.create_superuser(
'test@stonewelltech.com',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
class UserModelTests(TestCase):
    def setUp(self):
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            'admin@stonewelltech.com',
            'test123'
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email = 'user@stonewelltech.com',
            username = 'Test username'
        )
        self.user.set_password('test123')
| true
| true
|
f71a13679ad5560a4a0a810a20a468a27ec122dd
| 6,128
|
py
|
Python
|
devday/talk/migrations/0044_auto_20200310_2010.py
|
jenslauterbach/devday_website
|
a827c9237e656842542eff07ec9fa7b39716a0ee
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 6
|
2018-09-30T20:18:01.000Z
|
2020-03-12T09:03:38.000Z
|
devday/talk/migrations/0044_auto_20200310_2010.py
|
jenslauterbach/devday_website
|
a827c9237e656842542eff07ec9fa7b39716a0ee
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 260
|
2018-09-30T14:17:57.000Z
|
2022-03-04T13:48:34.000Z
|
devday/talk/migrations/0044_auto_20200310_2010.py
|
jenslauterbach/devday_website
|
a827c9237e656842542eff07ec9fa7b39716a0ee
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 9
|
2018-09-30T13:17:21.000Z
|
2020-10-03T12:55:05.000Z
|
# Generated by Django 2.2.10 on 2020-03-10 20:10
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
from django.db import migrations, models
def migrate_speakers(apps, schema_editor):
Talk = apps.get_model("talk", "Talk")
TalkPublishedSpeaker = apps.get_model("talk", "TalkPublishedSpeaker")
TalkDraftSpeaker = apps.get_model("talk", "TalkDraftSpeaker")
db_alias = schema_editor.connection.alias
for talk in Talk.objects.using(db_alias).all():
if talk.published_speaker is not None:
TalkPublishedSpeaker.objects.using(db_alias).create(
published_speaker_id=talk.published_speaker.id, talk_id=talk.id, order=1
)
if talk.draft_speaker is not None:
TalkDraftSpeaker.objects.using(db_alias).create(
draft_speaker_id=talk.draft_speaker.id, talk_id=talk.id, order=1
)
class Migration(migrations.Migration):
dependencies = [
("speaker", "0003_auto_20181019_0948"),
("talk", "0043_auto_20200310_1737"),
]
operations = [
migrations.CreateModel(
name="TalkPublishedSpeaker",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
(
"order",
models.PositiveIntegerField(
db_index=True, editable=False, verbose_name="order"
),
),
(
"published_speaker",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="speaker.PublishedSpeaker",
verbose_name="Published speaker",
),
),
(
"talk",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="talk.Talk",
verbose_name="Talk",
),
),
],
options={
"ordering": ("order",),
"verbose_name": "Talk published speaker",
"verbose_name_plural": "Talk published speakers",
"unique_together": {("talk", "published_speaker")},
},
),
migrations.CreateModel(
name="TalkDraftSpeaker",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
(
"order",
models.PositiveIntegerField(
db_index=True, editable=False, verbose_name="order"
),
),
(
"draft_speaker",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="speaker.Speaker",
verbose_name="Speaker",
),
),
(
"talk",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="talk.Talk",
verbose_name="Talk",
),
),
],
options={
"ordering": ("order",),
"verbose_name": "Talk draft speaker",
"verbose_name_plural": "Talk draft speakers",
"unique_together": {("talk", "draft_speaker")},
},
),
migrations.RunPython(migrate_speakers),
migrations.RemoveField(model_name="talk", name="draft_speaker"),
migrations.RemoveField(model_name="talk", name="published_speaker"),
migrations.AddField(
model_name="talk",
name="draft_speakers",
field=models.ManyToManyField(
blank=True,
through="talk.TalkDraftSpeaker",
to="speaker.Speaker",
verbose_name="Speaker (draft)",
),
),
migrations.AddField(
model_name="talk",
name="published_speakers",
field=models.ManyToManyField(
blank=True,
through="talk.TalkPublishedSpeaker",
to="speaker.PublishedSpeaker",
verbose_name="Speaker (public)",
),
),
]
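# Note (not part of the original migration): RunPython(migrate_speakers) above
# declares no reverse callable, so this migration cannot be unapplied. When an
# empty reverse is acceptable, the usual pattern is:
#   migrations.RunPython(migrate_speakers, migrations.RunPython.noop)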
| 35.218391
| 88
| 0.436847
|
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
from django.db import migrations, models
def migrate_speakers(apps, schema_editor):
Talk = apps.get_model("talk", "Talk")
TalkPublishedSpeaker = apps.get_model("talk", "TalkPublishedSpeaker")
TalkDraftSpeaker = apps.get_model("talk", "TalkDraftSpeaker")
db_alias = schema_editor.connection.alias
for talk in Talk.objects.using(db_alias).all():
if talk.published_speaker is not None:
TalkPublishedSpeaker.objects.using(db_alias).create(
published_speaker_id=talk.published_speaker.id, talk_id=talk.id, order=1
)
if talk.draft_speaker is not None:
TalkDraftSpeaker.objects.using(db_alias).create(
draft_speaker_id=talk.draft_speaker.id, talk_id=talk.id, order=1
)
class Migration(migrations.Migration):
dependencies = [
("speaker", "0003_auto_20181019_0948"),
("talk", "0043_auto_20200310_1737"),
]
operations = [
migrations.CreateModel(
name="TalkPublishedSpeaker",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
(
"order",
models.PositiveIntegerField(
db_index=True, editable=False, verbose_name="order"
),
),
(
"published_speaker",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="speaker.PublishedSpeaker",
verbose_name="Published speaker",
),
),
(
"talk",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="talk.Talk",
verbose_name="Talk",
),
),
],
options={
"ordering": ("order",),
"verbose_name": "Talk published speaker",
"verbose_name_plural": "Talk published speakers",
"unique_together": {("talk", "published_speaker")},
},
),
migrations.CreateModel(
name="TalkDraftSpeaker",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
(
"order",
models.PositiveIntegerField(
db_index=True, editable=False, verbose_name="order"
),
),
(
"draft_speaker",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="speaker.Speaker",
verbose_name="Speaker",
),
),
(
"talk",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="talk.Talk",
verbose_name="Talk",
),
),
],
options={
"ordering": ("order",),
"verbose_name": "Talk draft speaker",
"verbose_name_plural": "Talk draft speakers",
"unique_together": {("talk", "draft_speaker")},
},
),
migrations.RunPython(migrate_speakers),
migrations.RemoveField(model_name="talk", name="draft_speaker"),
migrations.RemoveField(model_name="talk", name="published_speaker"),
migrations.AddField(
model_name="talk",
name="draft_speakers",
field=models.ManyToManyField(
blank=True,
through="talk.TalkDraftSpeaker",
to="speaker.Speaker",
verbose_name="Speaker (draft)",
),
),
migrations.AddField(
model_name="talk",
name="published_speakers",
field=models.ManyToManyField(
blank=True,
through="talk.TalkPublishedSpeaker",
to="speaker.PublishedSpeaker",
verbose_name="Speaker (public)",
),
),
]
| true
| true
|
f71a147252b727cb58683934b78cbaab53a991a4
| 14,687
|
py
|
Python
|
torchreid/models/mobilenetv3.py
|
daniil-lyakhov/deep-object-reid
|
b0f7d6a2d4cff8c417a66d82c09d16788d81ec67
|
[
"Apache-2.0"
] | null | null | null |
torchreid/models/mobilenetv3.py
|
daniil-lyakhov/deep-object-reid
|
b0f7d6a2d4cff8c417a66d82c09d16788d81ec67
|
[
"Apache-2.0"
] | null | null | null |
torchreid/models/mobilenetv3.py
|
daniil-lyakhov/deep-object-reid
|
b0f7d6a2d4cff8c417a66d82c09d16788d81ec67
|
[
"Apache-2.0"
] | null | null | null |
import math
import torch
import torch.nn as nn
from torch.cuda.amp import autocast
from torchreid.losses import AngleSimpleLinear
from torchreid.ops import Dropout, EvalModeSetter, rsc
from .common import HSigmoid, HSwish, ModelInterface, make_divisible
import timm
from torchreid.integration.nncf.compression import get_no_nncf_trace_context_manager, nullcontext
__all__ = ['mobilenetv3_large', 'mobilenetv3_large_075', 'mobilenetv3_small', 'mobilenetv3_large_150',
'mobilenetv3_large_125']
pretrained_urls = {
'mobilenetv3_small':
'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-small-55df8e1f.pth?raw=true',
'mobilenetv3_large':
'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-large-1cd25616.pth?raw=true',
'mobilenetv3_large_075':
'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-large-0.75-9632d2a8.pth?raw=true',
'mobilenetv3_large_21k':
'https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/mobilenetv3_large_100_miil_21k.pth'
}
SHOULD_NNCF_SKIP_SE_LAYERS = False
SHOULD_NNCF_SKIP_HEAD = False
no_nncf_se_layer_context = get_no_nncf_trace_context_manager() if SHOULD_NNCF_SKIP_SE_LAYERS else nullcontext
no_nncf_head_context = get_no_nncf_trace_context_manager() if SHOULD_NNCF_SKIP_HEAD else nullcontext
class SELayer(nn.Module):
def __init__(self, channel, reduction=4):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, make_divisible(channel // reduction, 8)),
nn.ReLU(inplace=True),
nn.Linear(make_divisible(channel // reduction, 8), channel),
HSigmoid()
)
def forward(self, x):
with no_nncf_se_layer_context():
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
def conv_3x3_bn(inp, oup, stride, IN_conv1=False):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup) if not IN_conv1 else nn.InstanceNorm2d(oup, affine=True),
HSwish()
)
def conv_1x1_bn(inp, oup, loss='softmax'):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
HSwish() if loss == 'softmax' else nn.PReLU()
)
class InvertedResidual(nn.Module):
def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se, use_hs):
super(InvertedResidual, self).__init__()
assert stride in [1, 2]
self.identity = stride == 1 and inp == oup
if inp == hidden_dim:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
HSwish() if use_hs else nn.ReLU(inplace=True),
# Squeeze-and-Excite
SELayer(hidden_dim) if use_se else nn.Identity(),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
HSwish() if use_hs else nn.ReLU(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
# Squeeze-and-Excite
SELayer(hidden_dim) if use_se else nn.Identity(),
HSwish() if use_hs else nn.ReLU(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.identity:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV3(ModelInterface):
def __init__(self,
cfgs,
mode,
IN_conv1=False,
num_classes=1000,
width_mult=1.,
in_channels=3,
input_size=(224, 224),
dropout_cls = None,
pooling_type='avg',
IN_first=False,
self_challenging_cfg=False,
**kwargs):
super().__init__(**kwargs)
self.in_size = input_size
self.num_classes = num_classes
self.input_IN = nn.InstanceNorm2d(in_channels, affine=True) if IN_first else None
self.pooling_type = pooling_type
self.self_challenging_cfg = self_challenging_cfg
self.width_mult = width_mult
self.dropout_cls = dropout_cls
# setting of inverted residual blocks
self.cfgs = cfgs
assert mode in ['large', 'small']
# building first layer
input_channel = make_divisible(16 * self.width_mult, 8)
stride = 1 if self.in_size[0] < 100 else 2
        layers = [conv_3x3_bn(in_channels, input_channel, stride, IN_conv1)]
# building inverted residual blocks
block = InvertedResidual
flag = True
for k, t, c, use_se, use_hs, s in self.cfgs:
if (self.in_size[0] < 100) and (s == 2) and flag:
s = 1
flag = False
output_channel = make_divisible(c * self.width_mult, 8)
exp_size = make_divisible(input_channel * t, 8)
layers.append(block(input_channel, exp_size, output_channel, k, s, use_se, use_hs))
input_channel = output_channel
self.features = nn.Sequential(*layers)
self.num_features = exp_size
# building last several layers
self.conv = conv_1x1_bn(input_channel, exp_size, self.loss)
output_channel = {'large': 1280, 'small': 1024}
output_channel = make_divisible(output_channel[mode] * self.width_mult, 8) if self.width_mult > 1.0 else output_channel[mode]
if self.loss == 'softmax' or self.loss == 'asl':
self.classifier = nn.Sequential(
nn.Linear(exp_size, output_channel),
nn.BatchNorm1d(output_channel),
HSwish(),
Dropout(**self.dropout_cls),
nn.Linear(output_channel, self.num_classes),
)
else:
assert self.loss in ['am_softmax', 'am_binary']
self.classifier = nn.Sequential(
nn.Linear(exp_size, output_channel),
nn.BatchNorm1d(output_channel),
nn.PReLU(),
Dropout(**self.dropout_cls),
AngleSimpleLinear(output_channel, self.num_classes),
)
self._initialize_weights()
self.forward = autocast(self.mix_precision)(self.forward)
def extract_features(self, x):
y = self.conv(self.features(x))
return y
def infer_head(self, x, skip_pool=False):
if not skip_pool:
glob_features = self._glob_feature_vector(x, self.pooling_type, reduce_dims=False)
else:
glob_features = x
logits = self.classifier(glob_features.view(x.shape[0], -1))
return glob_features, logits
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def forward(self, x, return_featuremaps=False, get_embeddings=False, gt_labels=None):
if self.input_IN is not None:
x = self.input_IN(x)
y = self.extract_features(x)
if return_featuremaps:
return y
with no_nncf_head_context():
glob_features, logits = self.infer_head(y, skip_pool=False)
if self.training and self.self_challenging_cfg.enable and gt_labels is not None:
glob_features = rsc(
features = glob_features,
scores = logits,
labels = gt_labels,
retain_p = 1.0 - self.self_challenging_cfg.drop_p,
retain_batch = 1.0 - self.self_challenging_cfg.drop_batch_p
)
with EvalModeSetter([self.output], m_type=(nn.BatchNorm1d, nn.BatchNorm2d)):
_, logits = self.infer_head(x, skip_pool=True)
if not self.training and self.is_classification():
return [logits]
if get_embeddings:
out_data = [logits, glob_features]
elif self.loss in ['softmax', 'am_softmax', 'asl', 'am_binary']:
out_data = [logits]
elif self.loss in ['triplet']:
out_data = [logits, glob_features]
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
return tuple(out_data)
def init_pretrained_weights(model, key='', **kwargs):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
import os
import errno
import gdown
from torchreid.utils import load_pretrained_weights
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
filename = key + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
gdown.download(pretrained_urls[key], cached_file)
model = load_pretrained_weights(model, cached_file, **kwargs)
def mobilenetv3_large_075(pretrained=False, **kwargs):
"""
Constructs a MobileNetV3-Large model
"""
cfgs = [
# k, t, c, SE, HS, s
[3, 1, 16, 0, 0, 1],
[3, 4, 24, 0, 0, 2],
[3, 3, 24, 0, 0, 1],
[5, 3, 40, 1, 0, 2],
[5, 3, 40, 1, 0, 1],
[5, 3, 40, 1, 0, 1],
[3, 6, 80, 0, 1, 2],
[3, 2.5, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 6, 112, 1, 1, 1],
[3, 6, 112, 1, 1, 1],
[5, 6, 160, 1, 1, 2],
[5, 6, 160, 1, 1, 1],
[5, 6, 160, 1, 1, 1]
]
net = MobileNetV3(cfgs, mode='large', width_mult =.75, **kwargs)
if pretrained:
init_pretrained_weights(net, key='mobilenetv3_large_075')
return net
def mobilenetv3_large(pretrained=False, **kwargs):
"""
Constructs a MobileNetV3-Large model
"""
cfgs = [
# k, t, c, SE, HS, s
[3, 1, 16, 0, 0, 1],
[3, 4, 24, 0, 0, 2],
[3, 3, 24, 0, 0, 1],
[5, 3, 40, 1, 0, 2],
[5, 3, 40, 1, 0, 1],
[5, 3, 40, 1, 0, 1],
[3, 6, 80, 0, 1, 2],
[3, 2.5, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 6, 112, 1, 1, 1],
[3, 6, 112, 1, 1, 1],
[5, 6, 160, 1, 1, 2],
[5, 6, 160, 1, 1, 1],
[5, 6, 160, 1, 1, 1]
]
net = MobileNetV3(cfgs, mode='large', width_mult = 1., **kwargs)
if pretrained:
init_pretrained_weights(net, key='mobilenetv3_large')
return net
def mobilenetv3_large_150(pretrained=False, **kwargs):
"""
Constructs a MobileNetV3-Large model
"""
cfgs = [
# k, t, c, SE, HS, s
[3, 1, 16, 0, 0, 1],
[3, 4, 24, 0, 0, 2],
[3, 3, 24, 0, 0, 1],
[5, 3, 40, 1, 0, 2],
[5, 3, 40, 1, 0, 1],
[5, 3, 40, 1, 0, 1],
[3, 6, 80, 0, 1, 2],
[3, 2.5, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 6, 112, 1, 1, 1],
[3, 6, 112, 1, 1, 1],
[5, 6, 160, 1, 1, 2],
[5, 6, 160, 1, 1, 1],
[5, 6, 160, 1, 1, 1]
]
net = MobileNetV3(cfgs, mode='large', width_mult = 1.5, **kwargs)
if pretrained:
raise NotImplementedError("The weights for this configuration are not available")
return net
def mobilenetv3_large_125(pretrained=False, **kwargs):
"""
Constructs a MobileNetV3-Large model
"""
cfgs = [
# k, t, c, SE, HS, s
[3, 1, 16, 0, 0, 1],
[3, 4, 24, 0, 0, 2],
[3, 3, 24, 0, 0, 1],
[5, 3, 40, 1, 0, 2],
[5, 3, 40, 1, 0, 1],
[5, 3, 40, 1, 0, 1],
[3, 6, 80, 0, 1, 2],
[3, 2.5, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 6, 112, 1, 1, 1],
[3, 6, 112, 1, 1, 1],
[5, 6, 160, 1, 1, 2],
[5, 6, 160, 1, 1, 1],
[5, 6, 160, 1, 1, 1]
]
net = MobileNetV3(cfgs, mode='large', width_mult = 1.25, **kwargs)
if pretrained:
raise NotImplementedError("The weights for this configuration are not available")
return net
def mobilenetv3_small(pretrained=False, **kwargs):
"""
Constructs a MobileNetV3-Small model
"""
cfgs = [
# k, t, c, SE, HS, s
[3, 1, 16, 1, 0, 2],
[3, 4.5, 24, 0, 0, 2],
[3, 3.67, 24, 0, 0, 1],
[5, 4, 40, 1, 1, 2],
[5, 6, 40, 1, 1, 1],
[5, 6, 40, 1, 1, 1],
[5, 3, 48, 1, 1, 1],
[5, 3, 48, 1, 1, 1],
[5, 6, 96, 1, 1, 2],
[5, 6, 96, 1, 1, 1],
[5, 6, 96, 1, 1, 1],
]
net = MobileNetV3(cfgs, mode='small', width_mult = 1., **kwargs)
if pretrained:
init_pretrained_weights(net, key='mobilenetv3_small')
return net
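# Each `cfgs` row above reads (kernel size k, expansion ratio t, output
# channels c, use_SE, use_HS, stride s); e.g. [5, 6, 160, 1, 1, 2] is a 5x5
# depthwise block that expands its input sixfold, emits 160 channels, applies
# squeeze-and-excite plus h-swish, and downsamples by 2. Hypothetical smoke
# test (a sketch only: loss, dropout_cls and mix_precision are consumed by
# MobileNetV3/ModelInterface, and their exact accepted values may differ):
#   import torch
#   net = mobilenetv3_small(num_classes=10, loss='softmax',
#                           dropout_cls=dict(p=0.1), mix_precision=False)
#   out = net(torch.randn(2, 3, 224, 224))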
| 34.315421
| 133
| 0.544291
|
import math
import torch
import torch.nn as nn
from torch.cuda.amp import autocast
from torchreid.losses import AngleSimpleLinear
from torchreid.ops import Dropout, EvalModeSetter, rsc
from .common import HSigmoid, HSwish, ModelInterface, make_divisible
import timm
from torchreid.integration.nncf.compression import get_no_nncf_trace_context_manager, nullcontext
__all__ = ['mobilenetv3_large', 'mobilenetv3_large_075', 'mobilenetv3_small', 'mobilenetv3_large_150',
'mobilenetv3_large_125']
pretrained_urls = {
'mobilenetv3_small':
'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-small-55df8e1f.pth?raw=true',
'mobilenetv3_large':
'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-large-1cd25616.pth?raw=true',
'mobilenetv3_large_075':
'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-large-0.75-9632d2a8.pth?raw=true',
'mobilenetv3_large_21k':
'https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/mobilenetv3_large_100_miil_21k.pth'
}
SHOULD_NNCF_SKIP_SE_LAYERS = False
SHOULD_NNCF_SKIP_HEAD = False
no_nncf_se_layer_context = get_no_nncf_trace_context_manager() if SHOULD_NNCF_SKIP_SE_LAYERS else nullcontext
no_nncf_head_context = get_no_nncf_trace_context_manager() if SHOULD_NNCF_SKIP_HEAD else nullcontext
class SELayer(nn.Module):
def __init__(self, channel, reduction=4):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, make_divisible(channel // reduction, 8)),
nn.ReLU(inplace=True),
nn.Linear(make_divisible(channel // reduction, 8), channel),
HSigmoid()
)
def forward(self, x):
with no_nncf_se_layer_context():
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
def conv_3x3_bn(inp, oup, stride, IN_conv1=False):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup) if not IN_conv1 else nn.InstanceNorm2d(oup, affine=True),
HSwish()
)
def conv_1x1_bn(inp, oup, loss='softmax'):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
HSwish() if loss == 'softmax' else nn.PReLU()
)
class InvertedResidual(nn.Module):
def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se, use_hs):
super(InvertedResidual, self).__init__()
assert stride in [1, 2]
self.identity = stride == 1 and inp == oup
if inp == hidden_dim:
self.conv = nn.Sequential(
nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
HSwish() if use_hs else nn.ReLU(inplace=True),
SELayer(hidden_dim) if use_se else nn.Identity(),
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
HSwish() if use_hs else nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
SELayer(hidden_dim) if use_se else nn.Identity(),
HSwish() if use_hs else nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.identity:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV3(ModelInterface):
def __init__(self,
cfgs,
mode,
IN_conv1=False,
num_classes=1000,
width_mult=1.,
in_channels=3,
input_size=(224, 224),
dropout_cls = None,
pooling_type='avg',
IN_first=False,
self_challenging_cfg=False,
**kwargs):
super().__init__(**kwargs)
self.in_size = input_size
self.num_classes = num_classes
self.input_IN = nn.InstanceNorm2d(in_channels, affine=True) if IN_first else None
self.pooling_type = pooling_type
self.self_challenging_cfg = self_challenging_cfg
self.width_mult = width_mult
self.dropout_cls = dropout_cls
self.cfgs = cfgs
assert mode in ['large', 'small']
input_channel = make_divisible(16 * self.width_mult, 8)
stride = 1 if self.in_size[0] < 100 else 2
        layers = [conv_3x3_bn(in_channels, input_channel, stride, IN_conv1)]
block = InvertedResidual
flag = True
for k, t, c, use_se, use_hs, s in self.cfgs:
if (self.in_size[0] < 100) and (s == 2) and flag:
s = 1
flag = False
output_channel = make_divisible(c * self.width_mult, 8)
exp_size = make_divisible(input_channel * t, 8)
layers.append(block(input_channel, exp_size, output_channel, k, s, use_se, use_hs))
input_channel = output_channel
self.features = nn.Sequential(*layers)
self.num_features = exp_size
self.conv = conv_1x1_bn(input_channel, exp_size, self.loss)
output_channel = {'large': 1280, 'small': 1024}
output_channel = make_divisible(output_channel[mode] * self.width_mult, 8) if self.width_mult > 1.0 else output_channel[mode]
if self.loss == 'softmax' or self.loss == 'asl':
self.classifier = nn.Sequential(
nn.Linear(exp_size, output_channel),
nn.BatchNorm1d(output_channel),
HSwish(),
Dropout(**self.dropout_cls),
nn.Linear(output_channel, self.num_classes),
)
else:
assert self.loss in ['am_softmax', 'am_binary']
self.classifier = nn.Sequential(
nn.Linear(exp_size, output_channel),
nn.BatchNorm1d(output_channel),
nn.PReLU(),
Dropout(**self.dropout_cls),
AngleSimpleLinear(output_channel, self.num_classes),
)
self._initialize_weights()
self.forward = autocast(self.mix_precision)(self.forward)
def extract_features(self, x):
y = self.conv(self.features(x))
return y
def infer_head(self, x, skip_pool=False):
if not skip_pool:
glob_features = self._glob_feature_vector(x, self.pooling_type, reduce_dims=False)
else:
glob_features = x
logits = self.classifier(glob_features.view(x.shape[0], -1))
return glob_features, logits
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def forward(self, x, return_featuremaps=False, get_embeddings=False, gt_labels=None):
if self.input_IN is not None:
x = self.input_IN(x)
y = self.extract_features(x)
if return_featuremaps:
return y
with no_nncf_head_context():
glob_features, logits = self.infer_head(y, skip_pool=False)
if self.training and self.self_challenging_cfg.enable and gt_labels is not None:
glob_features = rsc(
features = glob_features,
scores = logits,
labels = gt_labels,
retain_p = 1.0 - self.self_challenging_cfg.drop_p,
retain_batch = 1.0 - self.self_challenging_cfg.drop_batch_p
)
with EvalModeSetter([self.output], m_type=(nn.BatchNorm1d, nn.BatchNorm2d)):
_, logits = self.infer_head(x, skip_pool=True)
if not self.training and self.is_classification():
return [logits]
if get_embeddings:
out_data = [logits, glob_features]
elif self.loss in ['softmax', 'am_softmax', 'asl', 'am_binary']:
out_data = [logits]
elif self.loss in ['triplet']:
out_data = [logits, glob_features]
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
return tuple(out_data)
def init_pretrained_weights(model, key='', **kwargs):
import os
import errno
import gdown
from torchreid.utils import load_pretrained_weights
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
filename = key + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
gdown.download(pretrained_urls[key], cached_file)
model = load_pretrained_weights(model, cached_file, **kwargs)
def mobilenetv3_large_075(pretrained=False, **kwargs):
cfgs = [
[3, 1, 16, 0, 0, 1],
[3, 4, 24, 0, 0, 2],
[3, 3, 24, 0, 0, 1],
[5, 3, 40, 1, 0, 2],
[5, 3, 40, 1, 0, 1],
[5, 3, 40, 1, 0, 1],
[3, 6, 80, 0, 1, 2],
[3, 2.5, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 6, 112, 1, 1, 1],
[3, 6, 112, 1, 1, 1],
[5, 6, 160, 1, 1, 2],
[5, 6, 160, 1, 1, 1],
[5, 6, 160, 1, 1, 1]
]
net = MobileNetV3(cfgs, mode='large', width_mult =.75, **kwargs)
if pretrained:
init_pretrained_weights(net, key='mobilenetv3_large_075')
return net
def mobilenetv3_large(pretrained=False, **kwargs):
cfgs = [
[3, 1, 16, 0, 0, 1],
[3, 4, 24, 0, 0, 2],
[3, 3, 24, 0, 0, 1],
[5, 3, 40, 1, 0, 2],
[5, 3, 40, 1, 0, 1],
[5, 3, 40, 1, 0, 1],
[3, 6, 80, 0, 1, 2],
[3, 2.5, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 6, 112, 1, 1, 1],
[3, 6, 112, 1, 1, 1],
[5, 6, 160, 1, 1, 2],
[5, 6, 160, 1, 1, 1],
[5, 6, 160, 1, 1, 1]
]
net = MobileNetV3(cfgs, mode='large', width_mult = 1., **kwargs)
if pretrained:
init_pretrained_weights(net, key='mobilenetv3_large')
return net
def mobilenetv3_large_150(pretrained=False, **kwargs):
cfgs = [
[3, 1, 16, 0, 0, 1],
[3, 4, 24, 0, 0, 2],
[3, 3, 24, 0, 0, 1],
[5, 3, 40, 1, 0, 2],
[5, 3, 40, 1, 0, 1],
[5, 3, 40, 1, 0, 1],
[3, 6, 80, 0, 1, 2],
[3, 2.5, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 6, 112, 1, 1, 1],
[3, 6, 112, 1, 1, 1],
[5, 6, 160, 1, 1, 2],
[5, 6, 160, 1, 1, 1],
[5, 6, 160, 1, 1, 1]
]
net = MobileNetV3(cfgs, mode='large', width_mult = 1.5, **kwargs)
if pretrained:
raise NotImplementedError("The weights for this configuration are not available")
return net
def mobilenetv3_large_125(pretrained=False, **kwargs):
cfgs = [
[3, 1, 16, 0, 0, 1],
[3, 4, 24, 0, 0, 2],
[3, 3, 24, 0, 0, 1],
[5, 3, 40, 1, 0, 2],
[5, 3, 40, 1, 0, 1],
[5, 3, 40, 1, 0, 1],
[3, 6, 80, 0, 1, 2],
[3, 2.5, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 6, 112, 1, 1, 1],
[3, 6, 112, 1, 1, 1],
[5, 6, 160, 1, 1, 2],
[5, 6, 160, 1, 1, 1],
[5, 6, 160, 1, 1, 1]
]
net = MobileNetV3(cfgs, mode='large', width_mult = 1.25, **kwargs)
if pretrained:
raise NotImplementedError("The weights for this configuration are not available")
return net
def mobilenetv3_small(pretrained=False, **kwargs):
cfgs = [
[3, 1, 16, 1, 0, 2],
[3, 4.5, 24, 0, 0, 2],
[3, 3.67, 24, 0, 0, 1],
[5, 4, 40, 1, 1, 2],
[5, 6, 40, 1, 1, 1],
[5, 6, 40, 1, 1, 1],
[5, 3, 48, 1, 1, 1],
[5, 3, 48, 1, 1, 1],
[5, 6, 96, 1, 1, 2],
[5, 6, 96, 1, 1, 1],
[5, 6, 96, 1, 1, 1],
]
net = MobileNetV3(cfgs, mode='small', width_mult = 1., **kwargs)
if pretrained:
init_pretrained_weights(net, key='mobilenetv3_small')
return net
| true
| true
|
f71a168b25957243708b709f360ba988096918a1
| 674
|
py
|
Python
|
setup.py
|
ashwin153/pdpyras
|
19971ec2df9ab854a91b95a25de452483ea57af0
|
[
"MIT"
] | 92
|
2018-08-16T21:35:02.000Z
|
2022-03-30T06:52:21.000Z
|
setup.py
|
ashwin153/pdpyras
|
19971ec2df9ab854a91b95a25de452483ea57af0
|
[
"MIT"
] | 53
|
2018-11-26T20:18:01.000Z
|
2022-03-22T17:25:19.000Z
|
setup.py
|
ashwin153/pdpyras
|
19971ec2df9ab854a91b95a25de452483ea57af0
|
[
"MIT"
] | 22
|
2018-10-18T14:36:12.000Z
|
2022-02-06T21:52:47.000Z
|
from setuptools import setup, find_packages
__version__ = '4.3.0'
if __name__ == '__main__':
setup(
name='pdpyras',
description="PagerDuty REST API client",
long_description="A basic REST API client for PagerDuty based on Requests' Session class",
py_modules=['pdpyras'],
version=__version__,
license='MIT',
url='https://pagerduty.github.io/pdpyras',
download_url='https://pypi.org/project/pdpyras/',
install_requires=['requests', 'urllib3'],
author='Demitri Morgan',
author_email='demitri@pagerduty.com',
python_requires='!=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, >=3.5'
)
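# Typical ways to consume this setup script (illustrative):
#   pip install .            # install pdpyras from a checkout of this repo
#   python setup.py sdist    # build a source distribution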
| 33.7
| 98
| 0.614243
|
from setuptools import setup, find_packages
__version__ = '4.3.0'
if __name__ == '__main__':
setup(
name='pdpyras',
description="PagerDuty REST API client",
long_description="A basic REST API client for PagerDuty based on Requests' Session class",
py_modules=['pdpyras'],
version=__version__,
license='MIT',
url='https://pagerduty.github.io/pdpyras',
download_url='https://pypi.org/project/pdpyras/',
install_requires=['requests', 'urllib3'],
author='Demitri Morgan',
author_email='demitri@pagerduty.com',
python_requires='!=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, >=3.5'
)
| true
| true
|
f71a16f3990d1459e27c67ec2953c6e70264c9af
| 421
|
py
|
Python
|
configs/__init__.py
|
whiplash003/pytrorch_template
|
4629ede6ade3359a12bd40269fced3b96e8d11b3
|
[
"MIT"
] | 4
|
2019-10-11T01:08:47.000Z
|
2021-02-27T13:37:05.000Z
|
configs/__init__.py
|
qilong97/PyTorch-Project-Framework
|
e1d791e9ac679907f94f0fbe7b9c930292cb61d3
|
[
"MIT"
] | null | null | null |
configs/__init__.py
|
qilong97/PyTorch-Project-Framework
|
e1d791e9ac679907f94f0fbe7b9c930292cb61d3
|
[
"MIT"
] | 5
|
2019-11-01T09:25:00.000Z
|
2021-08-23T02:48:45.000Z
|
import os
from .BaseConfig import BaseConfig
from .BaseTest import BaseTest
from .Env import env
from .Run import Run
__all__ = ['BaseConfig', 'BaseTest', 'Run', 'env', 'all']
def all(config, cfg_dir):
if not os.path.exists(cfg_dir):
os.makedirs(cfg_dir)
cfg_list = list()
for file in sorted(os.listdir(cfg_dir)):
cfg_list.append(config(os.path.join(cfg_dir, file)))
return cfg_list
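# Usage sketch (assumption: each config class takes a single config-file
# path, as `all` implies):
#   cfgs = all(BaseConfig, './configs/experiments')
#   for cfg in cfgs:
#       print(cfg)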
| 21.05
| 60
| 0.684086
|
import os
from .BaseConfig import BaseConfig
from .BaseTest import BaseTest
from .Env import env
from .Run import Run
__all__ = ['BaseConfig', 'BaseTest', 'Run', 'env', 'all']
def all(config, cfg_dir):
if not os.path.exists(cfg_dir):
os.makedirs(cfg_dir)
cfg_list = list()
for file in sorted(os.listdir(cfg_dir)):
cfg_list.append(config(os.path.join(cfg_dir, file)))
return cfg_list
| true
| true
|
f71a18336d3c0e2f947f297b8e9e9e31ea3bbe07
| 895
|
py
|
Python
|
setup.py
|
zhs007/trdb2py
|
d07b874bd37085ed64b5c6c6c2c21a380024d082
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
zhs007/trdb2py
|
d07b874bd37085ed64b5c6c6c2c21a380024d082
|
[
"Apache-2.0"
] | 43
|
2020-12-11T09:07:51.000Z
|
2021-05-29T07:31:10.000Z
|
setup.py
|
zhs007/trdb2py
|
d07b874bd37085ed64b5c6c6c2c21a380024d082
|
[
"Apache-2.0"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("VERSION", "r") as fversion:
version = fversion.read()
setuptools.setup(
name="trdb2py",
version=version,
author="Zerro Zhao",
author_email="zerrozhao@gmail.com",
description="tradingdb2 for python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/zhs007/trdb2py",
packages=setuptools.find_packages(),
entry_points={
'console_scripts': [
'trdb2py=trdb2py:main'
],
},
classifiers=(
"Programming Language :: Python :: 3",
# "License :: OSI Approved :: Apache License",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
),
)
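# Note: version and long_description are read from sibling VERSION and
# README.md files, so builds must run from the repository root, e.g.:
#   pip install .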
| 27.121212
| 54
| 0.620112
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("VERSION", "r") as fversion:
version = fversion.read()
setuptools.setup(
name="trdb2py",
version=version,
author="Zerro Zhao",
author_email="zerrozhao@gmail.com",
description="tradingdb2 for python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/zhs007/trdb2py",
packages=setuptools.find_packages(),
entry_points={
'console_scripts': [
'trdb2py=trdb2py:main'
],
},
classifiers=(
"Programming Language :: Python :: 3",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
),
)
| true
| true
|
f71a184c5dbe74ec302bac2087f436f411cf0919
| 2,633
|
py
|
Python
|
data_config.py
|
XieResearchGroup/CLEIT
|
226ece5a8763ac010610cbc9f66915caca92775e
|
[
"MIT"
] | null | null | null |
data_config.py
|
XieResearchGroup/CLEIT
|
226ece5a8763ac010610cbc9f66915caca92775e
|
[
"MIT"
] | null | null | null |
data_config.py
|
XieResearchGroup/CLEIT
|
226ece5a8763ac010610cbc9f66915caca92775e
|
[
"MIT"
] | null | null | null |
import os
"""
configuration file includes all related multi-omics datasets
"""
root_data_folder = './data'
raw_data_folder = os.path.join(root_data_folder, 'raw_dat')
preprocessed_data_folder = os.path.join(root_data_folder, 'preprocessed_dat')
gex_feature_file = os.path.join(preprocessed_data_folder, 'uq1000_gex_feature.csv')
xena_mut_uq_file = os.path.join(preprocessed_data_folder, 'xena_uq_mut_standarized.csv')
ccle_mut_uq_file = os.path.join(preprocessed_data_folder, 'ccle_uq_mut_standarized.csv')
#mapping_file = os.path.join(raw_data_folder, 'mart_export.txt')
gene_feature_file = os.path.join(preprocessed_data_folder, 'CosmicHGNC_list.tsv')
# Xena datasets
xena_folder = os.path.join(raw_data_folder, 'Xena')
xena_id_mapping_file = os.path.join(xena_folder, 'gencode.v23.annotation.gene.probemap')
xena_gex_file = os.path.join(xena_folder, 'tcga_RSEM_gene_tpm.gz')
xena_preprocessed_gex_file = os.path.join(preprocessed_data_folder, 'xena_gex')
xena_mut_file = os.path.join(xena_folder, 'mc3.v0.2.8.PUBLIC.nonsilentGene.xena.gz')
xena_preprocessed_mut_file = os.path.join(preprocessed_data_folder, 'xena_mut')
xena_sample_file = os.path.join(xena_folder, 'TCGA_phenotype_denseDataOnlyDownload.tsv.gz')
# CCLE datasets
ccle_folder = os.path.join(raw_data_folder, 'CCLE')
ccle_gex_file = os.path.join(ccle_folder, 'CCLE_expression.csv')
ccle_preprocessed_gex_file = os.path.join(preprocessed_data_folder, 'ccle_gex')
ccle_mut_file = os.path.join(ccle_folder, 'CCLE_mutations.csv')
ccle_preprocessed_mut_file = os.path.join(preprocessed_data_folder, 'ccle_mut')
ccle_sample_file = os.path.join(ccle_folder, 'sample_info.csv')
# GDSC datasets
gdsc_folder = os.path.join(raw_data_folder, 'GDSC')
gdsc_target_file1 = os.path.join(gdsc_folder, 'GDSC1_fitted_dose_response_25Feb20.csv')
gdsc_target_file2 = os.path.join(gdsc_folder, 'GDSC2_fitted_dose_response_25Feb20.csv')
gdsc_target_file = os.path.join(gdsc_folder, 'sanger-dose-response.csv')
gdsc_sample_file = os.path.join(gdsc_folder, 'gdsc_cell_line_annotation.csv')
gdsc_preprocessed_target_file = os.path.join(preprocessed_data_folder, 'gdsc_target')
# PPI network files
network_folder = os.path.join(raw_data_folder, 'network')
string_network_folder = os.path.join(network_folder, 'STRING')
raw_string_network_file = os.path.join(string_network_folder, '9606.protein.links.v11.0.txt.gz')
string_id_mapping_file = os.path.join(string_network_folder, '9606.protein.info.v11.0.txt.gz')
current_network_file = os.path.join(string_network_folder, 'string_network_hgnc.txt')
propagation_kernel_file = os.path.join(string_network_folder, 'string_propagation_kernel.file')
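# Directory layout implied by the paths above (a sketch of assumptions, not
# part of the original file):
#   ./data/raw_dat/{Xena,CCLE,GDSC,network/STRING}/   downloaded inputs
#   ./data/preprocessed_dat/                          derived feature files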
| 57.23913
| 96
| 0.821117
|
import os
root_data_folder = './data'
raw_data_folder = os.path.join(root_data_folder, 'raw_dat')
preprocessed_data_folder = os.path.join(root_data_folder, 'preprocessed_dat')
gex_feature_file = os.path.join(preprocessed_data_folder, 'uq1000_gex_feature.csv')
xena_mut_uq_file = os.path.join(preprocessed_data_folder, 'xena_uq_mut_standarized.csv')
ccle_mut_uq_file = os.path.join(preprocessed_data_folder, 'ccle_uq_mut_standarized.csv')
gene_feature_file = os.path.join(preprocessed_data_folder, 'CosmicHGNC_list.tsv')
xena_folder = os.path.join(raw_data_folder, 'Xena')
xena_id_mapping_file = os.path.join(xena_folder, 'gencode.v23.annotation.gene.probemap')
xena_gex_file = os.path.join(xena_folder, 'tcga_RSEM_gene_tpm.gz')
xena_preprocessed_gex_file = os.path.join(preprocessed_data_folder, 'xena_gex')
xena_mut_file = os.path.join(xena_folder, 'mc3.v0.2.8.PUBLIC.nonsilentGene.xena.gz')
xena_preprocessed_mut_file = os.path.join(preprocessed_data_folder, 'xena_mut')
xena_sample_file = os.path.join(xena_folder, 'TCGA_phenotype_denseDataOnlyDownload.tsv.gz')
ccle_folder = os.path.join(raw_data_folder, 'CCLE')
ccle_gex_file = os.path.join(ccle_folder, 'CCLE_expression.csv')
ccle_preprocessed_gex_file = os.path.join(preprocessed_data_folder, 'ccle_gex')
ccle_mut_file = os.path.join(ccle_folder, 'CCLE_mutations.csv')
ccle_preprocessed_mut_file = os.path.join(preprocessed_data_folder, 'ccle_mut')
ccle_sample_file = os.path.join(ccle_folder, 'sample_info.csv')
gdsc_folder = os.path.join(raw_data_folder, 'GDSC')
gdsc_target_file1 = os.path.join(gdsc_folder, 'GDSC1_fitted_dose_response_25Feb20.csv')
gdsc_target_file2 = os.path.join(gdsc_folder, 'GDSC2_fitted_dose_response_25Feb20.csv')
gdsc_target_file = os.path.join(gdsc_folder, 'sanger-dose-response.csv')
gdsc_sample_file = os.path.join(gdsc_folder, 'gdsc_cell_line_annotation.csv')
gdsc_preprocessed_target_file = os.path.join(preprocessed_data_folder, 'gdsc_target')
network_folder = os.path.join(raw_data_folder, 'network')
string_network_folder = os.path.join(network_folder, 'STRING')
raw_string_network_file = os.path.join(string_network_folder, '9606.protein.links.v11.0.txt.gz')
string_id_mapping_file = os.path.join(string_network_folder, '9606.protein.info.v11.0.txt.gz')
current_network_file = os.path.join(string_network_folder, 'string_network_hgnc.txt')
propagation_kernel_file = os.path.join(string_network_folder, 'string_propagation_kernel.file')
| true
| true
|
f71a18b20364f8e9aea1382e54d3b363fe159bcb
| 4,188
|
py
|
Python
|
uptimer/events/meta.py
|
janw/uptimer
|
967b5ed907d620f79ee29ab8be52ba89f1686513
|
[
"Apache-2.0"
] | 1
|
2021-08-23T18:40:03.000Z
|
2021-08-23T18:40:03.000Z
|
uptimer/events/meta.py
|
janw/uptimer
|
967b5ed907d620f79ee29ab8be52ba89f1686513
|
[
"Apache-2.0"
] | 1
|
2021-01-17T13:31:41.000Z
|
2021-01-17T13:31:41.000Z
|
uptimer/events/meta.py
|
janw/uptimer
|
967b5ed907d620f79ee29ab8be52ba89f1686513
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABCMeta
from uuid import UUID
import jsonschema
from dateutil.parser import parse as dateparse
from uptimer.events import SCHEMATA_PATH
from uptimer.events.cache import schema_cache
from uptimer.helpers import to_bool, to_none
class EventDefinitionError(ValueError):
pass
class EventMeta(ABCMeta, metaclass=ABCMeta):
schema_path: str = f"file:///{SCHEMATA_PATH}"
"""Base-URL at which the schema resolver will look up schema references."""
def __new__(cls, name, bases, attrs, **kwargs):
super_new = super().__new__
schema = attrs.pop("schema", None)
# `table` can be a valid None, so use False as placeholder of missing property
table = attrs.pop("table", False)
if not schema:
raise EventDefinitionError(f"Class {name} did not declare a JSON schema.")
if table is False:
raise EventDefinitionError(
f"Class {name} did not declare a database table mapping."
)
# Now resolve and parse the JSON schema for additional properties; generating
# useful representations, the proper schema resolver for validation, etc.
# Inserting them in the `attrs` dictionary will cause them to become regular
# class variables, available in every instantiated class object.
schema_spec = schema_cache[schema]
if schema_spec["title"] != name:
raise EventDefinitionError(
f"Name of class {name} must be equal to "
f"JSON schema title '{schema_spec['title']}'"
)
properties_dict = cls._collect_properties(schema_spec)
properties = list(properties_dict.keys())
property_cast_mapping = {
prop: cls.property_to_python(spec) for prop, spec in properties_dict.items()
}
resolver = jsonschema.RefResolver(cls.schema_path, schema_spec)
attrs.update(
dict(
schema=schema,
table=table,
schema_spec=schema_spec,
properties_dict=properties_dict,
properties=properties,
property_cast_mapping=property_cast_mapping,
_resolver=resolver,
)
)
return super_new(cls, name, bases, attrs, **kwargs)
@staticmethod
def _collect_properties(schema):
"""Collects a list of all (including nested and conditional) properties."""
props = dict()
array_iter = []
if isinstance(schema, list):
array_iter = enumerate(schema)
elif isinstance(schema, dict):
array_iter = schema.items()
for key, value in array_iter:
if key == "properties":
props.update(value)
elif key == "required":
continue
else:
props.update(EventMeta._collect_properties(value))
return props
@staticmethod
def property_to_python(property_spec):
"""
Returns a list of appropriate python-native datatypes for a schema property.
        Based on the event class's schema, a list of callables is returned that a
        value might be tried against. The list is ordered from most to least strict
        so as to prevent falsely casting values to a less strict type.
Possible types taken from JSON schema validation specification
http://json-schema.org/latest/json-schema-validation.html#rfc.section.6.1.1
"""
propformat = property_spec.get("format")
if propformat == "date-time":
return [dateparse]
if propformat == "uuid":
return [UUID]
proptypes = property_spec.get("type")
if not proptypes:
return []
if not isinstance(proptypes, list):
proptypes = [proptypes]
callables = []
if "null" in proptypes:
callables.append(to_none)
if "boolean" in proptypes:
callables.append(to_bool)
if "integer" in proptypes:
callables.append(int)
if "number" in proptypes:
callables.append(float)
return callables
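# Hypothetical subclass sketch (names are illustrative; assumes an Event base
# class that uses EventMeta as its metaclass and a schema file under
# SCHEMATA_PATH whose "title" is "ProbeEvent"):
#   class ProbeEvent(Event):
#       schema = 'probe-event.json'
#       table = 'probe_events'
# EventMeta.__new__ then resolves the schema, collects `properties`, and
# builds `property_cast_mapping` so values can be coerced to Python types
# (UUID, datetime, bool, ...) when events are instantiated.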
| 34.9
| 88
| 0.61915
|
from abc import ABCMeta
from uuid import UUID
import jsonschema
from dateutil.parser import parse as dateparse
from uptimer.events import SCHEMATA_PATH
from uptimer.events.cache import schema_cache
from uptimer.helpers import to_bool, to_none
class EventDefinitionError(ValueError):
pass
class EventMeta(ABCMeta, metaclass=ABCMeta):
schema_path: str = f"file:///{SCHEMATA_PATH}"
def __new__(cls, name, bases, attrs, **kwargs):
super_new = super().__new__
schema = attrs.pop("schema", None)
table = attrs.pop("table", False)
if not schema:
raise EventDefinitionError(f"Class {name} did not declare a JSON schema.")
if table is False:
raise EventDefinitionError(
f"Class {name} did not declare a database table mapping."
)
schema_spec = schema_cache[schema]
if schema_spec["title"] != name:
raise EventDefinitionError(
f"Name of class {name} must be equal to "
f"JSON schema title '{schema_spec['title']}'"
)
properties_dict = cls._collect_properties(schema_spec)
properties = list(properties_dict.keys())
property_cast_mapping = {
prop: cls.property_to_python(spec) for prop, spec in properties_dict.items()
}
resolver = jsonschema.RefResolver(cls.schema_path, schema_spec)
attrs.update(
dict(
schema=schema,
table=table,
schema_spec=schema_spec,
properties_dict=properties_dict,
properties=properties,
property_cast_mapping=property_cast_mapping,
_resolver=resolver,
)
)
return super_new(cls, name, bases, attrs, **kwargs)
@staticmethod
def _collect_properties(schema):
props = dict()
array_iter = []
if isinstance(schema, list):
array_iter = enumerate(schema)
elif isinstance(schema, dict):
array_iter = schema.items()
for key, value in array_iter:
if key == "properties":
props.update(value)
elif key == "required":
continue
else:
props.update(EventMeta._collect_properties(value))
return props
@staticmethod
def property_to_python(property_spec):
propformat = property_spec.get("format")
if propformat == "date-time":
return [dateparse]
if propformat == "uuid":
return [UUID]
proptypes = property_spec.get("type")
if not proptypes:
return []
if not isinstance(proptypes, list):
proptypes = [proptypes]
callables = []
if "null" in proptypes:
callables.append(to_none)
if "boolean" in proptypes:
callables.append(to_bool)
if "integer" in proptypes:
callables.append(int)
if "number" in proptypes:
callables.append(float)
return callables
| true
| true
|
f71a191b20700bf1958d34785c00621fcbe6eda7
| 12,820
|
py
|
Python
|
hvac/api/secrets_engines/gcp.py
|
ddeka2910/hvac
|
80cf3950157bf003ee6622e6db84bb9d6c90e5f1
|
[
"Apache-2.0"
] | 1
|
2020-12-14T04:01:10.000Z
|
2020-12-14T04:01:10.000Z
|
hvac/api/secrets_engines/gcp.py
|
ddeka2910/hvac
|
80cf3950157bf003ee6622e6db84bb9d6c90e5f1
|
[
"Apache-2.0"
] | 2
|
2019-07-08T03:09:38.000Z
|
2021-07-08T18:17:51.000Z
|
hvac/api/secrets_engines/gcp.py
|
ddeka2910/hvac
|
80cf3950157bf003ee6622e6db84bb9d6c90e5f1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Gcp methods module."""
import json
import logging
from hvac import exceptions, utils
from hvac.api.vault_api_base import VaultApiBase
from hvac.constants.gcp import ALLOWED_SECRETS_TYPES, SERVICE_ACCOUNT_KEY_ALGORITHMS, SERVICE_ACCOUNT_KEY_TYPES
DEFAULT_MOUNT_POINT = 'gcp'
class Gcp(VaultApiBase):
"""Google Cloud Secrets Engine (API).
Reference: https://www.vaultproject.io/api/secret/gcp/index.html
"""
def configure(self, credentials=None, ttl=None, max_ttl=None, mount_point=DEFAULT_MOUNT_POINT):
"""Configure shared information for the Gcp secrets engine.
Supported methods:
POST: /{mount_point}/config. Produces: 204 (empty body)
:param credentials: JSON credentials (either file contents or '@path/to/file') See docs for alternative ways to
pass in to this parameter, as well as the required permissions.
:type credentials: str | unicode
        :param ttl: Specifies default config TTL for long-lived credentials (i.e. service account keys). Accepts
            integer number of seconds or Go duration format string.
        :type ttl: int | str
        :param max_ttl: Specifies the maximum config TTL for long-lived credentials (i.e. service account keys). Accepts
            integer number of seconds or Go duration format string.
        :type max_ttl: int | str
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
params = utils.remove_nones({
'credentials': credentials,
'ttl': ttl,
'max_ttl': max_ttl,
})
api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point)
return self._adapter.post(
url=api_path,
json=params,
)
def read_config(self, mount_point=DEFAULT_MOUNT_POINT):
"""Read the configured shared information for the Gcp secrets engine.
Credentials will be omitted from returned data.
Supported methods:
GET: /{mount_point}/config. Produces: 200 application/json
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point)
return self._adapter.get(
url=api_path,
)
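    # Usage sketch (an assumption, not from this file): with an authenticated
    # hvac.Client named `client` and this engine mounted at the default
    # "gcp" path, the two calls above are reachable as:
    #   client.secrets.gcp.configure(credentials=credentials_json, ttl=3600)
    #   client.secrets.gcp.read_config()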
def create_or_update_roleset(self, name, project, bindings, secret_type=None, token_scopes=None,
mount_point=DEFAULT_MOUNT_POINT):
"""Create a roleset or update an existing roleset.
See roleset docs for the GCP secrets backend to learn more about what happens when you create or update a
roleset.
Supported methods:
POST: /{mount_point}/roleset/{name}. Produces: 204 (empty body)
:param name: Name of the role. Cannot be updated.
:type name: str | unicode
:param project: Name of the GCP project that this roleset's service account will belong to. Cannot be updated.
:type project: str | unicode
:param bindings: Bindings configuration string (expects HCL or JSON format in raw or base64-encoded string)
:type bindings: str | unicode
:param secret_type: Cannot be updated.
:type secret_type: str | unicode
:param token_scopes: List of OAuth scopes to assign to access_token secrets generated under this role set
(access_token role sets only)
:type token_scopes: list[str]
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
if secret_type is not None and secret_type not in ALLOWED_SECRETS_TYPES:
error_msg = 'unsupported secret_type argument provided "{arg}", supported types: "{secret_type}"'
raise exceptions.ParamValidationError(error_msg.format(
arg=secret_type,
secret_type=','.join(ALLOWED_SECRETS_TYPES),
))
if isinstance(bindings, dict):
bindings = json.dumps(bindings).replace(' ', '')
logging.debug('bindings: %s' % bindings)
params = {
'project': project,
'bindings': bindings,
}
params.update(
utils.remove_nones({
'secret_type': secret_type,
'token_scopes': token_scopes,
})
)
api_path = utils.format_url(
'/v1/{mount_point}/roleset/{name}',
mount_point=mount_point,
name=name,
)
return self._adapter.post(
url=api_path,
json=params,
)
def rotate_roleset_account(self, name, mount_point=DEFAULT_MOUNT_POINT):
"""Rotate the service account this roleset uses to generate secrets.
This also replaces the key access_token roleset. This can be used to invalidate old secrets generated by the
roleset or fix issues if a roleset's service account (and/or keys) was changed outside of Vault (i.e.
through GCP APIs/cloud console).
Supported methods:
POST: /{mount_point}/roleset/{name}/rotate. Produces: 204 (empty body)
:param name: Name of the role.
:type name: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
api_path = utils.format_url(
'/v1/{mount_point}/roleset/{name}/rotate',
mount_point=mount_point,
name=name,
)
return self._adapter.post(
url=api_path,
)
def rotate_roleset_account_key(self, name, mount_point=DEFAULT_MOUNT_POINT):
"""Rotate the service account key this roleset uses to generate access tokens.
This does not recreate the roleset service account.
Supported methods:
POST: /{mount_point}/roleset/{name}/rotate-key. Produces: 204 (empty body)
:param name: Name of the role.
:type name: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
api_path = utils.format_url(
'/v1/{mount_point}/roleset/{name}/rotate-key',
mount_point=mount_point,
name=name
)
return self._adapter.post(
url=api_path,
)
def read_roleset(self, name, mount_point=DEFAULT_MOUNT_POINT):
"""Read a roleset.
Supported methods:
GET: /{mount_point}/roleset/{name}. Produces: 200 application/json
:param name: Name of the role.
:type name: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
api_path = utils.format_url(
'/v1/{mount_point}/roleset/{name}',
mount_point=mount_point,
name=name,
)
return self._adapter.get(
url=api_path,
)
def list_rolesets(self, mount_point=DEFAULT_MOUNT_POINT):
"""List configured rolesets.
Supported methods:
LIST: /{mount_point}/rolesets. Produces: 200 application/json
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
api_path = utils.format_url('/v1/{mount_point}/rolesets', mount_point=mount_point)
return self._adapter.list(
url=api_path,
)
def delete_roleset(self, name, mount_point=DEFAULT_MOUNT_POINT):
"""Delete an existing roleset by the given name.
Supported methods:
DELETE: /{mount_point}/roleset/{name} Produces: 200 application/json
:param name: Name of the role.
:type name: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
api_path = utils.format_url(
'/v1/{mount_point}/roleset/{name}',
name=name,
mount_point=mount_point,
)
return self._adapter.delete(
url=api_path,
)
def generate_oauth2_access_token(self, roleset, mount_point=DEFAULT_MOUNT_POINT):
"""Generate an OAuth2 token with the scopes defined on the roleset.
This OAuth access token can be used in GCP API calls, e.g. curl -H "Authorization: Bearer $TOKEN" ...
Supported methods:
GET: /{mount_point}/token/{roleset}. Produces: 200 application/json
:param roleset: Name of a roleset with secret type access_token to generate the access token under.
:type roleset: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
api_path = utils.format_url(
'/v1/{mount_point}/token/{roleset}',
mount_point=mount_point,
roleset=roleset,
)
return self._adapter.get(
url=api_path,
)
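# Illustrative usage sketch (added for clarity; assumes an authenticated client
# and an existing access_token roleset named 'my-roleset'):
#
#   response = client.secrets.gcp.generate_oauth2_access_token(roleset='my-roleset')
#   token = response['data']['token']  # field name per the Vault GCP engine docs;
#                                      # verify against your Vault version
#   # send as:  Authorization: Bearer <token>  on GCP API calls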
def generate_service_account_key(self, roleset, key_algorithm='KEY_ALG_RSA_2048',
key_type='TYPE_GOOGLE_CREDENTIALS_FILE', method='POST',
mount_point=DEFAULT_MOUNT_POINT):
"""Generate Secret (IAM Service Account Creds): Service Account Key
If using GET ('read'), the optional parameters will be set to their defaults. Use POST if you want to specify
different values for these params.
:param roleset: Name of a roleset with secret type service_account_key to generate the key under.
:type roleset: str | unicode
:param key_algorithm: Key algorithm used to generate the key. Defaults to a 2048-bit RSA key; you probably should
    not choose other values (e.g., a 1024-bit key).
:type key_algorithm: str | unicode
:param key_type: Private key type to generate. Defaults to JSON credentials file.
:type key_type: str | unicode
:param method: Supported methods:
POST: /{mount_point}/key/{roleset}. Produces: 200 application/json
GET: /{mount_point}/key/{roleset}. Produces: 200 application/json
:type method: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
api_path = utils.format_url(
'/v1/{mount_point}/key/{roleset}',
mount_point=mount_point,
roleset=roleset,
)
if method == 'POST':
if key_algorithm not in SERVICE_ACCOUNT_KEY_ALGORITHMS:
error_msg = 'unsupported key_algorithm argument provided "{arg}", supported algorithms: "{algorithms}"'
raise exceptions.ParamValidationError(error_msg.format(
arg=key_algorithm,
algorithms=','.join(SERVICE_ACCOUNT_KEY_ALGORITHMS),
))
if key_type not in SERVICE_ACCOUNT_KEY_TYPES:
error_msg = 'unsupported key_type argument provided "{arg}", supported types: "{key_types}"'
raise exceptions.ParamValidationError(error_msg.format(
arg=key_type,
key_types=','.join(SERVICE_ACCOUNT_KEY_TYPES),
))
params = {
'key_algorithm': key_algorithm,
'key_type': key_type,
}
response = self._adapter.post(
url=api_path,
json=params,
)
elif method == 'GET':
response = self._adapter.get(
url=api_path,
)
else:
error_message = '"method" parameter provided invalid value; POST or GET allowed, "{method}" provided'.format(method=method)
raise exceptions.ParamValidationError(error_message)
return response
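# Illustrative usage sketch (added for clarity; assumes an authenticated client
# and an existing service_account_key roleset named 'my-key-roleset'). The
# response field name below follows the Vault GCP engine docs and should be
# verified against your Vault version.
#
#   import base64
#   response = client.secrets.gcp.generate_service_account_key(roleset='my-key-roleset')
#   creds_json = base64.b64decode(response['data']['private_key_data'])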
| 39.690402
| 135
| 0.616147
|
import json
import logging
from hvac import exceptions, utils
from hvac.api.vault_api_base import VaultApiBase
from hvac.constants.gcp import ALLOWED_SECRETS_TYPES, SERVICE_ACCOUNT_KEY_ALGORITHMS, SERVICE_ACCOUNT_KEY_TYPES
DEFAULT_MOUNT_POINT = 'gcp'
class Gcp(VaultApiBase):
def configure(self, credentials=None, ttl=None, max_ttl=None, mount_point=DEFAULT_MOUNT_POINT):
params = utils.remove_nones({
'credentials': credentials,
'ttl': ttl,
'max_ttl': max_ttl,
})
api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point)
return self._adapter.post(
url=api_path,
json=params,
)
def read_config(self, mount_point=DEFAULT_MOUNT_POINT):
api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point)
return self._adapter.get(
url=api_path,
)
def create_or_update_roleset(self, name, project, bindings, secret_type=None, token_scopes=None,
mount_point=DEFAULT_MOUNT_POINT):
if secret_type is not None and secret_type not in ALLOWED_SECRETS_TYPES:
error_msg = 'unsupported secret_type argument provided "{arg}", supported types: "{secret_type}"'
raise exceptions.ParamValidationError(error_msg.format(
arg=secret_type,
secret_type=','.join(ALLOWED_SECRETS_TYPES),
))
if isinstance(bindings, dict):
bindings = json.dumps(bindings).replace(' ', '')
logging.debug('bindings: %s' % bindings)
params = {
'project': project,
'bindings': bindings,
}
params.update(
utils.remove_nones({
'secret_type': secret_type,
'token_scopes': token_scopes,
})
)
api_path = utils.format_url(
'/v1/{mount_point}/roleset/{name}',
mount_point=mount_point,
name=name,
)
return self._adapter.post(
url=api_path,
json=params,
)
def rotate_roleset_account(self, name, mount_point=DEFAULT_MOUNT_POINT):
api_path = utils.format_url(
'/v1/{mount_point}/roleset/{name}/rotate',
mount_point=mount_point,
name=name,
)
return self._adapter.post(
url=api_path,
)
def rotate_roleset_account_key(self, name, mount_point=DEFAULT_MOUNT_POINT):
api_path = utils.format_url(
'/v1/{mount_point}/roleset/{name}/rotate-key',
mount_point=mount_point,
name=name
)
return self._adapter.post(
url=api_path,
)
def read_roleset(self, name, mount_point=DEFAULT_MOUNT_POINT):
api_path = utils.format_url(
'/v1/{mount_point}/roleset/{name}',
mount_point=mount_point,
name=name,
)
return self._adapter.get(
url=api_path,
)
def list_rolesets(self, mount_point=DEFAULT_MOUNT_POINT):
api_path = utils.format_url('/v1/{mount_point}/rolesets', mount_point=mount_point)
return self._adapter.list(
url=api_path,
)
def delete_roleset(self, name, mount_point=DEFAULT_MOUNT_POINT):
api_path = utils.format_url(
'/v1/{mount_point}/roleset/{name}',
name=name,
mount_point=mount_point,
)
return self._adapter.delete(
url=api_path,
)
def generate_oauth2_access_token(self, roleset, mount_point=DEFAULT_MOUNT_POINT):
api_path = utils.format_url(
'/v1/{mount_point}/token/{roleset}',
mount_point=mount_point,
roleset=roleset,
)
return self._adapter.get(
url=api_path,
)
def generate_service_account_key(self, roleset, key_algorithm='KEY_ALG_RSA_2048',
key_type='TYPE_GOOGLE_CREDENTIALS_FILE', method='POST',
mount_point=DEFAULT_MOUNT_POINT):
api_path = utils.format_url(
'/v1/{mount_point}/key/{roleset}',
mount_point=mount_point,
roleset=roleset,
)
if method == 'POST':
if key_algorithm not in SERVICE_ACCOUNT_KEY_ALGORITHMS:
error_msg = 'unsupported key_algorithm argument provided "{arg}", supported algorithms: "{algorithms}"'
raise exceptions.ParamValidationError(error_msg.format(
arg=key_algorithm,
algorithms=','.join(SERVICE_ACCOUNT_KEY_ALGORITHMS),
))
if key_type not in SERVICE_ACCOUNT_KEY_TYPES:
error_msg = 'unsupported key_type argument provided "{arg}", supported types: "{key_types}"'
raise exceptions.ParamValidationError(error_msg.format(
arg=key_type,
key_types=','.join(SERVICE_ACCOUNT_KEY_TYPES),
))
params = {
'key_algorithm': key_algorithm,
'key_type': key_type,
}
response = self._adapter.post(
url=api_path,
json=params,
)
elif method == 'GET':
response = self._adapter.get(
url=api_path,
)
else:
error_message = '"method" parameter provided invalid value; POST or GET allowed, "{method}" provided'.format(method=method)
raise exceptions.ParamValidationError(error_message)
return response
| true
| true
|
f71a193cb6d839929618acd446da28cc742371b1
| 2,846
|
py
|
Python
|
examples/tutorial_api_python/02_whole_body_from_image.py
|
ExSidius/openpose
|
69f64206d63a156fa60e9a0a0de6738d27d1c00d
|
[
"DOC"
] | 12
|
2019-05-10T09:56:39.000Z
|
2021-08-09T03:42:28.000Z
|
examples/tutorial_api_python/02_whole_body_from_image.py
|
ExSidius/openpose
|
69f64206d63a156fa60e9a0a0de6738d27d1c00d
|
[
"DOC"
] | null | null | null |
examples/tutorial_api_python/02_whole_body_from_image.py
|
ExSidius/openpose
|
69f64206d63a156fa60e9a0a0de6738d27d1c00d
|
[
"DOC"
] | 7
|
2019-06-14T03:38:09.000Z
|
2021-08-09T03:43:27.000Z
|
# From Python
# It requires OpenCV installed for Python
import sys
import cv2
import os
from sys import platform
import argparse
# Import Openpose (Windows/Ubuntu/OSX)
dir_path = os.path.dirname(os.path.realpath(__file__))
try:
# Windows Import
if platform == "win32":
# Change these variables to point to the correct folder (Release/x64 etc.)
sys.path.append(dir_path + '/../../python/openpose/Release');
os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
import pyopenpose as op
else:
# Change these variables to point to the correct folder (Release/x64 etc.)
sys.path.append('../../python');
# If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
# sys.path.append('/usr/local/python')
from openpose import pyopenpose as op
except ImportError as e:
print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
raise e
# Flags
parser = argparse.ArgumentParser()
parser.add_argument("--image_path", default="../../../examples/media/COCO_val2014_000000000241.jpg", help="Process an image. Read all standard formats (jpg, png, bmp, etc.).")
args = parser.parse_known_args()
# Custom Params (refer to include/openpose/flags.hpp for more parameters)
params = dict()
params["model_folder"] = "../../../models/"
params["face"] = True
params["hand"] = True
# Fold any extra command line flags into the params dict
for i in range(0, len(args[1])):
curr_item = args[1][i]
if i != len(args[1])-1: next_item = args[1][i+1]
else: next_item = "1"
if "--" in curr_item and "--" in next_item:
key = curr_item.replace('-','')
if key not in params: params[key] = "1"
elif "--" in curr_item and "--" not in next_item:
key = curr_item.replace('-','')
if key not in params: params[key] = next_item
# Construct it from system arguments
# op.init_argv(args[1])
# oppython = op.OpenposePython()
# Starting OpenPose
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
# Process Image
datum = op.Datum()
imageToProcess = cv2.imread(args[0].image_path)
datum.cvInputData = imageToProcess
opWrapper.emplaceAndPop([datum])
# Display Image
print("Body keypoints: \n" + str(datum.poseKeypoints))
print("Face keypoints: \n" + str(datum.faceKeypoints))
print("Left hand keypoints: \n" + str(datum.handKeypoints[0]))
print("Right hand keypoints: \n" + str(datum.handKeypoints[1]))
cv2.imshow("OpenPose 1.4.0 - Tutorial Python API", datum.cvOutputData)
cv2.waitKey(0)
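# Note (added for clarity; not in the original tutorial): the keypoint outputs
# printed above are numpy arrays. With the default body model,
# datum.poseKeypoints typically has shape (num_people, num_body_parts, 3),
# where the last axis is (x, y, confidence); face and hand keypoints follow the
# same layout with their own part counts, which depend on the configured models.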
| 38.986301
| 289
| 0.685875
|
import sys
import cv2
import os
from sys import platform
import argparse
dir_path = os.path.dirname(os.path.realpath(__file__))
try:
if platform == "win32":
sys.path.append(dir_path + '/../../python/openpose/Release');
os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
import pyopenpose as op
else:
sys.path.append('../../python');
from openpose import pyopenpose as op
except ImportError as e:
print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
raise e
parser = argparse.ArgumentParser()
parser.add_argument("--image_path", default="../../../examples/media/COCO_val2014_000000000241.jpg", help="Process an image. Read all standard formats (jpg, png, bmp, etc.).")
args = parser.parse_known_args()
params = dict()
params["model_folder"] = "../../../models/"
params["face"] = True
params["hand"] = True
for i in range(0, len(args[1])):
curr_item = args[1][i]
if i != len(args[1])-1: next_item = args[1][i+1]
else: next_item = "1"
if "--" in curr_item and "--" in next_item:
key = curr_item.replace('-','')
if key not in params: params[key] = "1"
elif "--" in curr_item and "--" not in next_item:
key = curr_item.replace('-','')
if key not in params: params[key] = next_item
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
datum = op.Datum()
imageToProcess = cv2.imread(args[0].image_path)
datum.cvInputData = imageToProcess
opWrapper.emplaceAndPop([datum])
print("Body keypoints: \n" + str(datum.poseKeypoints))
print("Face keypoints: \n" + str(datum.faceKeypoints))
print("Left hand keypoints: \n" + str(datum.handKeypoints[0]))
print("Right hand keypoints: \n" + str(datum.handKeypoints[1]))
cv2.imshow("OpenPose 1.4.0 - Tutorial Python API", datum.cvOutputData)
cv2.waitKey(0)
| true
| true
|
f71a1a2a2d27e09348b69858a543626888f37405
| 21,978
|
py
|
Python
|
lingvo/core/conv_layers_builder_test.py
|
Harshs27/lingvo
|
bd396e651488b2e2c4a7416be077b4a0226c87c8
|
[
"Apache-2.0"
] | 2,611
|
2018-10-16T20:14:10.000Z
|
2022-03-31T14:48:41.000Z
|
lingvo/core/conv_layers_builder_test.py
|
Harshs27/lingvo
|
bd396e651488b2e2c4a7416be077b4a0226c87c8
|
[
"Apache-2.0"
] | 249
|
2018-10-27T06:02:29.000Z
|
2022-03-30T18:00:39.000Z
|
lingvo/core/conv_layers_builder_test.py
|
Harshs27/lingvo
|
bd396e651488b2e2c4a7416be077b4a0226c87c8
|
[
"Apache-2.0"
] | 436
|
2018-10-25T05:31:45.000Z
|
2022-03-31T07:26:03.000Z
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conv layers builder."""
from absl.testing import parameterized
from lingvo import compat as tf
from lingvo.core import bn_layers
from lingvo.core import conv_layers_builder
from lingvo.core import conv_layers_with_time_padding
from lingvo.core import layers
from lingvo.core import test_utils
import numpy as np
class ConvPaddedLayersTest(test_utils.TestCase):
def _ConvTestHelper(self, dilation, stride, activation, batch_norm,
weight_norm, in_dim, out_dim, filter_shape, conv_last,
causal_conv):
with self.session(use_gpu=True) as sess:
p1 = layers.Conv2DLayer.Params().Set(
name='conv_2d01',
filter_shape=filter_shape + [in_dim, out_dim],
filter_stride=stride,
dilation_rate=dilation,
activation=activation,
batch_norm=batch_norm,
weight_norm=weight_norm,
bias=not batch_norm,
conv_last=conv_last,
causal_convolution=causal_conv)
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=weight_norm)
if batch_norm:
norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(
decay=0.999)
builder_params.norm_layer_tpl = norm_p
else:
builder_params.norm_layer_tpl = None
p2 = builder_params.Instantiate().Conv2D(
'conv_2d02',
in_dim,
out_dim,
filter_shape,
stride=stride,
dilation=dilation,
activation=activation,
conv_last=conv_last,
is_causal=causal_conv)
l1 = p1.Instantiate()
l2 = p2.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
l1_theta = l1.theta.Transform(tf.identity)
l2_theta = l2.theta.Transform(tf.identity)
conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)
conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)
tf.logging.info(l1_theta)
tf.logging.info(l2_theta)
l1_num_vars = l1_theta.Flatten()
l2_num_var2 = l2_theta.Flatten()
if len(l1_num_vars) != len(l2_num_var2):
tf.logging.info(
'Mismatched number of vars: l1: %d vars, l2: %d vars',
len(l1_num_vars), len(l2_num_var2))
w1 = l1_theta.w
w2 = l2_theta.conv_2d.w
# b1 = l1_theta.b
# b2 = l2_theta.bn_or_bias.b
tf.global_variables_initializer().run()
v1, p1 = sess.run([conv_out1, out1_padding])
w1_v = sess.run(w1)
v2, p2 = sess.run([conv_out2, out2_padding], feed_dict={w2: w1_v})
self.assertAllClose(v1, v2)
self.assertAllClose(p1, p2)
def testConvBasic(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'NONE'
batch_norm = False
weight_norm = False
in_dim = 3
out_dim = 3
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,
in_dim, out_dim, filter_shape, conv_last, causal_conv)
def testConvBnWnTanh(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
out_dim = 3
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,
in_dim, out_dim, filter_shape, conv_last, causal_conv)
def testConvGn(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'TANH'
in_dim = 3
out_dim = 4
filter_shape = [2, 2]
conv_last = False
causal_conv = False
with self.session(use_gpu=True) as sess:
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=True)
builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(
num_groups=2)
p = builder_params.Instantiate().Conv2D(
'conv_2d02',
in_dim,
out_dim,
filter_shape,
stride=stride,
dilation=dilation,
activation=activation,
conv_last=conv_last,
is_causal=causal_conv)
l = p.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)
tf.global_variables_initializer().run()
v = sess.run(tf.reduce_sum(conv_out, 0))
expected_out = [[[-0.35070014, -1.7821487, 0.8349923, 1.1709788],
[-0.18872532, 0.9702145, 0.5534694, -1.1386856]],
[[0.34970748, -0.5403709, -0.9809327, -2.0930214],
[0.54232424, 1.1565661, 1.0349312, 1.3458138]],
[[0, 0, 0, 0], [0, 0, 0, 0]]]
self.assertAllClose(v, expected_out)
def testConvLastWnTanh(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'TANH'
batch_norm = False
weight_norm = True
in_dim = 3
out_dim = 3
filter_shape = [2, 2]
conv_last = True
causal_conv = False
self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,
in_dim, out_dim, filter_shape, conv_last, causal_conv)
def testConvLastCausal(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
out_dim = 3
filter_shape = [2, 1]
conv_last = True
causal_conv = True
self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,
in_dim, out_dim, filter_shape, conv_last, causal_conv)
def _DepthwiseConvTestHelper(self, dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv):
with self.session(use_gpu=True) as sess:
p1 = layers.DepthwiseConv2DLayer.Params().Set(
name='conv_2d01',
filter_shape=filter_shape + [in_dim, depth_multiplier],
filter_stride=stride,
dilation_rate=dilation,
activation=activation,
batch_norm=batch_norm,
weight_norm=weight_norm,
bias=not batch_norm,
conv_last=conv_last,
causal_convolution=causal_conv)
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=weight_norm)
if batch_norm:
norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(
decay=0.999)
builder_params.norm_layer_tpl = norm_p
else:
builder_params.norm_layer_tpl = None
p2 = builder_params.Instantiate().DepthwiseConv2D(
'conv_2d02',
in_dim,
depth_multiplier,
filter_shape,
stride=stride,
activation=activation,
dilation=dilation,
conv_last=conv_last,
is_causal=causal_conv)
l1 = p1.Instantiate()
l2 = p2.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
l1_theta = l1.theta.Transform(tf.identity)
l2_theta = l2.theta.Transform(tf.identity)
conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)
conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)
tf.logging.info(l1_theta)
tf.logging.info(l2_theta)
l1_num_vars = l1_theta.Flatten()
l2_num_var2 = l2_theta.Flatten()
if len(l1_num_vars) != len(l2_num_var2):
tf.logging.info(
'Mismatched number of vars: l1: %d vars, l2: %d vars',
len(l1_num_vars), len(l2_num_var2))
w1 = l1_theta.w
w2 = l2_theta.conv_2d.w
# b1 = l1_theta.b
# b2 = l2_theta.bn_or_bias.b
tf.global_variables_initializer().run()
v1, p1 = sess.run([conv_out1, out1_padding])
w1_v = sess.run([w1])[0]
v2, p2 = sess.run([conv_out2, out2_padding], feed_dict={w2: w1_v})
self.assertAllClose(v1, v2)
self.assertAllClose(p1, p2)
def testDepthConvBasic(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'NONE'
batch_norm = False
weight_norm = False
in_dim = 3
depth_multiplier = 2
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv)
def testDepthConvBnWnTanh(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
depth_multiplier = 3
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv)
def testDepthConvGn(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
in_dim = 4
depth_multiplier = 1
filter_shape = [2, 2]
conv_last = False
causal_conv = False
with self.session(use_gpu=True) as sess:
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=True)
builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(
num_groups=2)
p = builder_params.Instantiate().DepthwiseConv2D(
'conv_2d02',
in_dim,
depth_multiplier,
filter_shape,
stride=stride,
activation=activation,
dilation=dilation,
conv_last=conv_last,
is_causal=causal_conv)
l = p.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 4]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)
tf.global_variables_initializer().run()
v = sess.run(tf.reduce_sum(conv_out, 0))
expected_out = [[[-0.77095497, 0.30285388, -0.05714864, 1.0386012],
[0.74034333, 0.04982221, -0.41769135, -2.9531932],
[-0.2647084, -0.1936804, 0.6598473, 0.42537105]],
[[1.3095646, -0.85996866, 2.2734299, -1.8457952],
[-0.9542263, -0.14199251, 0.51472515, 0.91931283],
[0.47267163, 1.4824618, 0.4548889, 0.93488806]],
[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]]]
self.assertAllClose(expected_out, v)
def testDepthConvLastWnTanh(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = False
weight_norm = True
in_dim = 3
depth_multiplier = 3
filter_shape = [2, 2]
conv_last = True
causal_conv = False
self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv)
def testDepthConvLastCausal(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
depth_multiplier = 3
filter_shape = [2, 1]
conv_last = True
causal_conv = True
self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv)
def _SeparableConvTestHelper(self, dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier, out_dim,
filter_shape, conv_last, causal_conv,
assert_equality=True):
with self.session(use_gpu=True) as sess:
p1 = layers.SeparableConv2DLayer.Params().Set(
name='conv_2d01',
filter_shape=filter_shape + [in_dim, out_dim],
depth_multiplier=depth_multiplier,
filter_stride=stride,
dilation_rate=dilation,
activation=activation,
batch_norm=batch_norm,
weight_norm=weight_norm,
bias=not batch_norm,
conv_last=conv_last,
causal_convolution=causal_conv)
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=weight_norm)
if batch_norm:
norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(
decay=0.999)
builder_params.norm_layer_tpl = norm_p
else:
builder_params.norm_layer_tpl = None
p2 = builder_params.Instantiate().SeparableConv2D(
'conv_2d02',
in_dim,
out_dim,
depth_multiplier,
filter_shape,
stride=stride,
activation=activation,
dilation=dilation,
conv_last=conv_last,
is_causal=causal_conv)
l1 = p1.Instantiate()
l2 = p2.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
l1_theta = l1.theta.Transform(tf.identity)
l2_theta = l2.theta.Transform(tf.identity)
conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)
conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)
tf.logging.info(l1_theta)
tf.logging.info(l2_theta)
l1_num_vars = l1_theta.Flatten()
l2_num_var2 = l2_theta.Flatten()
if len(l1_num_vars) != len(l2_num_var2):
tf.logging.info(
'Mismatched number of vars: l1: %d vars, l2: %d vars',
len(l1_num_vars), len(l2_num_var2))
pointwise_conv_w1 = l1_theta.w
depth_conv_w1 = l1_theta.depthwise_conv.w
pointwise_conv_w2 = l2_theta.conv_1x1.w
depth_conv_w2 = l2_theta.conv_2d.w
# b1 = l1_theta.b
# b2 = l2_theta.bn_or_bias.b
tf.global_variables_initializer().run()
v1, p1 = sess.run([conv_out1, out1_padding])
p_w1_v, d_w1_v = sess.run([pointwise_conv_w1, depth_conv_w1])
v2, p2 = sess.run([conv_out2, out2_padding],
feed_dict={
pointwise_conv_w2: p_w1_v,
depth_conv_w2: d_w1_v
})
if assert_equality:
self.assertAllClose(v1, v2)
self.assertAllClose(p1, p2)
def testSeparableConv2DLayerBasic(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'NONE'
batch_norm = False
weight_norm = False
in_dim = 3
depth_multiplier = 3
out_dim = 2
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
out_dim, filter_shape, conv_last, causal_conv)
def testSeparableConvWnWnTanh(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = False
weight_norm = True
in_dim = 3
depth_multiplier = 3
out_dim = 2
filter_shape = [2, 1]
conv_last = False
causal_conv = True
self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
out_dim, filter_shape, conv_last, causal_conv)
def testSeparableConvLastBnWnTanh(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
depth_multiplier = 3
out_dim = 2
filter_shape = [2, 1]
conv_last = True
causal_conv = True
# The new implementation is not equivalent to the old one.
self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
out_dim, filter_shape, conv_last, causal_conv,
assert_equality=False)
def testSeparableConvGn(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
in_dim = 4
depth_multiplier = 1
out_dim = 2
filter_shape = [2, 1]
conv_last = True
causal_conv = True
with self.session(use_gpu=True) as sess:
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=True)
builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(
num_groups=2)
p = builder_params.Instantiate().SeparableConv2D(
'conv_2d02',
in_dim,
out_dim,
depth_multiplier,
filter_shape,
stride=stride,
activation=activation,
dilation=dilation,
conv_last=conv_last,
is_causal=causal_conv)
l = p.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 4]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)
tf.global_variables_initializer().run()
v = sess.run(tf.reduce_sum(conv_out, 0))
expected_out = [[[0.00963847, -0.04019006], [0.36265337, -0.06592329],
[0.65582913, -0.1533944]],
[[0.7512939, -0.7282307], [0.96100605, -1.9509676],
[0.4639647, 0.2485837]], [[0., 0.], [0., 0.], [0., 0.]]]
self.assertAllClose(expected_out, v)
class CausalPoolingLayerTest(test_utils.TestCase, parameterized.TestCase):
"""Tests for CausalPoolingLayer."""
@parameterized.named_parameters(
{
'testcase_name': 'max_pooling',
'pooling_type': 'MAX',
'left_context': 2,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, 0, 2, 4, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
}, {
'testcase_name': 'avg_pooling',
'pooling_type': 'AVG',
'left_context': 2,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, -1, 1, 3, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
}, {
'testcase_name': 'max_pooling_large_window',
'pooling_type': 'MAX',
'left_context': 10,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, 0, 2, 4, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
}, {
'testcase_name': 'avg_pooling_large_window',
'pooling_type': 'AVG',
'left_context': 10,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, -1, 0, 1, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
}, {
'testcase_name': 'avg_pooling_infinite_window',
'pooling_type': 'AVG',
'left_context': -1,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, -1, 0, 1, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
})
def testSimpleCase(self, pooling_type, left_context, inputs, input_paddings,
expected_output, expected_output_padding):
inputs = inputs[np.newaxis, :, np.newaxis, np.newaxis]
input_paddings = input_paddings[np.newaxis, :]
param = conv_layers_builder.CausalPoolingLayer.Params().Set(
name='test_layer', pooling_type=pooling_type, left_context=left_context)
pooling_layer = param.Instantiate()
with self.session(use_gpu=True) as sess:
inputs = tf.convert_to_tensor(inputs, dtype=tf.float32)
input_paddings = tf.convert_to_tensor(input_paddings, dtype=tf.float32)
output, output_paddings = pooling_layer.FPropDefaultTheta(
inputs, input_paddings)
tf.global_variables_initializer().run()
output_val, output_paddings_val = sess.run([output, output_paddings])
self.assertAllClose(expected_output, output_val.flatten())
self.assertAllEqual(expected_output_padding, output_paddings_val.flatten())
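# Note (added for clarity; not in the original test file): the expected values
# above encode the causal pooling semantics under test -- each output frame t
# pools over the current frame plus at most (left_context - 1) preceding
# frames, never over future frames, and padded frames remain zero. For example,
# with AVG pooling and left_context=2 on inputs [-2, 0, 2, 4, ...]:
#   out[0] = -2, out[1] = (-2 + 0) / 2 = -1, out[2] = (0 + 2) / 2 = 1, out[3] = (2 + 4) / 2 = 3
# A left_context of -1 denotes an effectively unbounded (infinite) causal window.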
if __name__ == '__main__':
tf.test.main()
| 35.620746
| 80
| 0.602193
|
from absl.testing import parameterized
from lingvo import compat as tf
from lingvo.core import bn_layers
from lingvo.core import conv_layers_builder
from lingvo.core import conv_layers_with_time_padding
from lingvo.core import layers
from lingvo.core import test_utils
import numpy as np
class ConvPaddedLayersTest(test_utils.TestCase):
def _ConvTestHelper(self, dilation, stride, activation, batch_norm,
weight_norm, in_dim, out_dim, filter_shape, conv_last,
causal_conv):
with self.session(use_gpu=True) as sess:
p1 = layers.Conv2DLayer.Params().Set(
name='conv_2d01',
filter_shape=filter_shape + [in_dim, out_dim],
filter_stride=stride,
dilation_rate=dilation,
activation=activation,
batch_norm=batch_norm,
weight_norm=weight_norm,
bias=not batch_norm,
conv_last=conv_last,
causal_convolution=causal_conv)
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=weight_norm)
if batch_norm:
norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(
decay=0.999)
builder_params.norm_layer_tpl = norm_p
else:
builder_params.norm_layer_tpl = None
p2 = builder_params.Instantiate().Conv2D(
'conv_2d02',
in_dim,
out_dim,
filter_shape,
stride=stride,
dilation=dilation,
activation=activation,
conv_last=conv_last,
is_causal=causal_conv)
l1 = p1.Instantiate()
l2 = p2.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
l1_theta = l1.theta.Transform(tf.identity)
l2_theta = l2.theta.Transform(tf.identity)
conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)
conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)
tf.logging.info(l1_theta)
tf.logging.info(l2_theta)
l1_num_vars = l1_theta.Flatten()
l2_num_var2 = l2_theta.Flatten()
if len(l1_num_vars) != len(l2_num_var2):
tf.logging.info(
'Mismatched number of vars: l1: %d vars, l2: %d vars',
len(l1_num_vars), len(l2_num_var2))
w1 = l1_theta.w
w2 = l2_theta.conv_2d.w
tf.global_variables_initializer().run()
v1, p1 = sess.run([conv_out1, out1_padding])
w1_v = sess.run(w1)
v2, p2 = sess.run([conv_out2, out2_padding], feed_dict={w2: w1_v})
self.assertAllClose(v1, v2)
self.assertAllClose(p1, p2)
def testConvBasic(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'NONE'
batch_norm = False
weight_norm = False
in_dim = 3
out_dim = 3
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,
in_dim, out_dim, filter_shape, conv_last, causal_conv)
def testConvBnWnTanh(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
out_dim = 3
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,
in_dim, out_dim, filter_shape, conv_last, causal_conv)
def testConvGn(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'TANH'
in_dim = 3
out_dim = 4
filter_shape = [2, 2]
conv_last = False
causal_conv = False
with self.session(use_gpu=True) as sess:
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=True)
builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(
num_groups=2)
p = builder_params.Instantiate().Conv2D(
'conv_2d02',
in_dim,
out_dim,
filter_shape,
stride=stride,
dilation=dilation,
activation=activation,
conv_last=conv_last,
is_causal=causal_conv)
l = p.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)
tf.global_variables_initializer().run()
v = sess.run(tf.reduce_sum(conv_out, 0))
expected_out = [[[-0.35070014, -1.7821487, 0.8349923, 1.1709788],
[-0.18872532, 0.9702145, 0.5534694, -1.1386856]],
[[0.34970748, -0.5403709, -0.9809327, -2.0930214],
[0.54232424, 1.1565661, 1.0349312, 1.3458138]],
[[0, 0, 0, 0], [0, 0, 0, 0]]]
self.assertAllClose(v, expected_out)
def testConvLastWnTanh(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'TANH'
batch_norm = False
weight_norm = True
in_dim = 3
out_dim = 3
filter_shape = [2, 2]
conv_last = True
causal_conv = False
self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,
in_dim, out_dim, filter_shape, conv_last, causal_conv)
def testConvLastCausal(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
out_dim = 3
filter_shape = [2, 1]
conv_last = True
causal_conv = True
self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,
in_dim, out_dim, filter_shape, conv_last, causal_conv)
def _DepthwiseConvTestHelper(self, dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv):
with self.session(use_gpu=True) as sess:
p1 = layers.DepthwiseConv2DLayer.Params().Set(
name='conv_2d01',
filter_shape=filter_shape + [in_dim, depth_multiplier],
filter_stride=stride,
dilation_rate=dilation,
activation=activation,
batch_norm=batch_norm,
weight_norm=weight_norm,
bias=not batch_norm,
conv_last=conv_last,
causal_convolution=causal_conv)
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=weight_norm)
if batch_norm:
norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(
decay=0.999)
builder_params.norm_layer_tpl = norm_p
else:
builder_params.norm_layer_tpl = None
p2 = builder_params.Instantiate().DepthwiseConv2D(
'conv_2d02',
in_dim,
depth_multiplier,
filter_shape,
stride=stride,
activation=activation,
dilation=dilation,
conv_last=conv_last,
is_causal=causal_conv)
l1 = p1.Instantiate()
l2 = p2.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
l1_theta = l1.theta.Transform(tf.identity)
l2_theta = l2.theta.Transform(tf.identity)
conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)
conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)
tf.logging.info(l1_theta)
tf.logging.info(l2_theta)
l1_num_vars = l1_theta.Flatten()
l2_num_var2 = l2_theta.Flatten()
if len(l1_num_vars) != len(l2_num_var2):
tf.logging.info(
'Mismatched number of vars: l1: %d vars, l2: %d vars',
len(l1_num_vars), len(l2_num_var2))
w1 = l1_theta.w
w2 = l2_theta.conv_2d.w
tf.global_variables_initializer().run()
v1, p1 = sess.run([conv_out1, out1_padding])
w1_v = sess.run([w1])[0]
v2, p2 = sess.run([conv_out2, out2_padding], feed_dict={w2: w1_v})
self.assertAllClose(v1, v2)
self.assertAllClose(p1, p2)
def testDepthConvBasic(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'NONE'
batch_norm = False
weight_norm = False
in_dim = 3
depth_multiplier = 2
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv)
def testDepthConvBnWnTanh(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
depth_multiplier = 3
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv)
def testDepthConvGn(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
in_dim = 4
depth_multiplier = 1
filter_shape = [2, 2]
conv_last = False
causal_conv = False
with self.session(use_gpu=True) as sess:
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=True)
builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(
num_groups=2)
p = builder_params.Instantiate().DepthwiseConv2D(
'conv_2d02',
in_dim,
depth_multiplier,
filter_shape,
stride=stride,
activation=activation,
dilation=dilation,
conv_last=conv_last,
is_causal=causal_conv)
l = p.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 4]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)
tf.global_variables_initializer().run()
v = sess.run(tf.reduce_sum(conv_out, 0))
expected_out = [[[-0.77095497, 0.30285388, -0.05714864, 1.0386012],
[0.74034333, 0.04982221, -0.41769135, -2.9531932],
[-0.2647084, -0.1936804, 0.6598473, 0.42537105]],
[[1.3095646, -0.85996866, 2.2734299, -1.8457952],
[-0.9542263, -0.14199251, 0.51472515, 0.91931283],
[0.47267163, 1.4824618, 0.4548889, 0.93488806]],
[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]]]
self.assertAllClose(expected_out, v)
def testDepthConvLastWnTanh(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = False
weight_norm = True
in_dim = 3
depth_multiplier = 3
filter_shape = [2, 2]
conv_last = True
causal_conv = False
self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv)
def testDepthConvLastCausal(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
depth_multiplier = 3
filter_shape = [2, 1]
conv_last = True
causal_conv = True
self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv)
def _SeparableConvTestHelper(self, dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier, out_dim,
filter_shape, conv_last, causal_conv,
assert_equality=True):
with self.session(use_gpu=True) as sess:
p1 = layers.SeparableConv2DLayer.Params().Set(
name='conv_2d01',
filter_shape=filter_shape + [in_dim, out_dim],
depth_multiplier=depth_multiplier,
filter_stride=stride,
dilation_rate=dilation,
activation=activation,
batch_norm=batch_norm,
weight_norm=weight_norm,
bias=not batch_norm,
conv_last=conv_last,
causal_convolution=causal_conv)
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=weight_norm)
if batch_norm:
norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(
decay=0.999)
builder_params.norm_layer_tpl = norm_p
else:
builder_params.norm_layer_tpl = None
p2 = builder_params.Instantiate().SeparableConv2D(
'conv_2d02',
in_dim,
out_dim,
depth_multiplier,
filter_shape,
stride=stride,
activation=activation,
dilation=dilation,
conv_last=conv_last,
is_causal=causal_conv)
l1 = p1.Instantiate()
l2 = p2.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
l1_theta = l1.theta.Transform(tf.identity)
l2_theta = l2.theta.Transform(tf.identity)
conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)
conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)
tf.logging.info(l1_theta)
tf.logging.info(l2_theta)
l1_num_vars = l1_theta.Flatten()
l2_num_var2 = l2_theta.Flatten()
if len(l1_num_vars) != len(l2_num_var2):
tf.logging.info(
'Mismatched number of vars: l1: %d vars, l2: %d vars',
len(l1_num_vars), len(l2_num_var2))
pointwise_conv_w1 = l1_theta.w
depth_conv_w1 = l1_theta.depthwise_conv.w
pointwise_conv_w2 = l2_theta.conv_1x1.w
depth_conv_w2 = l2_theta.conv_2d.w
tf.global_variables_initializer().run()
v1, p1 = sess.run([conv_out1, out1_padding])
p_w1_v, d_w1_v = sess.run([pointwise_conv_w1, depth_conv_w1])
v2, p2 = sess.run([conv_out2, out2_padding],
feed_dict={
pointwise_conv_w2: p_w1_v,
depth_conv_w2: d_w1_v
})
if assert_equality:
self.assertAllClose(v1, v2)
self.assertAllClose(p1, p2)
def testSeparableConv2DLayerBasic(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'NONE'
batch_norm = False
weight_norm = False
in_dim = 3
depth_multiplier = 3
out_dim = 2
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
out_dim, filter_shape, conv_last, causal_conv)
def testSeparableConvWnWnTanh(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = False
weight_norm = True
in_dim = 3
depth_multiplier = 3
out_dim = 2
filter_shape = [2, 1]
conv_last = False
causal_conv = True
self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
out_dim, filter_shape, conv_last, causal_conv)
def testSeparableConvLastBnWnTanh(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
depth_multiplier = 3
out_dim = 2
filter_shape = [2, 1]
conv_last = True
causal_conv = True
self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
out_dim, filter_shape, conv_last, causal_conv,
assert_equality=False)
def testSeparableConvGn(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
in_dim = 4
depth_multiplier = 1
out_dim = 2
filter_shape = [2, 1]
conv_last = True
causal_conv = True
with self.session(use_gpu=True) as sess:
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=True)
builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(
num_groups=2)
p = builder_params.Instantiate().SeparableConv2D(
'conv_2d02',
in_dim,
out_dim,
depth_multiplier,
filter_shape,
stride=stride,
activation=activation,
dilation=dilation,
conv_last=conv_last,
is_causal=causal_conv)
l = p.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 4]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)
tf.global_variables_initializer().run()
v = sess.run(tf.reduce_sum(conv_out, 0))
expected_out = [[[0.00963847, -0.04019006], [0.36265337, -0.06592329],
[0.65582913, -0.1533944]],
[[0.7512939, -0.7282307], [0.96100605, -1.9509676],
[0.4639647, 0.2485837]], [[0., 0.], [0., 0.], [0., 0.]]]
self.assertAllClose(expected_out, v)
class CausalPoolingLayerTest(test_utils.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{
'testcase_name': 'max_pooling',
'pooling_type': 'MAX',
'left_context': 2,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, 0, 2, 4, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
}, {
'testcase_name': 'avg_pooling',
'pooling_type': 'AVG',
'left_context': 2,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, -1, 1, 3, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
}, {
'testcase_name': 'max_pooling_large_window',
'pooling_type': 'MAX',
'left_context': 10,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, 0, 2, 4, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
}, {
'testcase_name': 'avg_pooling_large_window',
'pooling_type': 'AVG',
'left_context': 10,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, -1, 0, 1, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
}, {
'testcase_name': 'avg_pooling_infinite_window',
'pooling_type': 'AVG',
'left_context': -1,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, -1, 0, 1, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
})
def testSimpleCase(self, pooling_type, left_context, inputs, input_paddings,
expected_output, expected_output_padding):
inputs = inputs[np.newaxis, :, np.newaxis, np.newaxis]
input_paddings = input_paddings[np.newaxis, :]
param = conv_layers_builder.CausalPoolingLayer.Params().Set(
name='test_layer', pooling_type=pooling_type, left_context=left_context)
pooling_layer = param.Instantiate()
with self.session(use_gpu=True) as sess:
inputs = tf.convert_to_tensor(inputs, dtype=tf.float32)
input_paddings = tf.convert_to_tensor(input_paddings, dtype=tf.float32)
output, output_paddings = pooling_layer.FPropDefaultTheta(
inputs, input_paddings)
tf.global_variables_initializer().run()
output_val, output_paddings_val = sess.run([output, output_paddings])
self.assertAllClose(expected_output, output_val.flatten())
self.assertAllEqual(expected_output_padding, output_paddings_val.flatten())
if __name__ == '__main__':
tf.test.main()
| true
| true
|
f71a1a4b45bdc87ee38fe7fcbd95d71913d56e29
| 3,212
|
py
|
Python
|
flickr.py
|
vicrobot/Flickr-Downloader
|
fecac723fca3c0f3e72b9d4581b0bcf52dfda3b5
|
[
"MIT"
] | null | null | null |
flickr.py
|
vicrobot/Flickr-Downloader
|
fecac723fca3c0f3e72b9d4581b0bcf52dfda3b5
|
[
"MIT"
] | null | null | null |
flickr.py
|
vicrobot/Flickr-Downloader
|
fecac723fca3c0f3e72b9d4581b0bcf52dfda3b5
|
[
"MIT"
] | null | null | null |
import flickrapi
import flickr_api
import urllib.request
import os
import sys
if __name__ != "__main__":
print("File 'flickr.py' not meant for transcendings and imports, direct use only")
sys.exit(0)
#functions
def url_list_maker(uiv):
count = 0
photos = flickr.walk_user(user_id = uiv, per_page = 100, extras = 'url_o')
url_list = []
for photo in photos:
try:
url_list.append(photo.get('url_o')) # o ->original size; other vars may not have all images.
except Exception: pass
return url_list
def mkname(name):
num = 0
name = str(name)
new_n = name[:]
while os.path.exists(new_n):
num += 1
new_n = name + str(num)
return new_n
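# Note (added for clarity): mkname returns the first non-existing variant of
# `name` by appending an increasing integer suffix, e.g. if 'Flickr_Imgs_foo'
# already exists it tries 'Flickr_Imgs_foo1', then 'Flickr_Imgs_foo2', and so on.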
def checkIds(akv, skv, print_M = 0):
flickr_api.set_keys(api_key = akv, api_secret = skv)
try: flickr_api.Person.findByUserName('vicro_bot').id
except flickr_api.flickrerrors.FlickrAPIError:
if print_M: print("Wrong Keys!!", "Try again")
return 0
return 1
#reading logs
try:
with open('logs', 'r') as var:
lines = [i.rstrip() for i in var.readlines() if len(i) ]
except FileNotFoundError:
with open('logs', 'w+') as var:
lines = []
bool_contain = -1
bool_ask_old = 0
dict_ids = {}
# ids handling
for line in lines:
if 'id1' in line:
bool_contain += 1
dict_ids['id1'] = ''.join(line.split(' ')[1:])
if 'id2' in line:
bool_contain += 1
dict_ids['id2'] = ''.join(line.split(' ')[1:])
if bool_contain == 1: bool_contain = checkIds(dict_ids['id1'],dict_ids['id2'])
if bool_contain == 1:
inp_ask_old = input('Use previously saved keys?(Yes or No)').rstrip().lower()
if inp_ask_old == 'yes':
bool_ask_old = 1
api_key_val = dict_ids['id1']
secret_key_val = dict_ids['id2']
#print(secret_key_val)
if not bool_ask_old:
while 1:
var1_ = 1
api_key_val = input('Give your API key ').rstrip()
secret_key_val = input('Give your API secret ').rstrip()
var1_ = checkIds(api_key_val,secret_key_val, print_M = 1)
if var1_: break
writable = ['id1 {}\n'.format(api_key_val), 'id2 {}\n'.format(secret_key_val)]
with open('logs', 'w+') as var:
var.writelines(writable)
#some globals' setup
flickr=flickrapi.FlickrAPI(api_key_val, secret_key_val)
flickr_api.set_keys(api_key = api_key_val, api_secret = secret_key_val)
user_name = input('Give user name:- \n').rstrip()
user_id_val = flickr_api.Person.findByUserName(user_name).id
urls = url_list_maker(user_id_val)
#directory work
new_dir = mkname('Flickr_Imgs_{}'.format('_'.join(user_name.split(' '))))
os.mkdir(new_dir)
os.chdir(new_dir)
# terminal show
counter = 0
var = 100.0/(len(urls)*1.0)
print('Downloading ... {:05}%'.format(int(counter)), end = '', flush = True)
b, imagecount = 0, 1
for i in urls:
try: urllib.request.urlretrieve( i, '{1}{0}'.format(imagecount, user_name[:1]))
except KeyboardInterrupt:
print('\nAbort')
sys.exit()
except Exception: pass
counter += var
print('\b'*6, end = '', flush = True)
imagecount += 1
print('{:05}'.format(counter)[:5]+'%', end = '', flush = True)
print('\nDone')
| 29.740741
| 104
| 0.634184
|
import flickrapi
import flickr_api
import urllib.request
import os
import sys
if __name__ != "__main__":
print("File 'flickr.py' not meant for transcendings and imports, direct use only")
sys.exit(0)
def url_list_maker(uiv):
count = 0
photos = flickr.walk_user(user_id = uiv, per_page = 100, extras = 'url_o')
url_list = []
for photo in photos:
try:
url_list.append(photo.get('url_o'))
except Exception: pass
return url_list
def mkname(name):
num = 0
name = str(name)
new_n = name[:]
while os.path.exists(new_n):
num += 1
new_n = name + str(num)
return new_n
def checkIds(akv, skv, print_M = 0):
flickr_api.set_keys(api_key = akv, api_secret = skv)
try: flickr_api.Person.findByUserName('vicro_bot').id
except flickr_api.flickrerrors.FlickrAPIError:
if print_M: print("Wrong Keys!!", "Try again")
return 0
return 1
try:
with open('logs', 'r') as var:
lines = [i.rstrip() for i in var.readlines() if len(i) ]
except FileNotFoundError:
with open('logs', 'w+') as var:
lines = []
bool_contain = -1
bool_ask_old = 0
dict_ids = {}
for line in lines:
if 'id1' in line:
bool_contain += 1
dict_ids['id1'] = ''.join(line.split(' ')[1:])
if 'id2' in line:
bool_contain += 1
dict_ids['id2'] = ''.join(line.split(' ')[1:])
if bool_contain == 1: bool_contain = checkIds(dict_ids['id1'],dict_ids['id2'])
if bool_contain == 1:
inp_ask_old = input('Use previously saved keys?(Yes or No)').rstrip().lower()
if inp_ask_old == 'yes':
bool_ask_old = 1
api_key_val = dict_ids['id1']
secret_key_val = dict_ids['id2']
if not bool_ask_old:
while 1:
var1_ = 1
api_key_val = input('Give your API key ').rstrip()
secret_key_val = input('Give your API secret ').rstrip()
var1_ = checkIds(api_key_val,secret_key_val, print_M = 1)
if var1_: break
writable = ['id1 {}\n'.format(api_key_val), 'id2 {}\n'.format(secret_key_val)]
with open('logs', 'w+') as var:
var.writelines(writable)
flickr=flickrapi.FlickrAPI(api_key_val, secret_key_val)
flickr_api.set_keys(api_key = api_key_val, api_secret = secret_key_val)
user_name = input('Give user name:- \n').rstrip()
user_id_val = flickr_api.Person.findByUserName(user_name).id
urls = url_list_maker(user_id_val)
#directory work
new_dir = mkname('Flickr_Imgs_{}'.format('_'.join(user_name.split(' '))))
os.mkdir(new_dir)
os.chdir(new_dir)
# terminal show
counter = 0
var = 100.0/(len(urls)*1.0)
print('Downloading ... {:05}%'.format(int(counter)), end = '', flush = True)
b, imagecount = 0, 1
for i in urls:
try: urllib.request.urlretrieve( i, '{1}{0}'.format(imagecount, user_name[:1]))
except KeyboardInterrupt:
print('\nAbort')
sys.exit()
except Exception: pass
counter += var
print('\b'*6, end = '', flush = True)
imagecount += 1
print('{:05}'.format(counter)[:5]+'%', end = '', flush = True)
print('\nDone')
| true
| true
|
f71a1ac02563cd912e303318164fa03a1b3451a2
| 527
|
py
|
Python
|
mydemo/matplotlibDemo/clickEvent.py
|
541867329/pydata-notebook
|
867f204d7abac96dbae80e6cdd2e3661e554d1dd
|
[
"MIT"
] | null | null | null |
mydemo/matplotlibDemo/clickEvent.py
|
541867329/pydata-notebook
|
867f204d7abac96dbae80e6cdd2e3661e554d1dd
|
[
"MIT"
] | null | null | null |
mydemo/matplotlibDemo/clickEvent.py
|
541867329/pydata-notebook
|
867f204d7abac96dbae80e6cdd2e3661e554d1dd
|
[
"MIT"
] | null | null | null |
from matplotlib.pyplot import figure, show
import numpy as npy
from numpy.random import rand
if 1: # picking on a scatter plot (matplotlib.collections.RegularPolyCollection)
x, y, c, s = rand(4, 100)
def onpick3(event):
ind = event.ind
print('onpick3 scatter:', ind, npy.take(x, ind), npy.take(y, ind))
fig = figure()
ax1 = fig.add_subplot(111)
col = ax1.scatter(x, y, 100 * s, c, picker=True)
# fig.savefig('pscoll.eps')
fig.canvas.mpl_connect('pick_event', onpick3)
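# Note (added for clarity): because the scatter artist above was created with
# picker=True, clicking on (or near, within the default pick radius) a plotted
# point fires a 'pick_event' once the window is shown; event.ind then holds the
# indices of the picked points into the original x/y arrays, which onpick3
# prints via npy.take.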
show()
| 23.954545
| 81
| 0.652751
|
from matplotlib.pyplot import figure, show
import numpy as npy
from numpy.random import rand
if 1:
x, y, c, s = rand(4, 100)
def onpick3(event):
ind = event.ind
print('onpick3 scatter:', ind, npy.take(x, ind), npy.take(y, ind))
fig = figure()
ax1 = fig.add_subplot(111)
col = ax1.scatter(x, y, 100 * s, c, picker=True)
fig.canvas.mpl_connect('pick_event', onpick3)
show()
| true
| true
|
f71a1af80e296be1c22cd3a838643279ddd193cd
| 313
|
py
|
Python
|
Lib/objc/_IOAccelerator.py
|
kanishpatel/Pyto
|
feec7a1a54f635a6375fa7ede074ff35afbfbb95
|
[
"MIT"
] | null | null | null |
Lib/objc/_IOAccelerator.py
|
kanishpatel/Pyto
|
feec7a1a54f635a6375fa7ede074ff35afbfbb95
|
[
"MIT"
] | null | null | null |
Lib/objc/_IOAccelerator.py
|
kanishpatel/Pyto
|
feec7a1a54f635a6375fa7ede074ff35afbfbb95
|
[
"MIT"
] | null | null | null |
'''
Classes from the 'IOAccelerator' framework.
'''
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
IOAccelMTLEvent = _Class('IOAccelMTLEvent')
| 15.65
| 43
| 0.661342
|
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
IOAccelMTLEvent = _Class('IOAccelMTLEvent')
| true
| true
|
f71a1b665af36fbf12688a3e2396cbb73c2862b5
| 230
|
py
|
Python
|
app/books/urls.py
|
bayocr/example-docker-django
|
550d7ce3e0dd5643616245eed9cbb9ae96812c11
|
[
"MIT"
] | null | null | null |
app/books/urls.py
|
bayocr/example-docker-django
|
550d7ce3e0dd5643616245eed9cbb9ae96812c11
|
[
"MIT"
] | 1
|
2021-05-25T00:56:48.000Z
|
2021-05-25T00:56:48.000Z
|
app/books/urls.py
|
bayocr/example-docker-django
|
550d7ce3e0dd5643616245eed9cbb9ae96812c11
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import BookDetailView, BookListView
app_name = 'books'
urlpatterns = [
path('', BookListView.as_view(), name='list'),
path('<int:pk>/', BookDetailView.as_view(), name='detail')
]
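# Added note (illustrative, not part of the original file): because app_name is set, these
# routes reverse by namespace, e.g. reverse('books:detail', args=[1]) resolves to '<prefix>/1/',
# where <prefix> is whatever path this urlconf is included under in the project urls.py.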
| 23
| 62
| 0.695652
|
from django.urls import path
from .views import BookDetailView, BookListView
app_name = 'books'
urlpatterns = [
path('', BookListView.as_view(), name='list'),
path('<int:pk>/', BookDetailView.as_view(), name='detail')
]
| true
| true
|
f71a1c4b664e4d204ee0e4819ed647e5e03c985d
| 318
|
py
|
Python
|
cwr_validator/__init__.py
|
weso/CWR-Validator
|
18b83136f44f5bdd2f66c9af866b0e37acf682cb
|
[
"MIT"
] | 16
|
2015-04-21T15:50:14.000Z
|
2021-07-14T07:22:32.000Z
|
cwr_validator/__init__.py
|
weso/CWR-Validator
|
18b83136f44f5bdd2f66c9af866b0e37acf682cb
|
[
"MIT"
] | 12
|
2015-02-02T11:32:01.000Z
|
2015-04-20T10:45:36.000Z
|
cwr_validator/__init__.py
|
weso/CWR-Validator
|
18b83136f44f5bdd2f66c9af866b0e37acf682cb
|
[
"MIT"
] | 4
|
2015-02-01T21:45:03.000Z
|
2018-08-20T07:51:02.000Z
|
# -*- coding: utf-8 -*-
from cwr_validator.app import create_app
"""
CWR Data API Validator WS
~~~~~~~~~~~~~~~~~~~~~~~~~
Validator Web Service for Common Works Registrations.
:copyright: (c) 2015 by WESO
:license: MIT, see LICENSE for more details.
"""
__version__ = '0.0.1'
__license__ = 'MIT'
| 21.2
| 57
| 0.613208
|
from cwr_validator.app import create_app
__version__ = '0.0.1'
__license__ = 'MIT'
| true
| true
|
f71a1e01f6c37695492ea9e9df0eec7b5250b6b1
| 986
|
py
|
Python
|
env/Lib/site-packages/OpenGL/GLES2/EXT/texture_type_2_10_10_10_REV.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 210
|
2016-04-09T14:26:00.000Z
|
2022-03-25T18:36:19.000Z
|
env/Lib/site-packages/OpenGL/GLES2/EXT/texture_type_2_10_10_10_REV.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 72
|
2016-09-04T09:30:19.000Z
|
2022-03-27T17:06:53.000Z
|
env/Lib/site-packages/OpenGL/GLES2/EXT/texture_type_2_10_10_10_REV.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 64
|
2016-04-09T14:26:49.000Z
|
2022-03-21T11:19:47.000Z
|
'''OpenGL extension EXT.texture_type_2_10_10_10_REV
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.texture_type_2_10_10_10_REV to provide a more
Python-friendly API
Overview (from the spec)
This extension adds a new texture data type, unsigned 2.10.10.10 ABGR,
which can be used with RGB or RGBA formats.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/texture_type_2_10_10_10_REV.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.texture_type_2_10_10_10_REV import *
from OpenGL.raw.GLES2.EXT.texture_type_2_10_10_10_REV import _EXTENSION_NAME
def glInitTextureType2101010RevEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
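# Illustrative usage sketch (an assumption, not part of the generated file): callers are
# expected to gate use of the packed 2.10.10.10 reversed texture type behind the check above,
# e.g.
#   if glInitTextureType2101010RevEXT():
#       ...  # upload RGB/RGBA textures using the extension's packed data type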
| 35.214286
| 76
| 0.813387
|
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.texture_type_2_10_10_10_REV import *
from OpenGL.raw.GLES2.EXT.texture_type_2_10_10_10_REV import _EXTENSION_NAME
def glInitTextureType2101010RevEXT():
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
| true
| true
|
f71a1e63deeffcfdc628570bf42b870b09678f9d
| 621
|
py
|
Python
|
debugprov/single_stepping.py
|
romerlrl/debugprov
|
3527f6a3fa623354777aaaed2616b6b3065f8304
|
[
"MIT"
] | 2
|
2019-09-26T17:46:12.000Z
|
2021-04-21T00:19:59.000Z
|
debugprov/single_stepping.py
|
romerlrl/debugprov
|
3527f6a3fa623354777aaaed2616b6b3065f8304
|
[
"MIT"
] | null | null | null |
debugprov/single_stepping.py
|
romerlrl/debugprov
|
3527f6a3fa623354777aaaed2616b6b3065f8304
|
[
"MIT"
] | 1
|
2020-09-22T20:37:19.000Z
|
2020-09-22T20:37:19.000Z
|
from debugprov.navgiation_strategy import NavigationStrategy
from debugprov.node import Node
from debugprov.validity import Validity
class SingleStepping(NavigationStrategy):
def navigate(self):
self.recursive_navigate(self.exec_tree.root_node)
self.finish_navigation()
return self.exec_tree
def recursive_navigate(self, current_node: Node):
if self.there_are_nodes_with_unknown_validity():
if current_node.has_childrens():
for c in current_node.childrens:
self.recursive_navigate(c)
self.evaluate(current_node)
| 34.5
| 60
| 0.706924
|
from debugprov.navgiation_strategy import NavigationStrategy
from debugprov.node import Node
from debugprov.validity import Validity
class SingleStepping(NavigationStrategy):
def navigate(self):
self.recursive_navigate(self.exec_tree.root_node)
self.finish_navigation()
return self.exec_tree
def recursive_navigate(self, current_node: Node):
if self.there_are_nodes_with_unknown_validity():
if current_node.has_childrens():
for c in current_node.childrens:
self.recursive_navigate(c)
self.evaluate(current_node)
| true
| true
|
f71a1e9ab3b466d5a052c9eb0a36e082154d5dbc
| 1,747
|
py
|
Python
|
igibson/robots/jr2_robot.py
|
suresh-guttikonda/iGibson
|
a69e623058180146466cd52d4bb3c00d1facdacf
|
[
"MIT"
] | 360
|
2020-04-02T11:12:09.000Z
|
2022-03-24T21:46:58.000Z
|
igibson/robots/jr2_robot.py
|
suresh-guttikonda/iGibson
|
a69e623058180146466cd52d4bb3c00d1facdacf
|
[
"MIT"
] | 169
|
2020-04-07T21:01:05.000Z
|
2022-03-31T10:07:39.000Z
|
igibson/robots/jr2_robot.py
|
suresh-guttikonda/iGibson
|
a69e623058180146466cd52d4bb3c00d1facdacf
|
[
"MIT"
] | 94
|
2020-04-09T23:22:17.000Z
|
2022-03-17T21:49:03.000Z
|
import gym
import numpy as np
from igibson.robots.robot_locomotor import LocomotorRobot
class JR2(LocomotorRobot):
"""
JR2 robot (no arm)
Reference: https://cvgl.stanford.edu/projects/jackrabbot/
Uses joint velocity control
"""
def __init__(self, config):
self.config = config
self.velocity = config.get("velocity", 1.0)
LocomotorRobot.__init__(
self,
"jr2_urdf/jr2.urdf",
action_dim=4,
scale=config.get("robot_scale", 1.0),
is_discrete=config.get("is_discrete", True),
control="velocity",
)
def set_up_continuous_action_space(self):
"""
Set up continuous action space
"""
self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)
self.action_high = self.velocity * np.ones([self.action_dim])
self.action_low = -self.action_high
def set_up_discrete_action_space(self):
"""
Set up discrete action space
"""
self.action_list = [
[self.velocity, self.velocity, 0, self.velocity],
[-self.velocity, -self.velocity, 0, -self.velocity],
[self.velocity, -self.velocity, -self.velocity, 0],
[-self.velocity, self.velocity, self.velocity, 0],
[0, 0, 0, 0],
]
self.action_space = gym.spaces.Discrete(len(self.action_list))
self.setup_keys_to_action()
def setup_keys_to_action(self):
self.keys_to_action = {
(ord("w"),): 0, # forward
(ord("s"),): 1, # backward
(ord("d"),): 2, # turn right
(ord("a"),): 3, # turn left
(): 4,
}
| 31.196429
| 106
| 0.566113
|
import gym
import numpy as np
from igibson.robots.robot_locomotor import LocomotorRobot
class JR2(LocomotorRobot):
def __init__(self, config):
self.config = config
self.velocity = config.get("velocity", 1.0)
LocomotorRobot.__init__(
self,
"jr2_urdf/jr2.urdf",
action_dim=4,
scale=config.get("robot_scale", 1.0),
is_discrete=config.get("is_discrete", True),
control="velocity",
)
def set_up_continuous_action_space(self):
self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)
self.action_high = self.velocity * np.ones([self.action_dim])
self.action_low = -self.action_high
def set_up_discrete_action_space(self):
self.action_list = [
[self.velocity, self.velocity, 0, self.velocity],
[-self.velocity, -self.velocity, 0, -self.velocity],
[self.velocity, -self.velocity, -self.velocity, 0],
[-self.velocity, self.velocity, self.velocity, 0],
[0, 0, 0, 0],
]
self.action_space = gym.spaces.Discrete(len(self.action_list))
self.setup_keys_to_action()
def setup_keys_to_action(self):
self.keys_to_action = {
(ord("w"),): 0,
(ord("s"),): 1,
(ord("d"),): 2,
(ord("a"),): 3,
(): 4,
}
| true
| true
|
f71a1fa441e506dab6e2238a62846f24b22db7ce
| 17,068
|
py
|
Python
|
Training_Raw_data_validation/rawValidation.py
|
teja-ambati1202/Insurance-Fraud-Detection
|
a9bbdd5a2af68e0e90f8e16ba43129bab709614b
|
[
"Apache-2.0"
] | null | null | null |
Training_Raw_data_validation/rawValidation.py
|
teja-ambati1202/Insurance-Fraud-Detection
|
a9bbdd5a2af68e0e90f8e16ba43129bab709614b
|
[
"Apache-2.0"
] | null | null | null |
Training_Raw_data_validation/rawValidation.py
|
teja-ambati1202/Insurance-Fraud-Detection
|
a9bbdd5a2af68e0e90f8e16ba43129bab709614b
|
[
"Apache-2.0"
] | 1
|
2022-03-27T09:02:29.000Z
|
2022-03-27T09:02:29.000Z
|
import sqlite3
from datetime import datetime
from os import listdir
import os
import re
import json
import shutil
import pandas as pd
from application_logging.logger import App_Logger
class Raw_Data_validation:
"""
This class shall be used for handling all the validation done on the Raw Training Data!!.
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
def __init__(self,path):
self.Batch_Directory = path
self.schema_path = 'schema_training.json'
self.logger = App_Logger()
def valuesFromSchema(self):
"""
Method Name: valuesFromSchema
Description: This method extracts all the relevant information from the pre-defined "Schema" file.
Output: LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, Number of Columns
On Failure: Raise ValueError,KeyError,Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
with open(self.schema_path, 'r') as f:
dic = json.load(f)
f.close()
pattern = dic['SampleFileName']
LengthOfDateStampInFile = dic['LengthOfDateStampInFile']
LengthOfTimeStampInFile = dic['LengthOfTimeStampInFile']
column_names = dic['ColName']
NumberofColumns = dic['NumberofColumns']
file = open("Training_Logs/valuesfromSchemaValidationLog.txt", 'a+')
message ="LengthOfDateStampInFile:: %s" %LengthOfDateStampInFile + "\t" + "LengthOfTimeStampInFile:: %s" % LengthOfTimeStampInFile +"\t " + "NumberofColumns:: %s" % NumberofColumns + "\n"
self.logger.log(file,message)
file.close()
except ValueError:
file = open("Training_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file,"ValueError:Value not found inside schema_training.json")
file.close()
raise ValueError
except KeyError:
file = open("Training_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file, "KeyError:Key value error incorrect key passed")
file.close()
raise KeyError
except Exception as e:
file = open("Training_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file, str(e))
file.close()
raise e
return LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, NumberofColumns
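    # Illustrative layout of schema_training.json assumed by valuesFromSchema above
    # (key names come from the code; the concrete values here are hypothetical):
    # {
    #     "SampleFileName": "fraudDetection_02012020_010203.csv",
    #     "LengthOfDateStampInFile": 8,
    #     "LengthOfTimeStampInFile": 6,
    #     "NumberofColumns": 40,
    #     "ColName": {"months_as_customer": "INTEGER", "...": "..."}
    # }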
def manualRegexCreation(self):
"""
Method Name: manualRegexCreation
Description: This method contains a manually defined regex based on the "FileName" given in "Schema" file.
This Regex is used to validate the filename of the training data.
Output: Regex pattern
On Failure: None
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
regex = "['fraudDetection']+['\_'']+[\d_]+[\d]+\.csv"
return regex
def createDirectoryForGoodBadRawData(self):
"""
Method Name: createDirectoryForGoodBadRawData
Description: This method creates directories to store the Good Data and Bad Data
after validating the training data.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
path = os.path.join("Training_Raw_files_validated/", "Good_Raw/")
if not os.path.isdir(path):
os.makedirs(path)
path = os.path.join("Training_Raw_files_validated/", "Bad_Raw/")
if not os.path.isdir(path):
os.makedirs(path)
except OSError as ex:
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while creating Directory %s:" % ex)
file.close()
raise OSError
def deleteExistingGoodDataTrainingFolder(self):
"""
Method Name: deleteExistingGoodDataTrainingFolder
Description: This method deletes the directory made to store the Good Data
after loading the data in the table. Once the good files are
loaded in the DB,deleting the directory ensures space optimization.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
path = 'Training_Raw_files_validated/'
# if os.path.isdir("ids/" + userName):
# if os.path.isdir(path + 'Bad_Raw/'):
# shutil.rmtree(path + 'Bad_Raw/')
if os.path.isdir(path + 'Good_Raw/'):
shutil.rmtree(path + 'Good_Raw/')
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"GoodRaw directory deleted successfully!!!")
file.close()
except OSError as s:
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while Deleting Directory : %s" %s)
file.close()
raise OSError
def deleteExistingBadDataTrainingFolder(self):
"""
Method Name: deleteExistingBadDataTrainingFolder
Description: This method deletes the directory made to store the bad Data.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
path = 'Training_Raw_files_validated/'
if os.path.isdir(path + 'Bad_Raw/'):
shutil.rmtree(path + 'Bad_Raw/')
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"BadRaw directory deleted before starting validation!!!")
file.close()
except OSError as s:
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while Deleting Directory : %s" %s)
file.close()
raise OSError
def moveBadFilesToArchiveBad(self):
"""
Method Name: moveBadFilesToArchiveBad
Description: This method deletes the directory made to store the Bad Data
after moving the data in an archive folder. We archive the bad
files to send them back to the client for invalid data issue.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
now = datetime.now()
date = now.date()
time = now.strftime("%H%M%S")
try:
source = 'Training_Raw_files_validated/Bad_Raw/'
if os.path.isdir(source):
path = "TrainingArchiveBadData"
if not os.path.isdir(path):
os.makedirs(path)
dest = 'TrainingArchiveBadData/BadData_' + str(date)+"_"+str(time)
if not os.path.isdir(dest):
os.makedirs(dest)
files = os.listdir(source)
for f in files:
if f not in os.listdir(dest):
shutil.move(source + f, dest)
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Bad files moved to archive")
path = 'Training_Raw_files_validated/'
if os.path.isdir(path + 'Bad_Raw/'):
shutil.rmtree(path + 'Bad_Raw/')
self.logger.log(file,"Bad Raw Data Folder Deleted successfully!!")
file.close()
except Exception as e:
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file, "Error while moving bad files to archive:: %s" % e)
file.close()
raise e
def validationFileNameRaw(self,regex,LengthOfDateStampInFile,LengthOfTimeStampInFile):
"""
Method Name: validationFileNameRaw
            Description: This function validates the names of the training csv files against the pattern given in the schema.
                         A regex pattern is used for the validation. If the name format does not match, the file is moved
                         to the Bad Raw Data folder; otherwise it is kept in Good Raw Data.
Output: None
On Failure: Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
# delete the directories for good and bad data in case last run was unsuccessful and folders were not deleted.
self.deleteExistingBadDataTrainingFolder()
self.deleteExistingGoodDataTrainingFolder()
#create new directories
self.createDirectoryForGoodBadRawData()
onlyfiles = [f for f in listdir(self.Batch_Directory)]
try:
f = open("Training_Logs/nameValidationLog.txt", 'a+')
for filename in onlyfiles:
if (re.match(regex, filename)):
splitAtDot = re.split('.csv', filename)
splitAtDot = (re.split('_', splitAtDot[0]))
if len(splitAtDot[1]) == LengthOfDateStampInFile:
if len(splitAtDot[2]) == LengthOfTimeStampInFile:
shutil.copy("Training_Batch_Files/" + filename, "Training_Raw_files_validated/Good_Raw")
self.logger.log(f,"Valid File name!! File moved to GoodRaw Folder :: %s" % filename)
else:
shutil.copy("Training_Batch_Files/" + filename, "Training_Raw_files_validated/Bad_Raw")
self.logger.log(f,"Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
else:
shutil.copy("Training_Batch_Files/" + filename, "Training_Raw_files_validated/Bad_Raw")
self.logger.log(f,"Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
else:
shutil.copy("Training_Batch_Files/" + filename, "Training_Raw_files_validated/Bad_Raw")
self.logger.log(f, "Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
f.close()
except Exception as e:
f = open("Training_Logs/nameValidationLog.txt", 'a+')
self.logger.log(f, "Error occured while validating FileName %s" % e)
f.close()
raise e
def validateColumnLength(self,NumberofColumns):
"""
Method Name: validateColumnLength
Description: This function validates the number of columns in the csv files.
                         It should be the same as given in the schema file.
                         If it is not, the file is not suitable for processing and is moved to the Bad Raw Data folder.
                         If the column number matches, the file is kept in Good Raw Data for processing.
Output: None
On Failure: Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
f = open("Training_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f,"Column Length Validation Started!!")
for file in listdir('Training_Raw_files_validated/Good_Raw/'):
csv = pd.read_csv("Training_Raw_files_validated/Good_Raw/" + file)
if csv.shape[1] == NumberofColumns:
pass
else:
shutil.move("Training_Raw_files_validated/Good_Raw/" + file, "Training_Raw_files_validated/Bad_Raw")
self.logger.log(f, "Invalid Column Length for the file!! File moved to Bad Raw Folder :: %s" % file)
self.logger.log(f, "Column Length Validation Completed!!")
except OSError:
f = open("Training_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f, "Error Occured while moving the file :: %s" % OSError)
f.close()
raise OSError
except Exception as e:
f = open("Training_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f, "Error Occured:: %s" % e)
f.close()
raise e
f.close()
def validateMissingValuesInWholeColumn(self):
"""
Method Name: validateMissingValuesInWholeColumn
Description: This function validates if any column in the csv file has all values missing.
If all the values are missing, the file is not suitable for processing.
                         Such files are moved to the Bad Raw Data folder.
Output: None
On Failure: Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
f = open("Training_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f,"Missing Values Validation Started!!")
for file in listdir('Training_Raw_files_validated/Good_Raw/'):
csv = pd.read_csv("Training_Raw_files_validated/Good_Raw/" + file)
count = 0
for columns in csv:
if (len(csv[columns]) - csv[columns].count()) == len(csv[columns]):
count+=1
shutil.move("Training_Raw_files_validated/Good_Raw/" + file,
"Training_Raw_files_validated/Bad_Raw")
self.logger.log(f,"Invalid Column for the file!! File moved to Bad Raw Folder :: %s" % file)
break
if count==0:
csv.rename(columns={"Unnamed: 0": "Wafer"}, inplace=True)
csv.to_csv("Training_Raw_files_validated/Good_Raw/" + file, index=None, header=True)
except OSError:
f = open("Training_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f, "Error Occured while moving the file :: %s" % OSError)
f.close()
raise OSError
except Exception as e:
f = open("Training_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f, "Error Occured:: %s" % e)
f.close()
raise e
f.close()
| 44.563969
| 200
| 0.489278
|
import sqlite3
from datetime import datetime
from os import listdir
import os
import re
import json
import shutil
import pandas as pd
from application_logging.logger import App_Logger
class Raw_Data_validation:
def __init__(self,path):
self.Batch_Directory = path
self.schema_path = 'schema_training.json'
self.logger = App_Logger()
def valuesFromSchema(self):
try:
with open(self.schema_path, 'r') as f:
dic = json.load(f)
f.close()
pattern = dic['SampleFileName']
LengthOfDateStampInFile = dic['LengthOfDateStampInFile']
LengthOfTimeStampInFile = dic['LengthOfTimeStampInFile']
column_names = dic['ColName']
NumberofColumns = dic['NumberofColumns']
file = open("Training_Logs/valuesfromSchemaValidationLog.txt", 'a+')
message ="LengthOfDateStampInFile:: %s" %LengthOfDateStampInFile + "\t" + "LengthOfTimeStampInFile:: %s" % LengthOfTimeStampInFile +"\t " + "NumberofColumns:: %s" % NumberofColumns + "\n"
self.logger.log(file,message)
file.close()
except ValueError:
file = open("Training_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file,"ValueError:Value not found inside schema_training.json")
file.close()
raise ValueError
except KeyError:
file = open("Training_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file, "KeyError:Key value error incorrect key passed")
file.close()
raise KeyError
except Exception as e:
file = open("Training_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file, str(e))
file.close()
raise e
return LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, NumberofColumns
def manualRegexCreation(self):
regex = "['fraudDetection']+['\_'']+[\d_]+[\d]+\.csv"
return regex
def createDirectoryForGoodBadRawData(self):
try:
path = os.path.join("Training_Raw_files_validated/", "Good_Raw/")
if not os.path.isdir(path):
os.makedirs(path)
path = os.path.join("Training_Raw_files_validated/", "Bad_Raw/")
if not os.path.isdir(path):
os.makedirs(path)
except OSError as ex:
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while creating Directory %s:" % ex)
file.close()
raise OSError
def deleteExistingGoodDataTrainingFolder(self):
try:
path = 'Training_Raw_files_validated/'
# if os.path.isdir("ids/" + userName):
# if os.path.isdir(path + 'Bad_Raw/'):
# shutil.rmtree(path + 'Bad_Raw/')
if os.path.isdir(path + 'Good_Raw/'):
shutil.rmtree(path + 'Good_Raw/')
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"GoodRaw directory deleted successfully!!!")
file.close()
except OSError as s:
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while Deleting Directory : %s" %s)
file.close()
raise OSError
def deleteExistingBadDataTrainingFolder(self):
try:
path = 'Training_Raw_files_validated/'
if os.path.isdir(path + 'Bad_Raw/'):
shutil.rmtree(path + 'Bad_Raw/')
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"BadRaw directory deleted before starting validation!!!")
file.close()
except OSError as s:
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while Deleting Directory : %s" %s)
file.close()
raise OSError
def moveBadFilesToArchiveBad(self):
now = datetime.now()
date = now.date()
time = now.strftime("%H%M%S")
try:
source = 'Training_Raw_files_validated/Bad_Raw/'
if os.path.isdir(source):
path = "TrainingArchiveBadData"
if not os.path.isdir(path):
os.makedirs(path)
dest = 'TrainingArchiveBadData/BadData_' + str(date)+"_"+str(time)
if not os.path.isdir(dest):
os.makedirs(dest)
files = os.listdir(source)
for f in files:
if f not in os.listdir(dest):
shutil.move(source + f, dest)
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Bad files moved to archive")
path = 'Training_Raw_files_validated/'
if os.path.isdir(path + 'Bad_Raw/'):
shutil.rmtree(path + 'Bad_Raw/')
self.logger.log(file,"Bad Raw Data Folder Deleted successfully!!")
file.close()
except Exception as e:
file = open("Training_Logs/GeneralLog.txt", 'a+')
self.logger.log(file, "Error while moving bad files to archive:: %s" % e)
file.close()
raise e
def validationFileNameRaw(self,regex,LengthOfDateStampInFile,LengthOfTimeStampInFile):
# delete the directories for good and bad data in case last run was unsuccessful and folders were not deleted.
self.deleteExistingBadDataTrainingFolder()
self.deleteExistingGoodDataTrainingFolder()
#create new directories
self.createDirectoryForGoodBadRawData()
onlyfiles = [f for f in listdir(self.Batch_Directory)]
try:
f = open("Training_Logs/nameValidationLog.txt", 'a+')
for filename in onlyfiles:
if (re.match(regex, filename)):
splitAtDot = re.split('.csv', filename)
splitAtDot = (re.split('_', splitAtDot[0]))
if len(splitAtDot[1]) == LengthOfDateStampInFile:
if len(splitAtDot[2]) == LengthOfTimeStampInFile:
shutil.copy("Training_Batch_Files/" + filename, "Training_Raw_files_validated/Good_Raw")
self.logger.log(f,"Valid File name!! File moved to GoodRaw Folder :: %s" % filename)
else:
shutil.copy("Training_Batch_Files/" + filename, "Training_Raw_files_validated/Bad_Raw")
self.logger.log(f,"Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
else:
shutil.copy("Training_Batch_Files/" + filename, "Training_Raw_files_validated/Bad_Raw")
self.logger.log(f,"Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
else:
shutil.copy("Training_Batch_Files/" + filename, "Training_Raw_files_validated/Bad_Raw")
self.logger.log(f, "Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
f.close()
except Exception as e:
f = open("Training_Logs/nameValidationLog.txt", 'a+')
self.logger.log(f, "Error occured while validating FileName %s" % e)
f.close()
raise e
def validateColumnLength(self,NumberofColumns):
try:
f = open("Training_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f,"Column Length Validation Started!!")
for file in listdir('Training_Raw_files_validated/Good_Raw/'):
csv = pd.read_csv("Training_Raw_files_validated/Good_Raw/" + file)
if csv.shape[1] == NumberofColumns:
pass
else:
shutil.move("Training_Raw_files_validated/Good_Raw/" + file, "Training_Raw_files_validated/Bad_Raw")
self.logger.log(f, "Invalid Column Length for the file!! File moved to Bad Raw Folder :: %s" % file)
self.logger.log(f, "Column Length Validation Completed!!")
except OSError:
f = open("Training_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f, "Error Occured while moving the file :: %s" % OSError)
f.close()
raise OSError
except Exception as e:
f = open("Training_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f, "Error Occured:: %s" % e)
f.close()
raise e
f.close()
def validateMissingValuesInWholeColumn(self):
try:
f = open("Training_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f,"Missing Values Validation Started!!")
for file in listdir('Training_Raw_files_validated/Good_Raw/'):
csv = pd.read_csv("Training_Raw_files_validated/Good_Raw/" + file)
count = 0
for columns in csv:
if (len(csv[columns]) - csv[columns].count()) == len(csv[columns]):
count+=1
shutil.move("Training_Raw_files_validated/Good_Raw/" + file,
"Training_Raw_files_validated/Bad_Raw")
self.logger.log(f,"Invalid Column for the file!! File moved to Bad Raw Folder :: %s" % file)
break
if count==0:
csv.rename(columns={"Unnamed: 0": "Wafer"}, inplace=True)
csv.to_csv("Training_Raw_files_validated/Good_Raw/" + file, index=None, header=True)
except OSError:
f = open("Training_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f, "Error Occured while moving the file :: %s" % OSError)
f.close()
raise OSError
except Exception as e:
f = open("Training_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f, "Error Occured:: %s" % e)
f.close()
raise e
f.close()
| true
| true
|
f71a1fb42d65587e922d09e984061b07a1aaed3f
| 122
|
py
|
Python
|
askci/plugins/pam_auth/__init__.py
|
hpsee/askci
|
ef1e2e75481b71db7fbe774cb81938055aa596d0
|
[
"MIT"
] | 3
|
2019-11-21T09:04:36.000Z
|
2019-11-23T13:29:43.000Z
|
askci/plugins/pam_auth/__init__.py
|
hpsee/askci
|
ef1e2e75481b71db7fbe774cb81938055aa596d0
|
[
"MIT"
] | 13
|
2019-11-21T20:28:23.000Z
|
2019-11-26T19:34:22.000Z
|
askci/plugins/pam_auth/__init__.py
|
hpsee/askci
|
ef1e2e75481b71db7fbe774cb81938055aa596d0
|
[
"MIT"
] | null | null | null |
AUTHENTICATION_BACKENDS = (
"django_pam.auth.backends.PAMBackend",
"django.contrib.auth.backends.ModelBackend",
)
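# Added note (not part of the original file): django.contrib.auth tries these backends in
# order on authenticate(), so PAM/system accounts are checked first and Django's own
# database-backed accounts remain usable as a fallback.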
| 24.4
| 48
| 0.754098
|
AUTHENTICATION_BACKENDS = (
"django_pam.auth.backends.PAMBackend",
"django.contrib.auth.backends.ModelBackend",
)
| true
| true
|
f71a227f18ed9f23f6798ac8a5fc17a955b9c0cb
| 3,870
|
py
|
Python
|
QCT/get_S_norm.py
|
inqlee0704/pyqct
|
304612ed558e7c46fe987ecfea8145cbc5721700
|
[
"MIT"
] | null | null | null |
QCT/get_S_norm.py
|
inqlee0704/pyqct
|
304612ed558e7c46fe987ecfea8145cbc5721700
|
[
"MIT"
] | null | null | null |
QCT/get_S_norm.py
|
inqlee0704/pyqct
|
304612ed558e7c46fe987ecfea8145cbc5721700
|
[
"MIT"
] | null | null | null |
# ##############################################################################
# Usage: python get_S_norm.py Subj I1 I2
# Time: ~ 20s
# Ref:
# ##############################################################################
# 20220118, In Kyu Lee
# No version suffix
# ##############################################################################
# v1c: 08/11/2021, In Kyu Lee
# - Fixed: when V_IN < V_EX, s_norm returns nan issue.
# - ownpow is used
# v1b: 08/10/2021, In Kyu Lee
# - S* stat is added
# 03/18/2021, In Kyu Lee
# Calculate S*
# ##############################################################################
# Input:
# - displacement img, ex) PMSN03001_EX0-TO-PMSN03001_IN0-SSTVD_disp_resample.mhd'
# - IN lobe mask, ex) PMSN03001_IN0_vida-lobes.img
# Output:
# - s* image, ex) PMSN03001_EX0-TO-PMSN03001_IN0-SSTVD_s_norm.img
# - s* stat, ex) PMSN03001_EX0-TO-PMSN03001_IN0-SSTVD_lobar_s_norm.txt
# ##############################################################################w
# import libraries
import os
import sys
import numpy as np
import time
import pandas as pd
from medpy.io import load, save
import SimpleITK as sitk
sitk.ProcessObject_SetGlobalWarningDisplay(False)
import warnings
warnings.filterwarnings("ignore")
def ownpow(a, b):
    if a >= 0:  # treat a == 0 as non-negative so the function never falls through and returns None
return a**b
if a < 0:
temp = abs(a)**b
return -1*temp
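# Added note (illustrative, not part of the original script): ownpow keeps s_norm real when
# V_IN < V_EX; a negative base with exponent 1/3 gives a complex number in plain Python
# (and NaN with NumPy floats), e.g. (-8) ** (1/3) is complex, while ownpow(-8, 1/3) ~ -2.0.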
start = time.time()
Subj = str(sys.argv[1]) # PMSN03001
I1 = str(sys.argv[2]) # 'IN0'
I2 = str(sys.argv[3]) # 'EX0'
disp_path = f'{Subj}_{I2}-TO-{Subj}_{I1}-SSTVD_disp_resample.mhd'
histo_EX = pd.read_csv(f'{Subj}_{I2}_vida-histo.csv')
histo_IN = pd.read_csv(f'{Subj}_{I1}_vida-histo.csv')
s_norm_stat_path = f'{Subj}_{I2}-TO-{Subj}_{I1}-SSTVD_lobar_s_norm.txt'
IN_lobe_path = f'{Subj}_{I1}_vida-lobes.img'
if not os.path.exists(IN_lobe_path):
IN_lobe_path = f'{Subj}_{I1}_vida-lobes.img.gz'
s_norm_img_path = f'{Subj}_{I2}-TO-{Subj}_{I1}-SSTVD_s_norm.img'
# V_cm3_IN
V_EX = histo_EX.loc[histo_EX.location=='both', 'total-volume-cm3'].values[0]
V_IN = histo_IN.loc[histo_IN.location=='both', 'total-volume-cm3'].values[0]
# cm^3 -> mm^3
V_EX = V_EX * 1000
V_IN = V_IN * 1000
# Data Loading . . .
disp, disp_h = load(disp_path)
IN_lobe_img, IN_lobe_header = load(IN_lobe_path)
s_norm_h = disp_h
# [mm]
s = (disp[:,:,:,0]**2+disp[:,:,:,1]**2+disp[:,:,:,2]**2)**0.5
# This doesn't work if V_IN- V_EX is negative
# s_norm = s/((V_IN-V_EX)**(1/3))
s_norm = s/ownpow(V_IN-V_EX,1/3)
# Prep stat
s_norm_l0 = np.mean(s_norm[IN_lobe_img==8])
s_norm_l1 = np.mean(s_norm[IN_lobe_img==16])
s_norm_l2 = np.mean(s_norm[IN_lobe_img==32])
s_norm_l3 = np.mean(s_norm[IN_lobe_img==64])
s_norm_l4 = np.mean(s_norm[IN_lobe_img==128])
s_norm_mean = (s_norm_l0 + s_norm_l1 + s_norm_l2 + s_norm_l3 + s_norm_l4)/5
s_norm_l0_sd = np.std(s_norm[IN_lobe_img==8])
s_norm_l1_sd = np.std(s_norm[IN_lobe_img==16])
s_norm_l2_sd = np.std(s_norm[IN_lobe_img==32])
s_norm_l3_sd = np.std(s_norm[IN_lobe_img==64])
s_norm_l4_sd = np.std(s_norm[IN_lobe_img==128])
s_norm_sd = np.std(s_norm[IN_lobe_img!=0])
# CV = std/mean
s_norm_l0_cv = s_norm_l0_sd/s_norm_l0
s_norm_l1_cv = s_norm_l1_sd/s_norm_l1
s_norm_l2_cv = s_norm_l2_sd/s_norm_l2
s_norm_l3_cv = s_norm_l3_sd/s_norm_l3
s_norm_l4_cv = s_norm_l4_sd/s_norm_l4
s_norm_cv = s_norm_sd/s_norm_mean
s_norm_stat = pd.DataFrame({'Lobes':['Lobe0','Lobe1','Lobe2','Lobe3','Lobe4','All'],
'sStar_m':np.float16([s_norm_l0,s_norm_l1,s_norm_l2,s_norm_l3,s_norm_l4,s_norm_mean]),
'sStar_sd':np.float16([s_norm_l0_sd,s_norm_l1_sd,s_norm_l2_sd,s_norm_l3_sd,s_norm_l4_sd,s_norm_sd]),
'sStar_cv':np.float16([s_norm_l0_cv,s_norm_l1_cv,s_norm_l2_cv,s_norm_l3_cv,s_norm_l4_cv,s_norm_cv])})
# Save
save(s_norm,s_norm_img_path,hdr=s_norm_h)
s_norm_stat.to_csv(s_norm_stat_path, index=False, sep=' ')
end = time.time()
print(f'Elapsed time: {end-start}s')
| 35.181818
| 115
| 0.640052
| true
| true
|
|
f71a22b92bee8bbe5221f6a278525d912c8b3c92
| 577
|
py
|
Python
|
OLD THINGS/faceid_nabeel.py
|
AmirQadir/Auto-Object-Detection-and-Tracker
|
24c6f4d18b0496ef19250ccc42f53a7f1f42ed3f
|
[
"MIT"
] | 1
|
2019-05-30T00:59:18.000Z
|
2019-05-30T00:59:18.000Z
|
OLD THINGS/faceid_nabeel.py
|
AmirQadir/Auto-Object-Detection-and-Tracker
|
24c6f4d18b0496ef19250ccc42f53a7f1f42ed3f
|
[
"MIT"
] | null | null | null |
OLD THINGS/faceid_nabeel.py
|
AmirQadir/Auto-Object-Detection-and-Tracker
|
24c6f4d18b0496ef19250ccc42f53a7f1f42ed3f
|
[
"MIT"
] | null | null | null |
from FaceID import faceID
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img1 = cv.imread('nabeel.jpg',0) # queryImage
img2 = cv.imread('nabeel_train.jpg',0) # trainImage
print(img1.shape)
rec = faceID()
print("constructor finished")
# crop_img_2 = getCroppedImage(rec,crop_img_2) accepts image in np arary
print(img1.shape)
img1 = cv.resize(img1,(100,100),interpolation=cv.INTER_AREA)
print(img1.shape)
img1 = rec.prewhiten2(img1)
print(img1.shape)
# print("whiten finished")
embeds = rec.getEmbed(img1)
# print("embedding finished")
| 23.08
| 72
| 0.743501
|
from FaceID import faceID
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img1 = cv.imread('nabeel.jpg',0)
img2 = cv.imread('nabeel_train.jpg',0)
print(img1.shape)
rec = faceID()
print("constructor finished")
print(img1.shape)
img1 = cv.resize(img1,(100,100),interpolation=cv.INTER_AREA)
print(img1.shape)
img1 = rec.prewhiten2(img1)
print(img1.shape)
embeds = rec.getEmbed(img1)
| true
| true
|
f71a234f7d07452f93e0a92a0eb80a7ca5668a4f
| 5,007
|
py
|
Python
|
maps/tests/09.py
|
wayne-wang-1119/maps-project-cs88
|
ad330291042cd659142b1db4d5875fec5ebcfa90
|
[
"MIT"
] | null | null | null |
maps/tests/09.py
|
wayne-wang-1119/maps-project-cs88
|
ad330291042cd659142b1db4d5875fec5ebcfa90
|
[
"MIT"
] | null | null | null |
maps/tests/09.py
|
wayne-wang-1119/maps-project-cs88
|
ad330291042cd659142b1db4d5875fec5ebcfa90
|
[
"MIT"
] | null | null | null |
test = {
'name': 'Problem 9',
'points': 4,
'suites': [
{
'cases': [
{
'answer': 'restaurant names',
'choices': [
'restaurant names',
'restaurants',
'restaurant ratings'
],
'hidden': False,
'locked': False,
'question': 'rate_all returns a dictionary. What are the keys of this dictionary?'
},
{
'answer': 'numbers - a mix of user ratings and predicted ratings',
'choices': [
'numbers - a mix of user ratings and predicted ratings',
'numbers - user ratings only',
'numbers - predicted ratings only',
'numbers - mean restaurant ratings',
'lists - list of all restaurant ratings'
],
'hidden': False,
'locked': False,
'question': 'What are the values of the returned dictionary?'
},
{
'answer': 'a list of restaurants reviewed by the user',
'choices': [
'a list of restaurants reviewed by the user',
'a list of all possible restaurants',
'a list of ratings for restaurants reviewed by the user'
],
'hidden': False,
'locked': False,
'question': 'In rate_all, what does the variable reviewed represent?'
}
],
'scored': False,
'type': 'concept'
},
{
'cases': [
{
'code': r"""
>>> user = make_user('Mr. Mean Rating Minus One', [
... make_review('A', 3),
... make_review('B', 4),
... make_review('C', 1),
... ])
>>> cluster = [
... make_restaurant('A', [1, 2], [], 4, [
... make_review('A', 4),
... make_review('A', 4)
... ]),
... make_restaurant('B', [4, 2], [], 3, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 2)
... ]),
... make_restaurant('D', [4, 4], [], 3.5, [
... make_review('D', 2.5),
... make_review('D', 3.5),
... ]),
... ]
>>> restaurants = {restaurant_name(r): r for r in cluster}
>>> recommend.ALL_RESTAURANTS = cluster
>>> to_rate = cluster[2:]
>>> fns = [restaurant_price, restaurant_mean_rating]
>>> ratings = rate_all(user, to_rate, fns)
>>> type(ratings)
<class 'dict'>
>>> len(ratings) # Only the restaurants passed to rate_all
2
>>> ratings['C'] # A restaurant rated by the user (should be an integer)
1
>>> round(ratings['D'], 5) # A predicted rating (should be a decimal)
2.0
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import tests.test_functions as test
>>> import recommend
>>> from recommend import *
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> user = make_user('Mr. Mean Rating Minus One', [
... make_review('A', 3),
... make_review('B', 4),
... make_review('C', 1),
... ])
>>> cluster = [
... make_restaurant('A', [1, 2], [], 4, [
... make_review('A', 4),
... make_review('A', 4)
... ]),
... make_restaurant('B', [4, 2], [], 3, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 2)
... ]),
... make_restaurant('D', [4, 4], [], 3.5, [
... make_review('D', 2.5),
... make_review('D', 3.5),
... ]),
... ]
>>> recommend.ALL_RESTAURANTS = cluster
>>> to_rate = cluster[2:]
>>> fns = [restaurant_price, restaurant_mean_rating]
>>> ratings = rate_all(user, to_rate, fns)
>>> type(ratings)
<class 'dict'>
>>> len(ratings) # Only the restaurants passed to rate_all
2
>>> ratings['C'] # A restaurant rated by the user (should be an integer)
1
>>> round(ratings['D'], 5) # A predicted rating (should be a decimal)
2.0
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import tests.test_functions as test
>>> import recommend
>>> test.swap_implementations(recommend)
>>> from recommend import *
""",
'teardown': r"""
>>> test.restore_implementations(recommend)
""",
'type': 'doctest'
}
]
}
| 32.512987
| 92
| 0.425205
|
test = {
'name': 'Problem 9',
'points': 4,
'suites': [
{
'cases': [
{
'answer': 'restaurant names',
'choices': [
'restaurant names',
'restaurants',
'restaurant ratings'
],
'hidden': False,
'locked': False,
'question': 'rate_all returns a dictionary. What are the keys of this dictionary?'
},
{
'answer': 'numbers - a mix of user ratings and predicted ratings',
'choices': [
'numbers - a mix of user ratings and predicted ratings',
'numbers - user ratings only',
'numbers - predicted ratings only',
'numbers - mean restaurant ratings',
'lists - list of all restaurant ratings'
],
'hidden': False,
'locked': False,
'question': 'What are the values of the returned dictionary?'
},
{
'answer': 'a list of restaurants reviewed by the user',
'choices': [
'a list of restaurants reviewed by the user',
'a list of all possible restaurants',
'a list of ratings for restaurants reviewed by the user'
],
'hidden': False,
'locked': False,
'question': 'In rate_all, what does the variable reviewed represent?'
}
],
'scored': False,
'type': 'concept'
},
{
'cases': [
{
'code': r"""
>>> user = make_user('Mr. Mean Rating Minus One', [
... make_review('A', 3),
... make_review('B', 4),
... make_review('C', 1),
... ])
>>> cluster = [
... make_restaurant('A', [1, 2], [], 4, [
... make_review('A', 4),
... make_review('A', 4)
... ]),
... make_restaurant('B', [4, 2], [], 3, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 2)
... ]),
... make_restaurant('D', [4, 4], [], 3.5, [
... make_review('D', 2.5),
... make_review('D', 3.5),
... ]),
... ]
>>> restaurants = {restaurant_name(r): r for r in cluster}
>>> recommend.ALL_RESTAURANTS = cluster
>>> to_rate = cluster[2:]
>>> fns = [restaurant_price, restaurant_mean_rating]
>>> ratings = rate_all(user, to_rate, fns)
>>> type(ratings)
<class 'dict'>
>>> len(ratings) # Only the restaurants passed to rate_all
2
>>> ratings['C'] # A restaurant rated by the user (should be an integer)
1
>>> round(ratings['D'], 5) # A predicted rating (should be a decimal)
2.0
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import tests.test_functions as test
>>> import recommend
>>> from recommend import *
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> user = make_user('Mr. Mean Rating Minus One', [
... make_review('A', 3),
... make_review('B', 4),
... make_review('C', 1),
... ])
>>> cluster = [
... make_restaurant('A', [1, 2], [], 4, [
... make_review('A', 4),
... make_review('A', 4)
... ]),
... make_restaurant('B', [4, 2], [], 3, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 2)
... ]),
... make_restaurant('D', [4, 4], [], 3.5, [
... make_review('D', 2.5),
... make_review('D', 3.5),
... ]),
... ]
>>> recommend.ALL_RESTAURANTS = cluster
>>> to_rate = cluster[2:]
>>> fns = [restaurant_price, restaurant_mean_rating]
>>> ratings = rate_all(user, to_rate, fns)
>>> type(ratings)
<class 'dict'>
>>> len(ratings) # Only the restaurants passed to rate_all
2
>>> ratings['C'] # A restaurant rated by the user (should be an integer)
1
>>> round(ratings['D'], 5) # A predicted rating (should be a decimal)
2.0
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import tests.test_functions as test
>>> import recommend
>>> test.swap_implementations(recommend)
>>> from recommend import *
""",
'teardown': r"""
>>> test.restore_implementations(recommend)
""",
'type': 'doctest'
}
]
}
| true
| true
|
f71a245fa32058c020191858dd725ba966da6364
| 728
|
py
|
Python
|
unstar_github.py
|
ashwinvis/zotero-tools
|
fa4ede2382ba6d462325b7cb08c66575cf87ce20
|
[
"Apache-2.0"
] | null | null | null |
unstar_github.py
|
ashwinvis/zotero-tools
|
fa4ede2382ba6d462325b7cb08c66575cf87ce20
|
[
"Apache-2.0"
] | null | null | null |
unstar_github.py
|
ashwinvis/zotero-tools
|
fa4ede2382ba6d462325b7cb08c66575cf87ce20
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import random
import time
from github import Github
# Ref:
# https://pygithub.readthedocs.io/en/latest/introduction.html#very-short-tutorial
# If you are using an access token to circumvent 2FA, make sure you have
# enabled "repo" scope
g = Github("username", "password")
me = g.get_user()
starred = me.get_starred()
for repo in starred:
print("Unstarring", repo)
me.remove_from_starred(repo)
time.sleep(1 + random.random()) # try to avoid rate-limit
# Troubleshooting
# https://developer.github.com/v3/activity/starring/#unstar-a-repository
# Debug using curl:
# $ curl -H "Authorization: token $INSERT_ACCESS_TOKEN" \
# "https://api.github.com/user/starred/<owner>/<repo>" -i -s -X DELETE
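# Illustrative sketch (an assumption, not part of the original script): with a personal
# access token instead of username/password the same loop works unchanged, e.g.
#   g = Github("INSERT_ACCESS_TOKEN")
#   me = g.get_user()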
| 30.333333
| 81
| 0.725275
|
import random
import time
from github import Github
g = Github("username", "password")
me = g.get_user()
starred = me.get_starred()
for repo in starred:
print("Unstarring", repo)
me.remove_from_starred(repo)
time.sleep(1 + random.random())
| true
| true
|
f71a24882b5c3b3d085f16743970960081031c33
| 1,508
|
py
|
Python
|
conda_tools/pack_non_conda.py
|
Amber-MD/ambertools-binary-build
|
257f25cfbe829ee080807c6086d6edf8ec78c534
|
[
"MIT"
] | 4
|
2018-12-02T19:43:52.000Z
|
2019-12-14T01:15:50.000Z
|
conda_tools/pack_non_conda.py
|
Amber-MD/ambertools-binary-build
|
257f25cfbe829ee080807c6086d6edf8ec78c534
|
[
"MIT"
] | 15
|
2017-09-03T03:37:27.000Z
|
2020-10-07T15:19:56.000Z
|
conda_tools/pack_non_conda.py
|
Amber-MD/ambertools-binary-build
|
257f25cfbe829ee080807c6086d6edf8ec78c534
|
[
"MIT"
] | 1
|
2021-06-01T19:18:54.000Z
|
2021-06-01T19:18:54.000Z
|
# Aim: Mostly for phenix users and those who don't like using Miniconda
# 1. wget url_to_tar_file.tar
# 2. tar -xf url_to_tar_file.tar
# 3. source amber17/ambersh
# 4. Just it
""" Usage example: python pack_non_conda.py ambertools-17.0.1-py27_1.tar.bz2
Note: You can use a file pattern
This script will unpack that bz2 file, then do some editing, then pack it to ./non-conda-install folder.
This should be done after doing conda-build
"""
import os
import subprocess
from glob import glob
import argparse
# local file, in the same folder as this script
from edit_package import editing_conda_package
import update_shebang
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('tarfile', nargs='?', help='target file')
parser.add_argument(
"--output-dir",
type=str,
default='./non-conda-install',
dest="output_dir",
help="output directory")
parser.add_argument(
"--date", action="store_true", help="Add date to output tarfile")
parser.add_argument("-d", "--dry_run", action="store_true", help="dry run")
opt = parser.parse_args()
pack_non_conda_package(opt)
def pack_non_conda_package(opt):
with editing_conda_package(
opt.tarfile,
output_dir=opt.output_dir,
add_date=opt.date,
dry_run=opt.dry_run):
update_shebang.update_python_env('./bin/')
        # No need to copy here since we already did this in the conda build step
if __name__ == '__main__':
main()
| 27.925926
| 104
| 0.68634
|
# 1. wget url_to_tar_file.tar
# 2. tar -xf url_to_tar_file.tar
# 3. source amber17/ambersh
# 4. Just it
import os
import subprocess
from glob import glob
import argparse
# local file, in the same folder as this script
from edit_package import editing_conda_package
import update_shebang
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('tarfile', nargs='?', help='target file')
parser.add_argument(
"--output-dir",
type=str,
default='./non-conda-install',
dest="output_dir",
help="output directory")
parser.add_argument(
"--date", action="store_true", help="Add date to output tarfile")
parser.add_argument("-d", "--dry_run", action="store_true", help="dry run")
opt = parser.parse_args()
pack_non_conda_package(opt)
def pack_non_conda_package(opt):
with editing_conda_package(
opt.tarfile,
output_dir=opt.output_dir,
add_date=opt.date,
dry_run=opt.dry_run):
update_shebang.update_python_env('./bin/')
        # No need to copy here since we already did this in the conda build step
if __name__ == '__main__':
main()
| true
| true
|
f71a24ca46c0edd3de051b4f157eaa8487ab5b5d
| 2,561
|
py
|
Python
|
remoteSwitch/lib/rotation.py
|
zkity/remoteSwitch
|
1b66baab87c81a9b79de7b161173fb0c75c03291
|
[
"MIT"
] | 1
|
2021-02-19T11:24:41.000Z
|
2021-02-19T11:24:41.000Z
|
remoteSwitch/lib/rotation.py
|
zkity/remoteSwitch
|
1b66baab87c81a9b79de7b161173fb0c75c03291
|
[
"MIT"
] | null | null | null |
remoteSwitch/lib/rotation.py
|
zkity/remoteSwitch
|
1b66baab87c81a9b79de7b161173fb0c75c03291
|
[
"MIT"
] | null | null | null |
'''
This code was adapted from an online example.
For the original post, see https://my.oschina.net/hechunc/blog/3020284
'''
import RPi.GPIO as GPIO
import time
# This class represents a single SG90 servo module
class Rotation:
    frequency=50 # pulse frequency (Hz)
    delta_theta=0.2 # step size per call (degrees)
    min_delay=0.0006 # theoretical time to rotate by delta_theta (s)
    max_delay=0.4 # time to rotate from 0 to 180 degrees (s)
def __init__(self,channel,min_theta,max_theta,init_theta=0):
'''
        Constructor:
        channel: Raspberry Pi pin number (BCM numbering) that the servo signal wire is connected to
        min_theta: minimum rotation angle of the servo (degrees)
        max_theta: maximum rotation angle of the servo (degrees)
        init_theta: initial angle of the servo (degrees)
'''
self.channel=channel
if(min_theta<0 or min_theta>180):
self.min_theta=0
else:
self.min_theta=min_theta
if(max_theta<0 or max_theta>180):
self.max_theta=180
else:
self.max_theta=max_theta
if(init_theta<min_theta or init_theta>max_theta):
self.init_theta=(self.min_theta+self.max_theta)/2
else:
            self.init_theta=init_theta # initial angle
        # compute the duty cycles for the minimum and maximum angles
self.min_dutycycle=2.5+self.min_theta*10/180
self.max_dutycycle=2.5+self.max_theta*10/180
def setup(self):
'''
        Initialization: set up the GPIO pin and PWM, and move the servo to its initial position.
'''
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.channel,GPIO.OUT)
        self.pwm=GPIO.PWM(self.channel,Rotation.frequency) # PWM instance
        self.dutycycle=2.5+self.init_theta*10/180 # initial value of the pulse duty cycle
        self.pwm.start(self.dutycycle) # move the servo to its initial position
time.sleep(Rotation.max_delay)
def positiveRotation(self):
'''
        Forward step rotation; each call rotates the servo by only delta_theta degrees.
'''
self.dutycycle=self.dutycycle+Rotation.delta_theta*10/180
if self.dutycycle>self.max_dutycycle:
self.dutycycle=self.max_dutycycle
self.pwm.ChangeDutyCycle(self.dutycycle)
time.sleep(Rotation.min_delay)
def reverseRotation(self):
'''
        Reverse step rotation; each call rotates the servo by only delta_theta degrees.
'''
self.dutycycle=self.dutycycle-Rotation.delta_theta*10/180
if self.dutycycle<self.min_dutycycle:
self.dutycycle=self.min_dutycycle
self.pwm.ChangeDutyCycle(self.dutycycle)
time.sleep(Rotation.min_delay)
def specifyRotation(self,theta):
'''
        Rotate to the specified angle.
'''
if(theta<0 or theta>180):
return
self.dutycycle=2.5+theta*10/180
self.pwm.ChangeDutyCycle(self.dutycycle)
time.sleep(Rotation.max_delay)
def cleanup(self):
self.pwm.stop()
time.sleep(Rotation.min_delay)
GPIO.cleanup()
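# Illustrative usage sketch (hypothetical pin number, not part of the original file):
#   rot = Rotation(channel=18, min_theta=0, max_theta=180, init_theta=90)
#   rot.setup()
#   rot.specifyRotation(45)   # 45 degrees -> duty cycle 2.5 + 45*10/180 = 5.0 %
#   rot.cleanup()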
| 28.455556
| 65
| 0.609137
|
import RPi.GPIO as GPIO
import time
class Rotation:
frequency=50
delta_theta=0.2
min_delay=0.0006
max_delay=0.4
def __init__(self,channel,min_theta,max_theta,init_theta=0):
self.channel=channel
if(min_theta<0 or min_theta>180):
self.min_theta=0
else:
self.min_theta=min_theta
if(max_theta<0 or max_theta>180):
self.max_theta=180
else:
self.max_theta=max_theta
if(init_theta<min_theta or init_theta>max_theta):
self.init_theta=(self.min_theta+self.max_theta)/2
else:
self.init_theta=init_theta
self.min_dutycycle=2.5+self.min_theta*10/180
self.max_dutycycle=2.5+self.max_theta*10/180
def setup(self):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.channel,GPIO.OUT)
self.pwm=GPIO.PWM(self.channel,Rotation.frequency)
self.dutycycle=2.5+self.init_theta*10/180
self.pwm.start(self.dutycycle)
time.sleep(Rotation.max_delay)
def positiveRotation(self):
self.dutycycle=self.dutycycle+Rotation.delta_theta*10/180
if self.dutycycle>self.max_dutycycle:
self.dutycycle=self.max_dutycycle
self.pwm.ChangeDutyCycle(self.dutycycle)
time.sleep(Rotation.min_delay)
def reverseRotation(self):
self.dutycycle=self.dutycycle-Rotation.delta_theta*10/180
if self.dutycycle<self.min_dutycycle:
self.dutycycle=self.min_dutycycle
self.pwm.ChangeDutyCycle(self.dutycycle)
time.sleep(Rotation.min_delay)
def specifyRotation(self,theta):
if(theta<0 or theta>180):
return
self.dutycycle=2.5+theta*10/180
self.pwm.ChangeDutyCycle(self.dutycycle)
time.sleep(Rotation.max_delay)
def cleanup(self):
self.pwm.stop()
time.sleep(Rotation.min_delay)
GPIO.cleanup()
| true
| true
|
f71a2762ffafdc8fa41231f81f930197ee062c98
| 15,596
|
py
|
Python
|
trainer.py
|
a-maumau/pixel_objectness.pytorch
|
f5acb972be694662d839b99eb33e66a807d6031e
|
[
"MIT"
] | 4
|
2018-10-28T14:44:24.000Z
|
2019-10-27T11:27:12.000Z
|
trainer.py
|
a-maumau/pixel_objectness.pytorch
|
f5acb972be694662d839b99eb33e66a807d6031e
|
[
"MIT"
] | 2
|
2019-05-10T15:01:45.000Z
|
2019-10-11T09:47:51.000Z
|
trainer.py
|
a-maumau/pixel_objectness.pytorch
|
f5acb972be694662d839b99eb33e66a807d6031e
|
[
"MIT"
] | null | null | null |
import os
import math
import argparse
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from PIL import Image
import data_loader
from mau_ml_util.train_logger import TrainLogger
#from mau_ml_util.metric import SegmentationMetric
from metric_from_latest_mmu import SegmentationMetric
from templates import Template_Trainer
torch.backends.cudnn.benchmark = True
class ColorMap(object):
def __init__(self, base_color=[[0,0,1], [0,1,1], [0,1,0], [1,1,0], [1,0,0]]):
"""
        base_color: list of [int, int, int]
            the three values of each entry are the R, G, B components of one control colour.
"""
self.base_color = base_color
self.num_color_min1 = len(self.base_color)-1
def __call__(self, val):
return self.to_colormap(val)
def to_colormap(self, val):
"""
        returns a tuple of (R, G, B) values, each in the range [0, 1].
"""
fract_between = 0
if val <= 0:
idx1 = idx2 = 0
elif val >= 1:
idx1 = idx2 = self.num_color_min1
else:
val = val * (self.num_color_min1)
idx1 = math.floor(val);
idx2 = idx1+1;
fract_between = val - idx1
r = (self.base_color[idx2][0] - self.base_color[idx1][0])*fract_between + self.base_color[idx1][0]
g = (self.base_color[idx2][1] - self.base_color[idx1][1])*fract_between + self.base_color[idx1][1]
b = (self.base_color[idx2][2] - self.base_color[idx1][2])*fract_between + self.base_color[idx1][2]
return (r,g,b)
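# Added note (illustrative, not part of the original file): with the default base colours the
# map runs blue -> cyan -> green -> yellow -> red, e.g. ColorMap()(0.0) == (0, 0, 1),
# ColorMap()(0.5) == (0, 1, 0) and ColorMap()(1.0) == (1, 0, 0).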
class Trainer_PixelObjectness(Template_Trainer):
def __init__(self, args, model, optimizer, lr_policy):
self.args = args
self.lr_policy = lr_policy
self.iter_wise = self.lr_policy.iteration_wise
        # for logging the training
val_head = ["iter" if self.iter_wise else "epoch", "mean_pixel_accuracy"]
for i in range(self.args.class_num):
val_head.append("mean_precision_class_{}".format(i))
for i in range(self.args.class_num):
val_head.append("mean_IoU_class_{}".format(i))
self.tlog = self.get_train_logger({"train":["iter" if self.iter_wise else "epoch", "batch_mean_total_loss"], "val":val_head},
save_dir=self.args.save_dir, save_name=self.args.save_name, arguments=self.get_argparse_arguments(self.args),
use_http_server=self.args.use_http_server, use_msg_server=self.args.use_msg_server, notificate=False,
visualize_fetch_stride=self.args.viz_fetch_stride, http_port=self.args.http_server_port, msg_port=self.args.msg_server_port)
# paths
self.save_dir = self.tlog.log_save_path
self.model_param_dir = self.tlog.mkdir("model_param")
if torch.cuda.is_available() and not self.args.nogpu:
self.map_device = torch.device('cuda:{}'.format(self.args.gpu_device_num))
else:
self.map_device = torch.device('cpu')
self.model = model
if torch.cuda.is_available() and not args.nogpu:
self.model = self.model.to(self.map_device)
self.optimizer = optimizer
self.train_loader = data_loader.get_train_loader(self.args, [(0.5, 0.5, 0.5),(0.5, 0.5, 0.5)])#[(0.485, 0.456, 0.406),(0.229, 0.224, 0.225)])
self.val_loader = data_loader.get_val_loader(self.args, [(0.5, 0.5, 0.5),(0.5, 0.5, 0.5)])
self.cmap = self._gen_cmap()
if self.args.show_parameters:
for idx, m in enumerate(model.modules()):
print(idx, '->', m)
print(args)
print("\nsaving at {}\n".format(self.save_dir))
# PASCAL VOC color maps
# borrowed from https://gist.github.com/wllhf/a4533e0adebe57e3ed06d4b50c8419ae
def _gen_cmap_voc(self, class_num=255):
def bitget(byteval, idx):
return ((byteval & (1 << idx)) != 0)
cmap = np.zeros((class_num+1, 3), dtype='uint8')
for i in range(class_num+1):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7-j)
g = g | (bitget(c, 1) << 7-j)
b = b | (bitget(c, 2) << 7-j)
c = c >> 3
cmap[i] = np.array([r, g, b])
return cmap
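    # Worked example (added for clarity): for class id 1 only bit 0 is set, so after the first
    # loop iteration r = 1 << 7 = 128 and cmap[1] == [128, 0, 0]; class id 2 sets only bit 1,
    # giving cmap[2] == [0, 128, 0] -- the standard PASCAL VOC palette.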
def _gen_cmap(self, max_value=255):
mapper = ColorMap()
cmap = []
for v in range(max_value+1):
cmap.append(np.uint8(np.array(mapper(v/max_value))*255))
return cmap
def convert_to_color_map(self, img_array, color_map=None, class_num=255):
"""
img_array: numpy.ndarray
shape must be (width, height)
"""
if color_map is None:
color_map = self._gen_cmap()
new_img = np.empty(shape=(img_array.shape[0], img_array.shape[1], 3), dtype='uint8')
for c in range(class_num+1):
index = np.where(img_array == c)
new_img[index] = color_map[c]
return new_img
def validate(self, count):
with torch.no_grad():
self.model.eval()
# logging
pix_acc = 0.0
precision_class = []
jaccard_class = []
#data_count_precision = [0 for i in range(self.args.class_num)]
#data_count_jaccard = [0 for i in range(self.args.class_num)]
metric = SegmentationMetric(self.args.class_num, map_device=self.map_device)
if self.args.quiet:
_trainval_loader = self.val_loader
else:
_trainval_loader = self.to_tqdm(self.val_loader, desc="train val")
for b, (image, mask, original_image) in enumerate(_trainval_loader):
batch_size = image.shape[0]
img = self.format_tensor(image, requires_grad=False, map_device=self.map_device)
mask = self.format_tensor(mask, requires_grad=False, map_device=self.map_device)
outputs, prob_maps = self.model.inference(img)
outputs = F.interpolate(outputs, size=[self.args.crop_size, self.args.crop_size], mode='bilinear', align_corners=False)
prob_maps = F.interpolate(prob_maps, size=[self.args.crop_size, self.args.crop_size], mode='bilinear', align_corners=False)
metric(outputs, mask)
                # save only a few batches as samples
if b < 1:
self.tlog.setup_output("{}_{}_batch_{}_sample".format("iter" if self.iter_wise else "epoch", count, b))
# test color image
#test_img = np.ones((256,256))
#for i in range(256):
# test_img[i] = test_img[i]*i
#
#self.tlog.pack_output(Image.fromarray(self.convert_to_color_map(np.uint8(test_img))))
for n in range(batch_size):
self.tlog.pack_output(Image.fromarray(np.uint8(original_image[n].detach().numpy())))
pred_img = np.uint8(outputs[n].squeeze(0).cpu().detach().numpy())
prob_img = prob_maps[n].squeeze(0).cpu().detach().numpy()
self.tlog.pack_output(Image.fromarray(pred_img*255), not_in_schema=True)
self.tlog.pack_output(Image.fromarray(self.convert_to_color_map(np.uint8(prob_img[1]*255), self.cmap)))
gt_img = np.uint8(mask[n].cpu().detach().numpy())
self.tlog.pack_output(Image.fromarray(gt_img*255), not_in_schema=True)
self.tlog.pack_output(None, " ")
self.tlog.pack_output(None, "validation sample", ["left: input", "center: pred cmap", "right: output mask"])
self.tlog.flush_output()
pix_acc = metric.calc_pix_acc()
precision = metric.calc_mean_precision()
jaccard_index = metric.calc_mean_jaccard_index()
            # TODO: consider returning non-evaluated classes as NaN and filtering
            # them out (NaN compares unequal to itself, so use math.isnan, not ==)
for class_id in range(self.args.class_num):
precision_class.append(precision["class_{}".format(class_id)])
jaccard_class.append(jaccard_index["class_{}".format(class_id)])
            # (disabled) per-class count bookkeeping and missing-value handling
            # were sketched here but never enabled
            # calculated without the background class (class 0)
log_msg_data = [count, pix_acc, np.mean(precision_class[1:]), np.mean(jaccard_class[1:])]
self.tlog.log("val", [count, pix_acc]+precision_class+jaccard_class)
self.tlog.log_message("[{}] mean pix acc.:{:.5f}, precision:{:.5f}, IoU:{:.5f}".format(*log_msg_data), "LOG", "validation")
if not self.args.quiet:
tqdm.write("[{}] mean pix acc.:{:.5f}, precision:{:.5f}, IoU:{:.5f}".format(*log_msg_data))
self.model.train()
def train(self):
train_finish = False
if self.args.quiet:
epochs = range(1, self.args.epochs+1)
else:
epochs = self.to_tqdm(range(1, self.args.epochs+1), desc="train")
curr_iter = 0
epoch = 0
total_loss = 0.0
data_num = 0
        # shared by both epoch-wise and iteration-wise decay policies
decay_arg = {"curr_iter":curr_iter, "curr_epoch":epoch}
for epoch in epochs:
if not self.iter_wise:
total_loss = 0.0
data_num = 0
if self.args.quiet:
_train_loader = self.train_loader
else:
_train_loader = self.to_tqdm(self.train_loader)
for img, mask in _train_loader:
                # the loss shown in the log is averaged over batches
data_num += 1
self.optimizer.zero_grad()
images = self.format_tensor(img, map_device=self.map_device)
masks = self.format_tensor(mask, map_device=self.map_device)
output = self.model(images)
output = F.interpolate(output, size=[self.args.crop_size, self.args.crop_size], mode='bilinear', align_corners=False)
batch_loss = self.model.loss(output, masks)
total_loss += batch_loss.item()
batch_loss.backward()
self.optimizer.step()
curr_iter += 1
if not self.args.quiet:
_train_loader.set_description("{: 3d}: train[{}] loss: {:.5f}".format(curr_iter if self.iter_wise else epoch, self.args.save_name, total_loss/data_num))
if self.iter_wise:
self.lr_policy.decay_lr(**decay_arg)
if curr_iter % self.args.trainval_every == 0:
self.validate(curr_iter)
if curr_iter % self.args.save_every == 0:
state = {'iter': curr_iter,
'optimizer_state_dict' : self.optimizer.state_dict()}
self.model.save(add_state=state, file_name=os.path.join(self.model_param_dir,'model_param_iter{}.pth'.format(curr_iter)))
self.tlog.log_message("[iter:{}] model saved.".format(curr_iter), "LOG", "train")
if curr_iter % self.args.log_every == 0:
if not self.args.quiet:
tqdm.write("[#{: 3d}] {} iter mean loss: {:.5f}".format(curr_iter, self.args.log_every, total_loss/data_num))
self.tlog.log("train", [curr_iter, float(total_loss/data_num)])
self.tlog.log_message("[{}] {} iter mean loss:{:.5f}".format("iter:{}".format(curr_iter), self.args.log_every, float(total_loss/data_num)), "LOG", "train")
total_loss = 0
data_num = 0
if curr_iter == self.args.max_iter:
train_finish = True
_train_loader.close()
break
if train_finish:
epochs.close()
break
if not self.iter_wise:
if not self.args.quiet:
tqdm.write("[# {: 3d}] batch mean loss: {:.5f}".format(epoch, total_loss/data_num))
if epoch % self.args.log_every == 0:
self.tlog.log("train", [epoch, float(total_loss/data_num)])
self.tlog.log_message("[{}] batch mean loss:{:.5f}".format("epoch:{}".format(epoch), float(total_loss/data_num)), "LOG", "train")
                # run validation on schedule
if epoch % self.args.trainval_every == 0:
self.validate(epoch)
self.lr_policy.decay_lr(**decay_arg)
                # (superseded by self.lr_policy) manual learning-rate decay
                # formerly lived here
# save model
if epoch % self.args.save_every == 0:
state = {'epoch': epoch,
'optimizer_state_dict' : self.optimizer.state_dict()}
self.model.save(add_state=state, file_name=os.path.join(self.model_param_dir,'model_param_e{}.pth'.format(epoch)))
self.tlog.log_message("[epoch:{}] model saved.".format(epoch), "LOG", "train")
self.model.save(add_state={'optimizer_state_dict' : self.optimizer.state_dict()},
file_name=os.path.join(self.model_param_dir, 'model_param_fin_{}.pth'.format(datetime.now().strftime("%Y%m%d_%H-%M-%S"))))
print("data is saved at {}".format(self.save_dir))
def test_loader(self):
        from matplotlib import pyplot as plt
if self.args.quiet:
epochs = range(1, self.args.epochs+1)
else:
epochs = self.to_tqdm(range(1, self.args.epochs+1), desc="train")
for epoch in epochs:
if self.args.quiet:
_train_loader = self.train_loader
else:
_train_loader = self.to_tqdm(self.train_loader)
for img, mask in _train_loader:
batch_size = img.shape[0]
img = img.numpy()
mask = mask.numpy()
for i in range(batch_size):
_img = np.uint8(img[i]*255).transpose(1,2,0)
_mask = self.convert_to_color_map(np.uint8(mask[i]), self.cmap)
merged_img = np.concatenate([_img, _mask], axis=1)
plt.imshow(merged_img)
plt.show()
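# A minimal driver sketch (hypothetical: `get_args`, `build_model`, and
# `PolyPolicy` are placeholders; the real entry point is not part of this
# file):
#
#   args = get_args()
#   model = build_model(args)
#   optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
#   lr_policy = PolyPolicy(optimizer, args)
#   trainer = Trainer_PixelObjectness(args, model, optimizer, lr_policy)
#   trainer.train()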
hexsha: f71a280976585c5919618be25b73b5e66de54cdf | size: 4197 | ext: py | lang: Python
path: ucsmsdk/mometa/comm/CommSyslogClient.py | repo: anoop1984/python_sdk @ c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | licenses: ["Apache-2.0"] | stars/issues/forks: null
"""This module contains the general information for CommSyslogClient ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class CommSyslogClientConsts():
ADMIN_STATE_DISABLED = "disabled"
ADMIN_STATE_ENABLED = "enabled"
FORWARDING_FACILITY_LOCAL0 = "local0"
FORWARDING_FACILITY_LOCAL1 = "local1"
FORWARDING_FACILITY_LOCAL2 = "local2"
FORWARDING_FACILITY_LOCAL3 = "local3"
FORWARDING_FACILITY_LOCAL4 = "local4"
FORWARDING_FACILITY_LOCAL5 = "local5"
FORWARDING_FACILITY_LOCAL6 = "local6"
FORWARDING_FACILITY_LOCAL7 = "local7"
NAME_PRIMARY = "primary"
NAME_SECONDARY = "secondary"
NAME_TERTIARY = "tertiary"
SEVERITY_ALERTS = "alerts"
SEVERITY_CRITICAL = "critical"
SEVERITY_DEBUGGING = "debugging"
SEVERITY_EMERGENCIES = "emergencies"
SEVERITY_ERRORS = "errors"
SEVERITY_INFORMATION = "information"
SEVERITY_NOTIFICATIONS = "notifications"
SEVERITY_WARNINGS = "warnings"
class CommSyslogClient(ManagedObject):
"""This is CommSyslogClient class."""
consts = CommSyslogClientConsts()
naming_props = set([u'name'])
mo_meta = MoMeta("CommSyslogClient", "commSyslogClient", "client-[name]", VersionMeta.Version101e, "InputOutput", 0x3ff, [], ["admin", "operations"], [u'commSyslog'], [], ["Get", "Set"])
prop_meta = {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["disabled", "enabled"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x4, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"forwarding_facility": MoPropertyMeta("forwarding_facility", "forwardingFacility", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"], []),
"hostname": MoPropertyMeta("hostname", "hostname", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x40, None, None, None, ["primary", "secondary", "tertiary"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"severity": MoPropertyMeta("severity", "severity", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["alerts", "critical", "debugging", "emergencies", "errors", "information", "notifications", "warnings"], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"adminState": "admin_state",
"childAction": "child_action",
"dn": "dn",
"forwardingFacility": "forwarding_facility",
"hostname": "hostname",
"name": "name",
"rn": "rn",
"sacl": "sacl",
"severity": "severity",
"status": "status",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
self._dirty_mask = 0
self.name = name
self.admin_state = None
self.child_action = None
self.forwarding_facility = None
self.hostname = None
self.sacl = None
self.severity = None
self.status = None
ManagedObject.__init__(self, "CommSyslogClient", parent_mo_or_dn, **kwargs)
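# Usage sketch (assumes a reachable UCS Manager; `query_dn`, `set_mo`, and
# `commit` are standard UcsHandle calls, but the address and credentials are
# placeholders):
#
#   from ucsmsdk.ucshandle import UcsHandle
#   handle = UcsHandle("192.0.2.10", "admin", "password")
#   handle.login()
#   mo = handle.query_dn("sys/svc-ext/syslog/client-primary")
#   mo.hostname = "203.0.113.5"
#   mo.admin_state = "enabled"
#   mo.severity = "warnings"
#   handle.set_mo(mo)
#   handle.commit()
#   handle.logout()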
hexsha: f71a28fae36dc01961cc60b2d06bc962234e0ce7 | size: 12999 | ext: py | lang: Python
path: hy/macros.py | repo: silver-dragon/hy @ c7b2f47681f54b365da22ec8d65c7dbc59ab7501 | licenses: ["MIT"] | stars/issues/forks: null
# Copyright 2021 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
import sys
import builtins
import importlib
import inspect
import pkgutil
import traceback
from ast import AST
from funcparserlib.parser import NoParseError
from hy._compat import PY3_8
from hy.model_patterns import whole
from hy.models import replace_hy_obj, Expression, Symbol, as_model, is_unpack
from hy.lex import mangle, unmangle
from hy.errors import (HyLanguageError, HyMacroExpansionError, HyTypeError,
HyRequireError)
import hy.compiler
EXTRA_MACROS = ["hy.core.result_macros", "hy.core.macros"]
def macro(name):
"""Decorator to define a macro called `name`.
"""
return lambda fn: install_macro(name, fn, fn)
def pattern_macro(names, pattern, shadow=None):
pattern = whole(pattern)
py_version_required = None
if isinstance(names, tuple):
py_version_required, names = names
def dec(fn):
def wrapper_maker(name):
def wrapper(hy_compiler, *args):
if (shadow and
any(is_unpack("iterable", x) for x in args)):
# Try a shadow function call with this name instead.
return Expression([
Symbol('hy.core.shadow.' + name),
*args]).replace(hy_compiler.this)
expr = hy_compiler.this
root = unmangle(expr[0])
if (py_version_required and
sys.version_info < py_version_required):
raise hy_compiler._syntax_error(expr,
'`{}` requires Python {} or later'.format(
root,
'.'.join(map(str, py_version_required))))
try:
parse_tree = pattern.parse(args)
except NoParseError as e:
raise hy_compiler._syntax_error(
expr[min(e.state.pos + 1, len(expr) - 1)],
"parse error for pattern macro '{}': {}".format(
root, e.msg.replace("<EOF>", "end of form")))
return fn(hy_compiler, expr, root, *parse_tree)
return wrapper
for name in ([names] if isinstance(names, str) else names):
install_macro(name, wrapper_maker(name), fn)
return fn
return dec
def install_macro(name, fn, module_of):
name = mangle(name)
fn = rename_function(fn, name)
(inspect.getmodule(module_of).__dict__
.setdefault('__macros__', {})[name]) = fn
return fn
def _same_modules(source_module, target_module):
"""Compare the filenames associated with the given modules names.
This tries to not actually load the modules.
"""
if not (source_module or target_module):
return False
if target_module == source_module:
return True
def _get_filename(module):
filename = None
try:
if not inspect.ismodule(module):
loader = pkgutil.get_loader(module)
if isinstance(loader, importlib.machinery.SourceFileLoader):
filename = loader.get_filename()
else:
filename = inspect.getfile(module)
except (TypeError, ImportError):
pass
return filename
source_filename = _get_filename(source_module)
target_filename = _get_filename(target_module)
return (source_filename and target_filename and
source_filename == target_filename)
def require(source_module, target_module, assignments, prefix=""):
"""Load macros from one module into the namespace of another.
This function is called from the macro also named `require`.
Parameters
----------
source_module: str or types.ModuleType
The module from which macros are to be imported.
target_module: str, types.ModuleType or None
The module into which the macros will be loaded. If `None`, then
the caller's namespace.
The latter is useful during evaluation of generated AST/bytecode.
assignments: str or list of tuples of strs
The string "ALL" or a list of macro name and alias pairs.
prefix: str, optional ("")
If nonempty, its value is prepended to the name of each imported macro.
This allows one to emulate namespaced macros, like
"mymacromodule.mymacro", which looks like an attribute of a module.
Returns
-------
out: boolean
Whether or not macros were actually transferred.
"""
if target_module is None:
parent_frame = inspect.stack()[1][0]
target_namespace = parent_frame.f_globals
target_module = target_namespace.get('__name__', None)
elif isinstance(target_module, str):
target_module = importlib.import_module(target_module)
target_namespace = target_module.__dict__
elif inspect.ismodule(target_module):
target_namespace = target_module.__dict__
else:
raise HyTypeError('`target_module` is not a recognized type: {}'.format(
type(target_module)))
# Let's do a quick check to make sure the source module isn't actually
# the module being compiled (e.g. when `runpy` executes a module's code
# in `__main__`).
# We use the module's underlying filename for this (when they exist), since
# it's the most "fixed" attribute.
if _same_modules(source_module, target_module):
return False
if not inspect.ismodule(source_module):
try:
if source_module.startswith("."):
source_dirs = source_module.split(".")
target_dirs = (getattr(target_module, "__name__", target_module)
.split("."))
while (len(source_dirs) > 1
and source_dirs[0] == ""
and target_dirs):
source_dirs.pop(0)
target_dirs.pop()
package = ".".join(target_dirs + source_dirs[:-1])
else:
package = None
source_module = importlib.import_module(source_module, package)
except ImportError as e:
raise HyRequireError(e.args[0]).with_traceback(None)
source_macros = source_module.__dict__.setdefault('__macros__', {})
if not source_module.__macros__:
if assignments != "ALL":
for name, alias in assignments:
try:
require(f"{source_module.__name__}.{mangle(name)}",
target_module,
"ALL",
prefix=alias)
except HyRequireError as e:
raise HyRequireError(f"Cannot import name '{name}'"
f" from '{source_module.__name__}'"
f" ({source_module.__file__})")
return True
else:
return False
target_macros = target_namespace.setdefault('__macros__', {})
if prefix:
prefix += "."
if assignments == "ALL":
name_assigns = [(k, k) for k in source_macros.keys()]
else:
name_assigns = assignments
for name, alias in name_assigns:
_name = mangle(name)
alias = mangle('#' + prefix + unmangle(alias)[1:]
if unmangle(alias).startswith('#')
else prefix + alias)
if _name in source_module.__macros__:
target_macros[alias] = source_macros[_name]
else:
raise HyRequireError('Could not require name {} from {}'.format(
_name, source_module))
return True
def load_macros(module):
"""Load the hy builtin macros into module `module_name`,
removing any prior macros set.
It is an error to call this on any module in `hy.core`.
"""
builtin_macros = EXTRA_MACROS
module.__macros__ = {}
for builtin_mod_name in builtin_macros:
builtin_mod = importlib.import_module(builtin_mod_name)
# This may overwrite macros in the module.
if hasattr(builtin_mod, '__macros__'):
module.__macros__.update(getattr(builtin_mod, '__macros__', {}))
class MacroExceptions():
"""wrap non ``HyLanguageError``'s in ``HyMacroExpansionError`` preserving stack trace
used in lieu of ``@contextmanager`` to ensure stack trace contains only internal hy
modules for consistent filtering.
"""
def __init__(self, module, macro_tree, compiler=None):
self.module = module
self.macro_tree = macro_tree
self.compiler = compiler
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if exc_type is None:
return True
elif not issubclass(exc_type, HyLanguageError):
if self.compiler:
filename = self.compiler.filename
source = self.compiler.source
else:
filename = None
source = None
exc_msg = ' '.join(traceback.format_exception_only(
sys.exc_info()[0], sys.exc_info()[1]))
msg = "expanding macro {}\n ".format(str(self.macro_tree[0]))
msg += exc_msg
raise HyMacroExpansionError(msg, self.macro_tree, filename, source)
else:
return False
def macroexpand(tree, module, compiler=None, once=False, result_ok=True):
"""Expand the toplevel macros for the given Hy AST tree.
Load the macros from the given `module`, then expand the (top-level) macros
in `tree` until we no longer can.
`Expression` resulting from macro expansions are assigned the module in
which the macro function is defined (determined using `inspect.getmodule`).
If the resulting `Expression` is itself macro expanded, then the namespace
of the assigned module is checked first for a macro corresponding to the
expression's head/car symbol. If the head/car symbol of such a `Expression`
is not found among the macros of its assigned module's namespace, the
outer-most namespace--e.g. the one given by the `module` parameter--is used
as a fallback.
Parameters
----------
tree: hy.models.Object or list
Hy AST tree.
module: str or types.ModuleType
Module used to determine the local namespace for macros.
compiler: HyASTCompiler, optional
The compiler object passed to expanded macros.
once: boolean, optional
Only expand the first macro in `tree`.
Returns
    -------
out: hy.models.Object
Returns a mutated tree with macros expanded.
"""
if not inspect.ismodule(module):
module = importlib.import_module(module)
assert not compiler or compiler.module == module
while isinstance(tree, Expression) and tree:
fn = tree[0]
if fn in ("quote", "quasiquote") or not isinstance(fn, Symbol):
break
fn = mangle(fn)
expr_modules = (([] if not hasattr(tree, 'module') else [tree.module])
+ [module])
expr_modules.append(builtins)
# Choose the first namespace with the macro.
m = next((mod.__macros__[fn]
for mod in expr_modules
if fn in getattr(mod, '__macros__', ())),
None)
if not m:
break
with MacroExceptions(module, tree, compiler):
if compiler:
compiler.this = tree
obj = m(compiler, *tree[1:])
if isinstance(obj, (hy.compiler.Result, AST)):
return obj if result_ok else tree
if isinstance(obj, Expression):
obj.module = inspect.getmodule(m)
tree = replace_hy_obj(obj, tree)
if once:
break
tree = as_model(tree)
return tree
def macroexpand_1(tree, module, compiler=None):
"""Expand the toplevel macro from `tree` once, in the context of
`compiler`."""
return macroexpand(tree, module, compiler, once=True)
def rename_function(func, new_name):
"""Creates a copy of a function and [re]sets the name at the code-object
level.
"""
c = func.__code__
new_code = type(c)(*[getattr(c, 'co_{}'.format(a))
if a != 'name' else str(new_name)
for a in code_obj_args])
_fn = type(func)(new_code, func.__globals__, str(new_name),
func.__defaults__, func.__closure__)
_fn.__dict__.update(func.__dict__)
return _fn
code_obj_args = ['argcount', 'posonlyargcount', 'kwonlyargcount', 'nlocals', 'stacksize',
'flags', 'code', 'consts', 'names', 'varnames', 'filename', 'name',
'firstlineno', 'lnotab', 'freevars', 'cellvars']
if not PY3_8:
code_obj_args.remove("posonlyargcount")
hexsha: f71a2b94b5be2676eac49b95b663de23170408de | size: 9927 | ext: py | lang: Python
path: gpt2_model.py | repo: solad5/acgan-gpt2 @ 52901a996fd235355f8c3f6b83037c85b1fdb415 | licenses: ["MIT"] | stars/issues/forks: null
'''
code by TaeHwan Jung(@graykode)
Original Paper and repository here : https://github.com/openai/gpt-2
GPT2 Pytorch Model : https://github.com/huggingface/pytorch-pretrained-BERT
'''
import copy
import torch
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
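# The expression above is the tanh approximation of the Gaussian Error Linear
# Unit: GELU(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))),
# which avoids evaluating the exact Gaussian CDF.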
def load_weight(model, state_dict):
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if key.endswith(".g"):
new_key = key[:-2] + ".weight"
elif key.endswith(".b"):
new_key = key[:-2] + ".bias"
elif key.endswith(".w"):
new_key = key[:-2] + ".weight"
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
start_model = model
if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
start_model = model.transformer
load(start_model, prefix="")
# Make sure we are still sharing the output and input embeddings after loading weights
model.set_tied()
return model
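# The ".g"/".b"/".w" suffixes handled above come from the original TensorFlow
# checkpoint naming (gain/bias/weight); load_weight maps them onto PyTorch's
# ".weight"/".bias" convention before delegating to _load_from_state_dict.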
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class Conv1D(nn.Module):
def __init__(self, nf, nx):
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
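# Conv1D follows the GPT-2 convention: despite the name it is a dense layer
# whose weight is stored transposed, (nx, nf), relative to nn.Linear.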
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
nd, ns = w.size(-2), w.size(-1)
b = self.bias[:, :, ns - nd:ns, :ns]
# Here the bias b also serves as the mask to remove future information
w = w * b - 1e10 * (1 - b)
w = nn.Softmax(dim=-1)(w)
return torch.matmul(w, v)
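    # The `w * b - 1e10 * (1 - b)` line above is the usual additive causal
    # mask: masked positions get a large negative score and vanish under the
    # softmax, so each position attends only to itself and earlier positions.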
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(self, x, layer_past=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
present = torch.stack((key.transpose(-2, -1), value)) # transpose to have same shapes for stacking
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
return a, present
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = gelu
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return h2
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super(Block, self).__init__()
nx = config.n_embd
self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
def forward(self, x, layer_past=None):
a, present = self.attn(self.ln_1(x), layer_past=layer_past)
x = x + a
m = self.mlp(self.ln_2(x))
x = x + m
return x, present
class Transformer(nn.Module):
def __init__(self, config):
super().__init__()
self.n_layer = config.n_layer
self.n_embd = config.n_embd
self.n_vocab = config.vocab_size
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
block = Block(config.n_ctx, config, scale=True)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
def set_embeddings_weights(self, model_embeddings_weights):
embed_shape = model_embeddings_weights.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model_embeddings_weights # Tied weights
def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = past[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long,
device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.wte(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
presents = []
for block, layer_past in zip(self.h, past):
hidden_states, present = block(hidden_states, layer_past)
presents.append(present)
hidden_states = self.ln_f(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
return hidden_states.view(*output_shape), presents
class LinearReadoutHead(nn.Module):
def __init__(self, model_embeddings_weights, config):
super().__init__()
self.n_embd = config.n_embd
self.set_embeddings_weights(model_embeddings_weights)
def set_embeddings_weights(self, model_embeddings_weights):
embed_shape = model_embeddings_weights.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model_embeddings_weights # Tied weights
def forward(self, hidden_state):
# Truncated Language modeling logits (we remove the last token)
# h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
lm_logits = self.decoder(hidden_state)
return lm_logits
class GPT2(nn.Module):
def __init__(self, config):
super().__init__()
self.transformer = Transformer(config)
self.readout_head = LinearReadoutHead(self.transformer.wte.weight, config)
def set_tied(self):
""" Make sure we are sharing the embeddings
"""
self.readout_head.set_embeddings_weights(self.transformer.wte.weight)
def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):
hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)
return hidden_states
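
if __name__ == "__main__":
    # A minimal smoke test (a sketch: the real config class is not part of
    # this file, so a SimpleNamespace stands in for it).
    from types import SimpleNamespace

    config = SimpleNamespace(vocab_size=50257, n_positions=1024, n_ctx=1024,
                             n_embd=768, n_layer=2, n_head=12,
                             layer_norm_epsilon=1e-5)
    model = GPT2(config)
    tokens = torch.randint(0, config.vocab_size, (1, 16))
    hidden = model(tokens)
    print(hidden.shape)  # torch.Size([1, 16, 768])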
| 38.476744
| 108
| 0.621739
|
import copy
import torch
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def load_weight(model, state_dict):
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if key.endswith(".g"):
new_key = key[:-2] + ".weight"
elif key.endswith(".b"):
new_key = key[:-2] + ".bias"
elif key.endswith(".w"):
new_key = key[:-2] + ".weight"
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
start_model = model
if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
start_model = model.transformer
load(start_model, prefix="")
model.set_tied()
return model
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class Conv1D(nn.Module):
def __init__(self, nf, nx):
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super(Attention, self).__init__()
n_state = nx
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
nd, ns = w.size(-2), w.size(-1)
b = self.bias[:, :, ns - nd:ns, :ns]
w = w * b - 1e10 * (1 - b)
w = nn.Softmax(dim=-1)(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape)
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape)
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x, layer_past=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
present = torch.stack((key.transpose(-2, -1), value))
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
return a, present
class MLP(nn.Module):
def __init__(self, n_state, config):
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = gelu
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return h2
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super(Block, self).__init__()
nx = config.n_embd
self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
def forward(self, x, layer_past=None):
a, present = self.attn(self.ln_1(x), layer_past=layer_past)
x = x + a
m = self.mlp(self.ln_2(x))
x = x + m
return x, present
class Transformer(nn.Module):
def __init__(self, config):
super().__init__()
self.n_layer = config.n_layer
self.n_embd = config.n_embd
self.n_vocab = config.vocab_size
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
block = Block(config.n_ctx, config, scale=True)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
def set_embeddings_weights(self, model_embeddings_weights):
embed_shape = model_embeddings_weights.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model_embeddings_weights
def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = past[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long,
device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.wte(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
presents = []
for block, layer_past in zip(self.h, past):
hidden_states, present = block(hidden_states, layer_past)
presents.append(present)
hidden_states = self.ln_f(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
return hidden_states.view(*output_shape), presents
class LinearReadoutHead(nn.Module):
def __init__(self, model_embeddings_weights, config):
super().__init__()
self.n_embd = config.n_embd
self.set_embeddings_weights(model_embeddings_weights)
def set_embeddings_weights(self, model_embeddings_weights):
embed_shape = model_embeddings_weights.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model_embeddings_weights
def forward(self, hidden_state):
lm_logits = self.decoder(hidden_state)
return lm_logits
class GPT2(nn.Module):
def __init__(self, config):
super().__init__()
self.transformer = Transformer(config)
self.readout_head = LinearReadoutHead(self.transformer.wte.weight, config)
def set_tied(self):
self.readout_head.set_embeddings_weights(self.transformer.wte.weight)
def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):
hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)
return hidden_states
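# Illustrative usage sketch (not part of the original file): builds a minimal
# config and runs a single forward pass through the GPT2 module defined above.
# The config fields and sizes below are assumptions chosen to make the example
# self-contained; it also assumes torch and the helper layers used earlier in
# this file (Conv1D, LayerNorm, gelu) are available.
if __name__ == "__main__":
    from types import SimpleNamespace
    demo_config = SimpleNamespace(
        vocab_size=50257, n_positions=1024, n_ctx=1024,
        n_embd=768, n_layer=12, n_head=12, layer_norm_epsilon=1e-5,
    )
    model = GPT2(demo_config)
    model.set_tied()  # tie the readout head to the token embedding matrix
    tokens = torch.randint(0, demo_config.vocab_size, (1, 8))
    hidden = model(tokens)  # forward() returns only the final hidden states
    print(hidden.shape)     # expected: torch.Size([1, 8, 768])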
| true
| true
|
f71a2c9c59e0ff4712893eebaf781a9ad92104c2
| 4,896
|
py
|
Python
|
library/bigip_software_update.py
|
Larsende/f5_ansible
|
93b0747ba663128e2c8dfc456dad4653cdde4f38
|
[
"Apache-2.0"
] | 12
|
2016-12-29T16:09:21.000Z
|
2019-06-29T14:12:17.000Z
|
library/bigip_software_update.py
|
Larsende/f5_ansible
|
93b0747ba663128e2c8dfc456dad4653cdde4f38
|
[
"Apache-2.0"
] | 24
|
2017-05-24T07:56:56.000Z
|
2017-11-30T09:31:56.000Z
|
library/bigip_software_update.py
|
Larsende/f5_ansible
|
93b0747ba663128e2c8dfc456dad4653cdde4f38
|
[
"Apache-2.0"
] | 26
|
2017-05-31T17:15:32.000Z
|
2021-03-29T03:45:06.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_software_update
short_description: Manage the software update settings of a BIG-IP
description:
- Manage the software update settings of a BIG-IP.
version_added: "2.4"
options:
auto_check:
description:
- Specifies whether to automatically check for updates on the F5
Networks downloads server.
required: False
default: None
choices:
- yes
- no
frequency:
description:
- Specifies the schedule for the automatic update check.
required: False
default: None
choices:
- daily
- monthly
- weekly
notes:
  - Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 2.2.3
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
'''
RETURN = '''
'''
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_map = {
'autoCheck': 'auto_check'
}
updatables = [
'auto_check', 'frequency'
]
returnables = [
'auto_check', 'frequency'
]
@property
def auto_check(self):
if self._values['auto_check'] is None:
return None
elif self._values['auto_check'] in [True, 'enabled']:
return 'enabled'
else:
return 'disabled'
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.network == 'default':
result['network'] = None
elif self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def exec_module(self):
result = dict()
try:
changed = self.update()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.sys.software.update.load()
result.modify(**params)
def read_current_from_device(self):
resource = self.client.api.tm.sys.software.update.load()
result = resource.attrs
return Parameters(result)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
auto_check=dict(
type='bool'
),
frequency=dict(
choices=['daily', 'monthly', 'weekly']
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
if __name__ == '__main__':
main()
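# Illustrative note (not part of the original module): the update flow above is
# a want/have diff. For example, if the task supplies auto_check=yes and
# frequency=weekly while the device currently reports auto_check 'disabled' and
# frequency 'weekly', _update_changed_options() records only
# {'auto_check': 'enabled'}, should_update() returns True, and
# update_on_device() pushes just that setting via tm.sys.software.update's
# modify() call.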
| 25.5
| 91
| 0.607639
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_software_update
short_description: Manage the software update settings of a BIG-IP
description:
- Manage the software update settings of a BIG-IP.
version_added: "2.4"
options:
auto_check:
description:
- Specifies whether to automatically check for updates on the F5
Networks downloads server.
required: False
default: None
choices:
- yes
- no
frequency:
description:
- Specifies the schedule for the automatic update check.
required: False
default: None
choices:
- daily
- monthly
- weekly
notes:
  - Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 2.2.3
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
'''
RETURN = '''
'''
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_map = {
'autoCheck': 'auto_check'
}
updatables = [
'auto_check', 'frequency'
]
returnables = [
'auto_check', 'frequency'
]
@property
def auto_check(self):
if self._values['auto_check'] is None:
return None
elif self._values['auto_check'] in [True, 'enabled']:
return 'enabled'
else:
return 'disabled'
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.network == 'default':
result['network'] = None
elif self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def exec_module(self):
result = dict()
try:
changed = self.update()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.sys.software.update.load()
result.modify(**params)
def read_current_from_device(self):
resource = self.client.api.tm.sys.software.update.load()
result = resource.attrs
return Parameters(result)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
auto_check=dict(
type='bool'
),
frequency=dict(
choices=['daily', 'monthly', 'weekly']
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
if __name__ == '__main__':
main()
| true
| true
|
f71a2cf03b51c5cbf16bd9aeb093968dd349cef9
| 7,353
|
py
|
Python
|
take_images.py
|
ManuLado/Enviar-comandos-a-marlin
|
f7f474ad0459602176114c62e7c97874cb69191b
|
[
"MIT"
] | 2
|
2021-10-02T20:20:45.000Z
|
2021-10-02T20:20:53.000Z
|
take_images.py
|
ManuLado/2D-XRay_Scan_control
|
5ba596c9b0db47125e2e29ed8084e61d326e8777
|
[
"MIT"
] | null | null | null |
take_images.py
|
ManuLado/2D-XRay_Scan_control
|
5ba596c9b0db47125e2e29ed8084e61d326e8777
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Captures still frames read from the Arducam and saves them as FITS files.
# The output image name and the number of images to capture must be given.
# SYNTAX: python take_images.py IMAGE_NAME NUM_IMAGES
# 1- Base name of the output image(s)
# 2- Number of images to capture
from ctypes import *
import ctypes
import sys
import os
import time
from PIL import Image
import numpy as np
import thread as thread
import math
from select import select
from evdev import InputDevice
from evdev import ecodes
from astropy.io import fits
import ArducamSDK
# Argument parsing
if (len(sys.argv)==3):
NOMBREIMG = sys.argv[1];
NUMIMG = int(sys.argv[2]);
else:
    print ("Two arguments are required: IMAGE_NAME NUM_IMAGES")
exit()
#### ARDUCAMSDK CONFIGURATION ################
COLOR_BYTE2RGB = 47 # Not modified from the original
CAMERA_MT9M001 = 0x4D091031 # Not modified from the original
SensorShipAddr = 186
I2C_MODE_8_16 = 1
usbVid = 0x52CB # Not modified from the original
Width = 1280 #1280
Height = 1024 #1024
cfg ={"u32CameraType":CAMERA_MT9M001,
"u32Width":Width,"u32Height":Height,
"u32UsbVersion":1,
"u8PixelBytes":1,
"u16Vid":0x52cb,
"u8PixelBits":8,
"u32SensorShipAddr":SensorShipAddr,
"emI2cMode":I2C_MODE_8_16 }
# FLAGS
global saveFlag,downFlag,flag,H_value,V_value,lx,ly,mx,my,dx,dy,W_zoom,H_zooM,handle,openFlag,initTime,storeFlag,bufferData,globalGain
global testPatternFlag
global integrationTime
global shutterWidth
openFlag = False
handle = {}
downFlag = False
flag = True
saveFlag = False
storeFlag = False
saveNum=0
H_value = 0
V_value = 0
W_zoom = 0
H_zoom = 0
lx = 0
ly = 0
mx = 0
my = 0
dx = 0
dy = 0
testPatternFlag = False;
regArr=[[0x01, 0x000C], # Row Start
[0x02, 0x0014], # Column Start
[0x03, Height - 1], # Window Height 0x03FF
[0x04, Width - 1], # Window Width 0x04FF
[0x05, 0x0009], # Horizontal Blanking
[0x06, 0x0019], # Vertical Blanking
[0x07, 0x0002], # Output Control
[0x09, 0x0419], # Shutter Width 0x0419 (max: 0x3FFF)
[0x0B, 0x0000], # Frame Restart
[0x0C, 0x0000],#0x0100],
[0x0D, 0x0000],
[0x1E, 0x8000], # Read Mode 1 0x8000
[0x20, 0x1104],
[0x2B, 0x0008],
[0x2C, 0x0008],
[0x2D, 0x0008],
[0x2E, 0x0008],
[0x32, 0x0FFC], # Test Data Register
[0x35, 0x0067], # Global Gain 0x0008 (max: 0x0067)
[0x5F, 0x0904],
#[0x60, 0x0000], # BLC offset: Even row, even column
#[0x61, 0x0000], # BLC offset: Odd row, odd column
#[0x62, 0x049F], # Black Level Calibration Control 0x0498 (No-BLC: 0x049F; Manual-BLC: 0x0499 & reg0x60/61/63/64)
#[0x63, 0x0000], # BLC offset: Even row, odd column
#[0x64, 0x0000], # BLC offset: Odd row, Even column
[0x60, 0x002F], # BLC offset: Even row, even column
[0x61, 0x002F], # BLC offset: Odd row, odd column
[0x62, 0x0499], # Black Level Calibration Control 0x0498 (No-BLC: 0x049F; Manual-BLC: 0x0499 & reg0x60/61/63/64)
[0x63, 0x000F], # BLC offset: Even row, odd column
[0x64, 0x000F], # BLC offset: Odd row, Even column
[0xF1, 0x0001],
[0xFFFF, 0xFFFF]
]
globalGain = regArr[18][1];
# Initial integration time calculation (p. 16 of the datasheet)
rowTime = regArr[3][1] + 1 + 244 + regArr[4][1] - 19; #[pixel clock periods] default: 1514
resetDelay = 4*regArr[9][1] #[pixel clock periods] default: 0
overheadTime = 180; #[pixel clock periods]
shutterWidth = regArr[7][1]
integrationPeriods = shutterWidth*rowTime - overheadTime - resetDelay;
clockPeriod = 1000.0/24e6; #[ms]
integrationTime = integrationPeriods * clockPeriod; #[ms]
with open('integrationtime.txt','w') as it:
it.write(str(integrationTime)+"\n")
print ("Initial integration time: %.3fms"%(integrationTime));
print ("Initial gain: 0x%02x"%(globalGain));
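# Worked example of the calculation above (not part of the original script),
# assuming the default register values in regArr with Width = 1280:
#   rowTime            = 1279 + 1 + 244 + 9 - 19      = 1514 pixel clock periods
#   resetDelay         = 4 * 0                        = 0
#   shutterWidth       = 0x0419                       = 1049
#   integrationPeriods = 1049 * 1514 - 180 - 0        = 1,588,006 pixel clock periods
#   integrationTime    = 1,588,006 * (1000 / 24e6) ms ~= 66.2 ms
# which is the value printed above as the initial integration time.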
a_lock = thread.allocate_lock();
def readThread(threadName,read_Flag):
global flag,handle,storeFlag,bufferData,openFlag
global a_lock
count = 0
time0 = time.time()
time1 = time.time()
data = {}
# Wait for the arducam object to be ready
while openFlag == False:
time1 = time.time();
if time1 - time0 > 20:
#timeout
exit;
while flag:
res = ArducamSDK.Py_ArduCam_available(handle)
#~ print "Available frames %d"%(res)
if res > 0:
res,data = ArducamSDK.Py_ArduCam_read(handle,Width * Height)
if res == 0:
count += 1
time1 = time.time()
ArducamSDK.Py_ArduCam_del(handle)
else:
print ("read data fail!")
else:
#print "No data availiable"
time.sleep(.01);
if len(data) >= Width * Height:
if time1 - time0 >= 5:
print ("%s %f %s\n"%("fps:",count*1.0/(time1-time0),"/s"))
count = 0
time0 = time1
a_lock.acquire();
bufferData = data;
data = [];
storeFlag = True;
a_lock.release();
#show(data)
#else:
# print "data length is not enough!"
if flag == False:
break
thread.start_new_thread( readThread,("Thread-2", flag,))
pass
def showAndSave(threadName,algoquenoseusa):
global flag,W_zoom,H_zoom,V_value,H_value,lx,ly,downFlag,saveFlag,saveNum,bufferData,storeFlag
global a_lock
global hist_ax
global NOMBREIMG
img = np.zeros((Height, Width), dtype=np.uint8);
while flag:
a_lock.acquire();
if storeFlag == True:
storeFlag = False;
img = np.frombuffer(bufferData, np.uint8)
img = np.reshape(img, (Height, Width));
saveNum += 1
#name = NOMBREIMG + str(saveNum) + ".fits"
#name = NOMBREIMG + "_" + str(saveNum) + ".jpeg"
name = NOMBREIMG + ".fits"
hdu=fits.PrimaryHDU()
hdu.data=img
hdu.writeto(name,overwrite=True)
print ("Frame saved to %s"%(name))
a_lock.release();
if saveNum == NUMIMG:
flag=False;
print ("Total number of adq images = %d"%(saveNum))
if flag == False:
break
thread.start_new_thread( showAndSave,("Thread-3",flag))
pass
def init_and_read_arducam():
global flag,regArr,handle,openFlag
regNum = 0
res,handle = ArducamSDK.Py_ArduCam_autoopen(cfg)
if res == 0:
openFlag = True
print ("device open success!")
while (regArr[regNum][0] != 0xFFFF):
ArducamSDK.Py_ArduCam_writeSensorReg(handle,regArr[regNum][0],regArr[regNum][1])
regNum = regNum + 1
res = ArducamSDK.Py_ArduCam_beginCapture(handle)
if res == 0:
print ("transfer task create success!")
while flag :
res = ArducamSDK.Py_ArduCam_capture(handle)
if res != 0:
print ("capture failed!")
flag = False;
break;
time.sleep(0.1)
if flag == False:
break
else:
print ("transfer task create fail!")
time.sleep(2);
res = ArducamSDK.Py_ArduCam_close(handle)
if res == 0:
openFlag = False
print ("device close success!")
else:
print ("device close fail!")
else:
print ("device open fail!")
if __name__ == "__main__":
initTime = time.time();
init_and_read_arducam();
| 28.610895
| 134
| 0.622195
|
from ctypes import *
import ctypes
import sys
import os
import time
from PIL import Image
import numpy as np
import thread as thread
import math
from select import select
from evdev import InputDevice
from evdev import ecodes
from astropy.io import fits
import ArducamSDK
if (len(sys.argv)==3):
NOMBREIMG = sys.argv[1];
NUMIMG = int(sys.argv[2]);
else:
    print ("Two arguments are required: IMAGE_NAME NUM_IMAGES")
    exit()
COLOR_BYTE2RGB = 47
CAMERA_MT9M001 = 0x4D091031
SensorShipAddr = 186
I2C_MODE_8_16 = 1
usbVid = 0x52CB
Width = 1280
Height = 1024
cfg ={"u32CameraType":CAMERA_MT9M001,
    "u32Width":Width,"u32Height":Height,
    "u32UsbVersion":1,
    "u8PixelBytes":1,
    "u16Vid":0x52cb,
"u8PixelBits":8,
"u32SensorShipAddr":SensorShipAddr,
"emI2cMode":I2C_MODE_8_16 }
global saveFlag,downFlag,flag,H_value,V_value,lx,ly,mx,my,dx,dy,W_zoom,H_zooM,handle,openFlag,initTime,storeFlag,bufferData,globalGain
global testPatternFlag
global integrationTime
global shutterWidth
openFlag = False
handle = {}
downFlag = False
flag = True
saveFlag = False
storeFlag = False
saveNum=0
H_value = 0
V_value = 0
W_zoom = 0
H_zoom = 0
lx = 0
ly = 0
mx = 0
my = 0
dx = 0
dy = 0
testPatternFlag = False;
regArr=[[0x01, 0x000C],
[0x02, 0x0014],
[0x03, Height - 1],
[0x04, Width - 1],
[0x05, 0x0009],
[0x06, 0x0019],
[0x07, 0x0002],
[0x09, 0x0419],
[0x0B, 0x0000],
[0x0C, 0x0000],
[0x0D, 0x0000],
[0x1E, 0x8000],
[0x20, 0x1104],
[0x2B, 0x0008],
[0x2C, 0x0008],
[0x2D, 0x0008],
[0x2E, 0x0008],
[0x32, 0x0FFC],
[0x35, 0x0067],
[0x5F, 0x0904],
    [0x60, 0x002F],
    [0x61, 0x002F],
    [0x62, 0x0499],
    [0x63, 0x000F],
    [0x64, 0x000F],
    [0xF1, 0x0001],
    [0xFFFF, 0xFFFF]
    ]
globalGain = regArr[18][1];
rowTime = regArr[3][1] + 1 + 244 + regArr[4][1] - 19;
resetDelay = 4*regArr[9][1]
overheadTime = 180;
shutterWidth = regArr[7][1]
integrationPeriods = shutterWidth*rowTime - overheadTime - resetDelay;
clockPeriod = 1000.0/24e6;
integrationTime = integrationPeriods * clockPeriod;
with open('integrationtime.txt','w') as it:
it.write(str(integrationTime)+"\n")
print ("Initial integration time: %.3fms"%(integrationTime));
print ("Initial gain: 0x%02x"%(globalGain));
a_lock = thread.allocate_lock();
def readThread(threadName,read_Flag):
global flag,handle,storeFlag,bufferData,openFlag
global a_lock
count = 0
time0 = time.time()
time1 = time.time()
data = {}
while openFlag == False:
time1 = time.time();
if time1 - time0 > 20:
exit;
while flag:
res = ArducamSDK.Py_ArduCam_available(handle)
if res > 0:
res,data = ArducamSDK.Py_ArduCam_read(handle,Width * Height)
if res == 0:
count += 1
time1 = time.time()
ArducamSDK.Py_ArduCam_del(handle)
else:
print ("read data fail!")
else:
time.sleep(.01);
if len(data) >= Width * Height:
if time1 - time0 >= 5:
print ("%s %f %s\n"%("fps:",count*1.0/(time1-time0),"/s"))
count = 0
time0 = time1
a_lock.acquire();
bufferData = data;
data = [];
storeFlag = True;
a_lock.release();
if flag == False:
break
thread.start_new_thread( readThread,("Thread-2", flag,))
pass
def showAndSave(threadName,algoquenoseusa):
global flag,W_zoom,H_zoom,V_value,H_value,lx,ly,downFlag,saveFlag,saveNum,bufferData,storeFlag
global a_lock
global hist_ax
global NOMBREIMG
img = np.zeros((Height, Width), dtype=np.uint8);
while flag:
a_lock.acquire();
if storeFlag == True:
storeFlag = False;
img = np.frombuffer(bufferData, np.uint8)
img = np.reshape(img, (Height, Width));
saveNum += 1
name = NOMBREIMG + ".fits"
hdu=fits.PrimaryHDU()
hdu.data=img
hdu.writeto(name,overwrite=True)
print ("Frame saved to %s"%(name))
a_lock.release();
if saveNum == NUMIMG:
flag=False;
print ("Total number of adq images = %d"%(saveNum))
if flag == False:
break
thread.start_new_thread( showAndSave,("Thread-3",flag))
pass
def init_and_read_arducam():
global flag,regArr,handle,openFlag
regNum = 0
res,handle = ArducamSDK.Py_ArduCam_autoopen(cfg)
if res == 0:
openFlag = True
print ("device open success!")
while (regArr[regNum][0] != 0xFFFF):
ArducamSDK.Py_ArduCam_writeSensorReg(handle,regArr[regNum][0],regArr[regNum][1])
regNum = regNum + 1
res = ArducamSDK.Py_ArduCam_beginCapture(handle)
if res == 0:
print ("transfer task create success!")
while flag :
res = ArducamSDK.Py_ArduCam_capture(handle)
if res != 0:
print ("capture failed!")
flag = False;
break;
time.sleep(0.1)
if flag == False:
break
else:
print ("transfer task create fail!")
time.sleep(2);
res = ArducamSDK.Py_ArduCam_close(handle)
if res == 0:
openFlag = False
print ("device close success!")
else:
print ("device close fail!")
else:
print ("device open fail!")
if __name__ == "__main__":
initTime = time.time();
init_and_read_arducam();
| true
| true
|
f71a2d96365d53c5ef530130fb564554ef725c20
| 1,117
|
py
|
Python
|
lib/surface/eventflow/triggers/__init__.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/eventflow/triggers/__init__.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/eventflow/triggers/__init__.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud eventflow triggers group."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Triggers(base.Group):
"""View and manage your Eventflow triggers.
This set of commands can be used to view and manage your Eventflow resources.
"""
detailed_help = {
'EXAMPLES': """\
To list your existing triggers, run:
$ {command} list
""",
}
| 30.189189
| 79
| 0.726052
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Triggers(base.Group):
detailed_help = {
'EXAMPLES': """\
To list your existing triggers, run:
$ {command} list
""",
}
| true
| true
|
f71a2de92ecf79a70555c5ed5b4cafbc45bf3a74
| 4,851
|
py
|
Python
|
tempest/cli/simple_read_only/test_cinder.py
|
BeenzSyed/tempest
|
7a64ee1216d844f6b99928b53f5c665b84cb8719
|
[
"Apache-2.0"
] | null | null | null |
tempest/cli/simple_read_only/test_cinder.py
|
BeenzSyed/tempest
|
7a64ee1216d844f6b99928b53f5c665b84cb8719
|
[
"Apache-2.0"
] | null | null | null |
tempest/cli/simple_read_only/test_cinder.py
|
BeenzSyed/tempest
|
7a64ee1216d844f6b99928b53f5c665b84cb8719
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import subprocess
import tempest.cli
LOG = logging.getLogger(__name__)
class SimpleReadOnlyCinderClientTest(tempest.cli.ClientTestBase):
"""Basic, read-only tests for Cinder CLI client.
Checks return values and output of read-only commands.
These tests do not presume any content, nor do they create
their own. They only verify the structure of output if present.
"""
def test_cinder_fake_action(self):
self.assertRaises(subprocess.CalledProcessError,
self.cinder,
'this-does-not-exist')
def test_cinder_absolute_limit_list(self):
roles = self.parser.listing(self.cinder('absolute-limits'))
self.assertTableStruct(roles, ['Name', 'Value'])
def test_cinder_backup_list(self):
self.cinder('backup-list')
def test_cinder_extra_specs_list(self):
self.cinder('extra-specs-list')
def test_cinder_volumes_list(self):
self.cinder('list')
    def test_cinder_quota_class_show(self):
        """This CLI can accept any string as a param."""
roles = self.parser.listing(self.cinder('quota-class-show',
params='abc'))
self.assertTableStruct(roles, ['Property', 'Value'])
    def test_cinder_quota_defaults(self):
        """This CLI can accept any string as a param."""
roles = self.parser.listing(self.cinder('quota-defaults',
params=self.identity.
admin_tenant_name))
self.assertTableStruct(roles, ['Property', 'Value'])
    def test_cinder_quota_show(self):
        """This CLI can accept any string as a param."""
roles = self.parser.listing(self.cinder('quota-show',
params=self.identity.
admin_tenant_name))
self.assertTableStruct(roles, ['Property', 'Value'])
def test_cinder_rate_limits(self):
self.cinder('rate-limits')
def test_cinder_snapshot_list(self):
self.cinder('snapshot-list')
def test_cinder_type_list(self):
self.cinder('type-list')
def test_cinder_list_extensions(self):
self.cinder('list-extensions')
roles = self.parser.listing(self.cinder('list-extensions'))
self.assertTableStruct(roles, ['Name', 'Summary', 'Alias', 'Updated'])
def test_cinder_credentials(self):
self.cinder('credentials')
def test_cinder_availability_zone_list(self):
self.cinder('availability-zone-list')
def test_cinder_endpoints(self):
self.cinder('endpoints')
def test_cinder_service_list(self):
self.cinder('service-list')
def test_cinder_transfer_list(self):
self.cinder('transfer-list')
def test_cinder_bash_completion(self):
self.cinder('bash-completion')
def test_admin_help(self):
help_text = self.cinder('help')
lines = help_text.split('\n')
self.assertFirstLineStartsWith(lines, 'usage: cinder')
commands = []
cmds_start = lines.index('Positional arguments:')
cmds_end = lines.index('Optional arguments:')
        command_pattern = re.compile(r'^ {4}([a-z0-9\-\_]+)')
for line in lines[cmds_start:cmds_end]:
match = command_pattern.match(line)
if match:
commands.append(match.group(1))
commands = set(commands)
wanted_commands = set(('absolute-limits', 'list', 'help',
'quota-show', 'type-list', 'snapshot-list'))
self.assertFalse(wanted_commands - commands)
# Optional arguments:
def test_cinder_version(self):
self.cinder('', flags='--version')
def test_cinder_debug_list(self):
self.cinder('list', flags='--debug')
def test_cinder_retries_list(self):
self.cinder('list', flags='--retries 3')
def test_cinder_region_list(self):
region = self.config.volume.region
if not region:
region = self.config.identity.region
self.cinder('list', flags='--os-region-name ' + region)
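# Illustrative sketch (not part of the original test module): the command
# extraction in test_admin_help() keys off the 4-space indentation the cinder
# CLI uses when listing positional arguments. The sample help text below is a
# made-up stand-in, used only to show what the regex picks up.
if __name__ == '__main__':
    sample_help = ("usage: cinder\n"
                   "Positional arguments:\n"
                   "    absolute-limits\n"
                   "    quota-show\n"
                   "Optional arguments:\n")
    lines = sample_help.split('\n')
    pattern = re.compile(r'^ {4}([a-z0-9\-\_]+)')
    commands = []
    for line in lines[lines.index('Positional arguments:'):lines.index('Optional arguments:')]:
        match = pattern.match(line)
        if match:
            commands.append(match.group(1))
    print(commands)  # expected: ['absolute-limits', 'quota-show']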
| 35.408759
| 78
| 0.632035
|
import logging
import re
import subprocess
import tempest.cli
LOG = logging.getLogger(__name__)
class SimpleReadOnlyCinderClientTest(tempest.cli.ClientTestBase):
def test_cinder_fake_action(self):
self.assertRaises(subprocess.CalledProcessError,
self.cinder,
'this-does-not-exist')
def test_cinder_absolute_limit_list(self):
roles = self.parser.listing(self.cinder('absolute-limits'))
self.assertTableStruct(roles, ['Name', 'Value'])
def test_cinder_backup_list(self):
self.cinder('backup-list')
def test_cinder_extra_specs_list(self):
self.cinder('extra-specs-list')
def test_cinder_volumes_list(self):
self.cinder('list')
def test_cinder_quota_class_show(self):
roles = self.parser.listing(self.cinder('quota-class-show',
params='abc'))
self.assertTableStruct(roles, ['Property', 'Value'])
def test_cinder_quota_defaults(self):
roles = self.parser.listing(self.cinder('quota-defaults',
params=self.identity.
admin_tenant_name))
self.assertTableStruct(roles, ['Property', 'Value'])
def test_cinder_quota_show(self):
roles = self.parser.listing(self.cinder('quota-show',
params=self.identity.
admin_tenant_name))
self.assertTableStruct(roles, ['Property', 'Value'])
def test_cinder_rate_limits(self):
self.cinder('rate-limits')
def test_cinder_snapshot_list(self):
self.cinder('snapshot-list')
def test_cinder_type_list(self):
self.cinder('type-list')
def test_cinder_list_extensions(self):
self.cinder('list-extensions')
roles = self.parser.listing(self.cinder('list-extensions'))
self.assertTableStruct(roles, ['Name', 'Summary', 'Alias', 'Updated'])
def test_cinder_credentials(self):
self.cinder('credentials')
def test_cinder_availability_zone_list(self):
self.cinder('availability-zone-list')
def test_cinder_endpoints(self):
self.cinder('endpoints')
def test_cinder_service_list(self):
self.cinder('service-list')
def test_cinder_transfer_list(self):
self.cinder('transfer-list')
def test_cinder_bash_completion(self):
self.cinder('bash-completion')
def test_admin_help(self):
help_text = self.cinder('help')
lines = help_text.split('\n')
self.assertFirstLineStartsWith(lines, 'usage: cinder')
commands = []
cmds_start = lines.index('Positional arguments:')
cmds_end = lines.index('Optional arguments:')
        command_pattern = re.compile(r'^ {4}([a-z0-9\-\_]+)')
for line in lines[cmds_start:cmds_end]:
match = command_pattern.match(line)
if match:
commands.append(match.group(1))
commands = set(commands)
wanted_commands = set(('absolute-limits', 'list', 'help',
'quota-show', 'type-list', 'snapshot-list'))
self.assertFalse(wanted_commands - commands)
def test_cinder_version(self):
self.cinder('', flags='--version')
def test_cinder_debug_list(self):
self.cinder('list', flags='--debug')
def test_cinder_retries_list(self):
self.cinder('list', flags='--retries 3')
def test_cinder_region_list(self):
region = self.config.volume.region
if not region:
region = self.config.identity.region
self.cinder('list', flags='--os-region-name ' + region)
| true
| true
|
f71a2e2450c7afe71a1025c53865035c1ff60cb5
| 268
|
py
|
Python
|
highiq/io/__init__.py
|
ClariNerd617/HighIQ
|
0305902f889da869535834620bb4fb15ac54b11d
|
[
"BSD-3-Clause"
] | 6
|
2020-03-16T14:14:45.000Z
|
2021-09-21T06:39:57.000Z
|
highiq/io/__init__.py
|
ClariNerd617/HighIQ
|
0305902f889da869535834620bb4fb15ac54b11d
|
[
"BSD-3-Clause"
] | null | null | null |
highiq/io/__init__.py
|
ClariNerd617/HighIQ
|
0305902f889da869535834620bb4fb15ac54b11d
|
[
"BSD-3-Clause"
] | 3
|
2019-12-16T19:56:35.000Z
|
2021-06-09T14:14:47.000Z
|
"""
=========
highiq.io
=========
.. currentmodule:: highiq.io
This module contains the I/O methods for loading data into and saving data from HighIQ analyses.
.. autosummary::
:toctree: generated/
load_arm_netcdf
"""
from .arm_data import load_arm_netcdf
| 16.75
| 96
| 0.682836
|
from .arm_data import load_arm_netcdf
| true
| true
|
f71a2e415b2e9d0db183f02c832c777618bce8e9
| 1,292
|
py
|
Python
|
model-optimizer/extensions/back/RNNSequenceTypeRename.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/extensions/back/RNNSequenceTypeRename.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | 19
|
2021-03-26T08:11:00.000Z
|
2022-02-21T13:06:26.000Z
|
model-optimizer/extensions/back/RNNSequenceTypeRename.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | 1
|
2021-07-28T17:30:46.000Z
|
2021-07-28T17:30:46.000Z
|
"""
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.back.replacement import BackReplacementPattern
from mo.graph.graph import Graph
class RNNSequence(BackReplacementPattern):
"""
    This transform changes the type of RNNSequence (the internal MO type for all recurrent layers)
    to the correct operation name.
"""
enabled = True
def pattern(self):
return dict(
nodes=[
('rnn_layer', {'type': 'RNNSequence'})
],
edges=[]
)
_supported_ops = ['RNN', 'LSTM', 'GRU']
def replace_pattern(self, graph: Graph, match: dict):
rnn_layer = match['rnn_layer']
assert rnn_layer['op'] in self._supported_ops
rnn_layer['type'] = rnn_layer['op'] + 'Sequence'
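# Illustrative note (not part of the original transform): for a matched node
# with rnn_layer['op'] == 'LSTM', replace_pattern() rewrites its 'type' from
# 'RNNSequence' to 'LSTMSequence'; 'RNN' and 'GRU' nodes are renamed the same way.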
| 31.512195
| 86
| 0.681889
|
from mo.back.replacement import BackReplacementPattern
from mo.graph.graph import Graph
class RNNSequence(BackReplacementPattern):
enabled = True
def pattern(self):
return dict(
nodes=[
('rnn_layer', {'type': 'RNNSequence'})
],
edges=[]
)
_supported_ops = ['RNN', 'LSTM', 'GRU']
def replace_pattern(self, graph: Graph, match: dict):
rnn_layer = match['rnn_layer']
assert rnn_layer['op'] in self._supported_ops
rnn_layer['type'] = rnn_layer['op'] + 'Sequence'
| true
| true
|
f71a2e67d16d278f046fedc42260f77f54a931dc
| 2,802
|
py
|
Python
|
vplexapi-7.0.0.0/vplexapi/models/rule_set.py
|
lhernand3z/python-vplex
|
0f94723fd56c7a3a85c4afb3b78046b9c66b93e4
|
[
"Apache-2.0"
] | null | null | null |
vplexapi-7.0.0.0/vplexapi/models/rule_set.py
|
lhernand3z/python-vplex
|
0f94723fd56c7a3a85c4afb3b78046b9c66b93e4
|
[
"Apache-2.0"
] | null | null | null |
vplexapi-7.0.0.0/vplexapi/models/rule_set.py
|
lhernand3z/python-vplex
|
0f94723fd56c7a3a85c4afb3b78046b9c66b93e4
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
VPlex REST API
A definition for the next-gen VPlex API # noqa: E501
OpenAPI spec version: 0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleSet(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str'
}
attribute_map = {
'name': 'name'
}
def __init__(self, name=None): # noqa: E501
"""RuleSet - a model defined in Swagger""" # noqa: E501
self._name = None
self.discriminator = None
if name is not None:
self.name = name
@property
def name(self):
"""Gets the name of this RuleSet. # noqa: E501
:return: The name of this RuleSet. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this RuleSet.
:param name: The name of this RuleSet. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RuleSet):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
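# Illustrative usage sketch (not part of the generated file): exercises the
# helpers defined on the model above; runnable as long as this module's
# imports are available.
if __name__ == '__main__':
    rule_set = RuleSet(name='local-rule-set')
    print(rule_set.to_dict())                           # {'name': 'local-rule-set'}
    print(rule_set == RuleSet(name='local-rule-set'))   # True: __eq__ compares __dict__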
| 24.79646
| 80
| 0.533904
|
import pprint
import re
import six
class RuleSet(object):
swagger_types = {
'name': 'str'
}
attribute_map = {
'name': 'name'
}
def __init__(self, name=None):
self._name = None
self.discriminator = None
if name is not None:
self.name = name
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, RuleSet):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f71a2e87d4b8d901b178fcd9d35e179c33a8334f
| 4,868
|
py
|
Python
|
BoxThermal.py
|
AndrewFalkowski/SODIS_SIM
|
4d5da3e0872ee747d399d66fdee1633e7d2b8ab1
|
[
"MIT"
] | null | null | null |
BoxThermal.py
|
AndrewFalkowski/SODIS_SIM
|
4d5da3e0872ee747d399d66fdee1633e7d2b8ab1
|
[
"MIT"
] | null | null | null |
BoxThermal.py
|
AndrewFalkowski/SODIS_SIM
|
4d5da3e0872ee747d399d66fdee1633e7d2b8ab1
|
[
"MIT"
] | null | null | null |
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
import numba
import time
from scipy.integrate import odeint
# a sample differential equation dy/dx = (x-y)/2
# def dydx(x,y):
# return ((x-y)/2)
# # find the value of y for a given x using step size h
# # and an initial value y0 at x0
# def rungeKutta(x0, y0, x, h):
# #count num iteratings using step size or step height h
# n = int(((x - x0)/h))
# # iterate for number of iterations
# y = y0
# for i in range(1, n + 1):
# # apply runge kutta formulas to find the next value of y
# k1 = h * dydx(x0, y)
# k2 = h * dydx(x0 + 0.5 * h, y + 0.5 * k1)
# k3 = h * dydx(x0 + 0.5 * h, y + 0.5 * k2)
# k4 = h * dydx(x0 + h, y + k3)
# # update the next value of y
# y = y + (1.0 / 6.0) * (k1 + 2*k2 + 2*k3 + k4)
# # update the next value of x
# x0 = x0 + h
# return y
# # driver method
# x0 = 0
# y = 1
# x = 2
# h = 0.2
# print('The value of y at x is:', rungeKutta(x0, y, x, h))
def box_dim(A_c, h, prct_f):
# all dimensions in meters
box_vol = A_c * h
    vol_f = box_vol * prct_f # m^3 of water
    m_a = box_vol * (1-prct_f) * 1.225 # kg of air
    m_f = vol_f * 997 # kg of water (numerically ~ litres, since 1 L of water ~ 1 kg)
print('Contained Water: ', m_f, 'Liters')
A_s = 4 * h * np.sqrt(A_c)
return m_f, m_a, A_s
# m_f, m_a, A_s = box_dim(0.25, 0.15, 0.9)
def boxODE(x, t, m_f, m_a, A_s):
# constants
A_c = 0.25 # square meters
A_s = A_s
A_f = A_c # square meters
T_amb = 298 # kelvin
T_sky = T_amb - 6 # kelvin
alpha_g = 0.02 # %
alpha_p = 0.98
t_g = 0.9 # %
t_f = 0.85 # %
# print(t)
Irr = 0.0426*(t) + 1.38E-6*(t)**2 - 7.94E-11*(t)**3 + 7.3E-16*(t)**4
# Irr = 600
x_b = 0.065 # insulation thickness meters
x_s = 0.065 # insulation thickness meters
k_i = 1.0 # thermal conductivity of side materials, foamed glass # W/mK
h_rad_g2_g1 = 8
h_cov_g2_g1 = 20
h_rad_g1_sky = 8
h_rad_g1_amb = 8
h_rad_p_g2 = 20
h_cov_a_g2 = 8
h_cov_f_a = 8
h_cov_p_f = 30
h_cov_g1_amb = 65
M_f = m_f * 4.187
M_g1 = 1150 * (A_c * 0.001) * 1.67 # assuming acrylic
M_g2 = M_g1
M_p = 8960 * (A_c * 0.065) * 1.0
# assuming coper
M_a = 0.718 * m_a
# assign each ODE to a vector element
T_g1 = x[0]
T_g2 = x[1]
T_a = x[2]
T_p = x[3]
T_f = x[4]
Q_rad_g2_g1 = h_rad_g2_g1 * A_c * (T_g2 - T_g1)
Q_cov_g2_g1 = h_cov_g2_g1 * A_c * (T_g2 - T_g1)
Q_rad_g1_sky = h_rad_g1_sky * A_c * (T_g1 - T_sky)
Q_cov_g1_amb = h_rad_g1_amb * A_c * (T_g1 - T_amb)
Q_rad_p_g2 = h_rad_p_g2 * A_c * (T_p - T_g2)
Q_cov_a_g2 = h_cov_a_g2 * A_c * (T_a - T_g2)
Q_cov_f_a = h_cov_f_a * (A_c) * (T_f - T_a)
Q_cov_p_f = h_cov_p_f * A_c * (T_p - T_f)
U_base = ((x_b/k_i) + 1/(h_cov_g1_amb))**(-1)
U_side = ((x_s/k_i) + 1/(h_cov_g1_amb))**(-1)
Q_amb_loss = (U_base*A_c + U_side*A_s)*(T_p - T_amb)
# define each ODE
dT_g1dt = (Irr * alpha_g * A_c + Q_rad_g2_g1 + Q_cov_g2_g1 - Q_rad_g1_sky - Q_cov_g1_amb) / M_g1
dT_g2dt = (Irr * alpha_g * t_g * A_c + Q_rad_p_g2 + Q_cov_a_g2 - Q_rad_g2_g1) / M_g2
dT_adt = (Q_cov_f_a - Q_cov_a_g2)/M_a
dT_pdt = (Irr * alpha_p * t_g**2 * t_f * A_c - Q_rad_p_g2 - Q_amb_loss - Q_cov_p_f) / M_p
dT_fdt = (Q_cov_p_f + Q_cov_f_a) / M_f
return [dT_g1dt, dT_g2dt, dT_adt, dT_pdt, dT_fdt]
# x0 = [298, 298, 298, 298, 285]
# # test the defined ODES
# print(boxODE(x=x0, t=0, m_f=m_f, m_a=m_a, A_s=A_s))
# # declare a time vector (time window)
# t = np.linspace(0,54000,1000)
# x = odeint(boxODE,x0,t, args=(m_f, m_a, A_s))
# Tf= x[:,4]
# Tp = x[:,3]
# # plot the results
# plt.plot((t/3600)+5.8,Tf_2, label='fluid')
# # plt.plot(t/3600,Tp, label='plate')
# plt.legend()
# plt.ylim(298, 340)
# plt.xlim(0,24)
# plt.show()
#%%
# xs = np.arange(27000,28201,1)
# ys = 0.0226*xs - 295
# #%%
# fig = plt.figure(figsize=(5,5))
# fig, ax1 = plt.subplots()
# plt.plot((t/3600)+5.8,Tf, color='r')
# plt.plot(xs/3600 + 5.8, ys, color='r')
# plt.plot(np.arange(27000,27601,1)/3600+5.8, )
# plt.hlines(338, -100, 100, linestyle=':', color='k')
# plt.text(6.5, 339, 'Pasteurization Temperature')
# ax1.tick_params(direction='in', length=7,top=True, right=True, left=True)
# minor_locator_x = AutoMinorLocator(2)
# minor_locator_y = AutoMinorLocator(2)
# ax1.get_xaxis().set_minor_locator(minor_locator_x)
# ax1.get_yaxis().set_minor_locator(minor_locator_y)
# # rotate and align the tick labels so they look better
# plt.tick_params(which='minor',
# direction='in',
# length=4,
# right=True,
# left=True,
# top=True)
# plt.xlim(6,21)
# plt.xlabel('Hour of Day')
# plt.ylim(298, 350)
# plt.ylabel('Water Temperature (K)')
# plt.savefig('Figures/comb_img.png', dpi=300)
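# Illustrative driver (a sketch, not part of the original script): wires
# box_dim() and boxODE() together the same way the commented-out code above
# does, using the same 0.25 m^2 footprint, 0.15 m height and 90% fill level.
if __name__ == '__main__':
    m_f, m_a, A_s = box_dim(0.25, 0.15, 0.9)
    x0 = [298, 298, 298, 298, 285]            # initial temperatures [K]
    t = np.linspace(0, 54000, 1000)           # 15 hours in seconds
    x = odeint(boxODE, x0, t, args=(m_f, m_a, A_s))
    Tf = x[:, 4]                              # water temperature trace
    print('Final water temperature: %.1f K' % Tf[-1])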
| 27.044444
| 100
| 0.581758
|
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
import numba
import time
from scipy.integrate import odeint
def box_dim(A_c, h, prct_f):
    box_vol = A_c * h
    vol_f = box_vol * prct_f
    m_a = box_vol * (1-prct_f) * 1.225
    m_f = vol_f * 997
    print('Contained Water: ', m_f, 'Liters')
    A_s = 4 * h * np.sqrt(A_c)
    return m_f, m_a, A_s
def boxODE(x, t, m_f, m_a, A_s):
A_c = 0.25
A_s = A_s
A_f = A_c
T_amb = 298
T_sky = T_amb - 6
alpha_g = 0.02
alpha_p = 0.98
t_g = 0.9
t_f = 0.85
Irr = 0.0426*(t) + 1.38E-6*(t)**2 - 7.94E-11*(t)**3 + 7.3E-16*(t)**4
x_b = 0.065
x_s = 0.065
    k_i = 1.0
    h_rad_g2_g1 = 8
h_cov_g2_g1 = 20
h_rad_g1_sky = 8
h_rad_g1_amb = 8
h_rad_p_g2 = 20
h_cov_a_g2 = 8
h_cov_f_a = 8
h_cov_p_f = 30
h_cov_g1_amb = 65
M_f = m_f * 4.187
M_g1 = 1150 * (A_c * 0.001) * 1.67
M_g2 = M_g1
M_p = 8960 * (A_c * 0.065) * 1.0
M_a = 0.718 * m_a
T_g1 = x[0]
T_g2 = x[1]
T_a = x[2]
T_p = x[3]
T_f = x[4]
Q_rad_g2_g1 = h_rad_g2_g1 * A_c * (T_g2 - T_g1)
Q_cov_g2_g1 = h_cov_g2_g1 * A_c * (T_g2 - T_g1)
Q_rad_g1_sky = h_rad_g1_sky * A_c * (T_g1 - T_sky)
Q_cov_g1_amb = h_rad_g1_amb * A_c * (T_g1 - T_amb)
Q_rad_p_g2 = h_rad_p_g2 * A_c * (T_p - T_g2)
Q_cov_a_g2 = h_cov_a_g2 * A_c * (T_a - T_g2)
Q_cov_f_a = h_cov_f_a * (A_c) * (T_f - T_a)
Q_cov_p_f = h_cov_p_f * A_c * (T_p - T_f)
U_base = ((x_b/k_i) + 1/(h_cov_g1_amb))**(-1)
U_side = ((x_s/k_i) + 1/(h_cov_g1_amb))**(-1)
Q_amb_loss = (U_base*A_c + U_side*A_s)*(T_p - T_amb)
dT_g1dt = (Irr * alpha_g * A_c + Q_rad_g2_g1 + Q_cov_g2_g1 - Q_rad_g1_sky - Q_cov_g1_amb) / M_g1
dT_g2dt = (Irr * alpha_g * t_g * A_c + Q_rad_p_g2 + Q_cov_a_g2 - Q_rad_g2_g1) / M_g2
dT_adt = (Q_cov_f_a - Q_cov_a_g2)/M_a
dT_pdt = (Irr * alpha_p * t_g**2 * t_f * A_c - Q_rad_p_g2 - Q_amb_loss - Q_cov_p_f) / M_p
dT_fdt = (Q_cov_p_f + Q_cov_f_a) / M_f
return [dT_g1dt, dT_g2dt, dT_adt, dT_pdt, dT_fdt]
| true
| true
|
f71a2e97febc43b9fe06cbb74dd070431e79c852
| 5,121
|
py
|
Python
|
libweasyl/libweasyl/alembic/versions/e2bedd00b085_fill_journal_and_character_hidden_.py
|
kfkitsune/weasyl
|
7e63c6db98ed2debfadbc277509533f72ea078a5
|
[
"Apache-2.0"
] | 111
|
2016-05-18T04:18:18.000Z
|
2021-11-03T02:05:19.000Z
|
libweasyl/libweasyl/alembic/versions/e2bedd00b085_fill_journal_and_character_hidden_.py
|
Weasyl/weasyl
|
80c86942c6f20a815086e2895fdad51d3aa77eed
|
[
"Apache-2.0"
] | 1,103
|
2016-05-29T05:17:53.000Z
|
2022-03-31T18:12:40.000Z
|
libweasyl/libweasyl/alembic/versions/e2bedd00b085_fill_journal_and_character_hidden_.py
|
kfkitsune/weasyl
|
7e63c6db98ed2debfadbc277509533f72ea078a5
|
[
"Apache-2.0"
] | 47
|
2016-05-29T20:48:37.000Z
|
2021-11-12T09:40:40.000Z
|
"""Fill journal and character hidden/friends-only columns
Revision ID: e2bedd00b085
Revises: 1fbcfecd195e
Create Date: 2021-07-26 05:43:43.742595
"""
# revision identifiers, used by Alembic.
revision = 'e2bedd00b085'
down_revision = '1fbcfecd195e'
from alembic import op
import sqlalchemy as sa
from sqlalchemy import text
BATCH_SIZE = 10_000
def upgrade():
context = op.get_context()
with context.autocommit_block():
max_charid = context.bind.scalar(text("SELECT max(charid) FROM character"))
for i in range(1, max_charid + 1, BATCH_SIZE):
context.bind.execute(
text("UPDATE character SET hidden = settings ~ 'h', friends_only = settings ~ 'f' WHERE (charid BETWEEN :start AND :end) AND (hidden IS NULL OR friends_only IS NULL)"),
{"start": i, "end": i + BATCH_SIZE - 1},
)
context.bind.execute(
text("UPDATE character SET hidden = settings ~ 'h', friends_only = settings ~ 'f' WHERE (hidden IS NULL OR friends_only IS NULL)"),
)
max_journalid = context.bind.scalar(text("SELECT max(journalid) FROM journal"))
for i in range(1, max_journalid + 1, BATCH_SIZE):
context.bind.execute(
text("UPDATE journal SET hidden = settings ~ 'h', friends_only = settings ~ 'f' WHERE (journalid BETWEEN :start AND :end) AND (hidden IS NULL OR friends_only IS NULL)"),
{"start": i, "end": i + BATCH_SIZE - 1},
)
context.bind.execute(
text("UPDATE journal SET hidden = settings ~ 'h', friends_only = settings ~ 'f' WHERE (hidden IS NULL OR friends_only IS NULL)"),
)
op.alter_column('character', 'hidden',
existing_type=sa.BOOLEAN(),
server_default='f',
nullable=False)
op.alter_column('character', 'friends_only',
existing_type=sa.BOOLEAN(),
server_default='f',
nullable=False)
op.alter_column('journal', 'hidden',
existing_type=sa.BOOLEAN(),
server_default='f',
nullable=False)
op.alter_column('journal', 'friends_only',
existing_type=sa.BOOLEAN(),
server_default='f',
nullable=False)
def downgrade():
op.alter_column('character', 'hidden',
existing_type=sa.BOOLEAN(),
server_default=None,
nullable=True)
op.alter_column('character', 'friends_only',
existing_type=sa.BOOLEAN(),
server_default=None,
nullable=True)
op.alter_column('journal', 'hidden',
existing_type=sa.BOOLEAN(),
server_default=None,
nullable=True)
op.alter_column('journal', 'friends_only',
existing_type=sa.BOOLEAN(),
server_default=None,
nullable=True)
context = op.get_context()
with context.autocommit_block():
max_charid = context.bind.scalar(text("SELECT max(charid) FROM character"))
for i in range(1, max_charid + 1, BATCH_SIZE):
context.bind.execute(
text(
"UPDATE character SET settings = regexp_replace(settings, '[hf]', '', 'g')"
" || (CASE WHEN hidden THEN 'h' ELSE '' END)"
" || (CASE WHEN friends_only THEN 'f' ELSE '' END)"
" WHERE ((settings ~ 'h') != hidden OR (settings ~ 'f') != friends_only)"
" AND (charid BETWEEN :start AND :end)"
),
{"start": i, "end": i + BATCH_SIZE - 1},
)
context.bind.execute(
text(
"UPDATE character SET settings = regexp_replace(settings, '[hf]', '', 'g')"
" || (CASE WHEN hidden THEN 'h' ELSE '' END)"
" || (CASE WHEN friends_only THEN 'f' ELSE '' END)"
" WHERE ((settings ~ 'h') != hidden OR (settings ~ 'f') != friends_only)"
),
)
max_journalid = context.bind.scalar(text("SELECT max(journalid) FROM journal"))
for i in range(1, max_journalid + 1, BATCH_SIZE):
context.bind.execute(
text(
"UPDATE journal SET settings = regexp_replace(settings, '[hf]', '', 'g')"
" || (CASE WHEN hidden THEN 'h' ELSE '' END)"
" || (CASE WHEN friends_only THEN 'f' ELSE '' END)"
" WHERE ((settings ~ 'h') != hidden OR (settings ~ 'f') != friends_only)"
" AND (journalid BETWEEN :start AND :end)"
),
{"start": i, "end": i + BATCH_SIZE - 1},
)
context.bind.execute(
text(
"UPDATE journal SET settings = regexp_replace(settings, '[hf]', '', 'g')"
" || (CASE WHEN hidden THEN 'h' ELSE '' END)"
" || (CASE WHEN friends_only THEN 'f' ELSE '' END)"
" WHERE ((settings ~ 'h') != hidden OR (settings ~ 'f') != friends_only)"
),
)
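# Illustrative note (not part of the original migration): the autocommit_block
# plus BATCH_SIZE loop keeps each UPDATE to at most 10,000 ids per statement.
# With max_charid = 25,000, for example, the ranges issued are 1-10000,
# 10001-20000 and 20001-30000, and the final UPDATE without an id filter then
# catches any rows that still have NULL in hidden or friends_only.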
| 38.503759
| 185
| 0.540129
|
revision = 'e2bedd00b085'
down_revision = '1fbcfecd195e'
from alembic import op
import sqlalchemy as sa
from sqlalchemy import text
BATCH_SIZE = 10_000
def upgrade():
context = op.get_context()
with context.autocommit_block():
max_charid = context.bind.scalar(text("SELECT max(charid) FROM character"))
for i in range(1, max_charid + 1, BATCH_SIZE):
context.bind.execute(
text("UPDATE character SET hidden = settings ~ 'h', friends_only = settings ~ 'f' WHERE (charid BETWEEN :start AND :end) AND (hidden IS NULL OR friends_only IS NULL)"),
{"start": i, "end": i + BATCH_SIZE - 1},
)
context.bind.execute(
text("UPDATE character SET hidden = settings ~ 'h', friends_only = settings ~ 'f' WHERE (hidden IS NULL OR friends_only IS NULL)"),
)
max_journalid = context.bind.scalar(text("SELECT max(journalid) FROM journal"))
for i in range(1, max_journalid + 1, BATCH_SIZE):
context.bind.execute(
text("UPDATE journal SET hidden = settings ~ 'h', friends_only = settings ~ 'f' WHERE (journalid BETWEEN :start AND :end) AND (hidden IS NULL OR friends_only IS NULL)"),
{"start": i, "end": i + BATCH_SIZE - 1},
)
context.bind.execute(
text("UPDATE journal SET hidden = settings ~ 'h', friends_only = settings ~ 'f' WHERE (hidden IS NULL OR friends_only IS NULL)"),
)
op.alter_column('character', 'hidden',
existing_type=sa.BOOLEAN(),
server_default='f',
nullable=False)
op.alter_column('character', 'friends_only',
existing_type=sa.BOOLEAN(),
server_default='f',
nullable=False)
op.alter_column('journal', 'hidden',
existing_type=sa.BOOLEAN(),
server_default='f',
nullable=False)
op.alter_column('journal', 'friends_only',
existing_type=sa.BOOLEAN(),
server_default='f',
nullable=False)
def downgrade():
op.alter_column('character', 'hidden',
existing_type=sa.BOOLEAN(),
server_default=None,
nullable=True)
op.alter_column('character', 'friends_only',
existing_type=sa.BOOLEAN(),
server_default=None,
nullable=True)
op.alter_column('journal', 'hidden',
existing_type=sa.BOOLEAN(),
server_default=None,
nullable=True)
op.alter_column('journal', 'friends_only',
existing_type=sa.BOOLEAN(),
server_default=None,
nullable=True)
context = op.get_context()
with context.autocommit_block():
max_charid = context.bind.scalar(text("SELECT max(charid) FROM character"))
for i in range(1, max_charid + 1, BATCH_SIZE):
context.bind.execute(
text(
"UPDATE character SET settings = regexp_replace(settings, '[hf]', '', 'g')"
" || (CASE WHEN hidden THEN 'h' ELSE '' END)"
" || (CASE WHEN friends_only THEN 'f' ELSE '' END)"
" WHERE ((settings ~ 'h') != hidden OR (settings ~ 'f') != friends_only)"
" AND (charid BETWEEN :start AND :end)"
),
{"start": i, "end": i + BATCH_SIZE - 1},
)
context.bind.execute(
text(
"UPDATE character SET settings = regexp_replace(settings, '[hf]', '', 'g')"
" || (CASE WHEN hidden THEN 'h' ELSE '' END)"
" || (CASE WHEN friends_only THEN 'f' ELSE '' END)"
" WHERE ((settings ~ 'h') != hidden OR (settings ~ 'f') != friends_only)"
),
)
max_journalid = context.bind.scalar(text("SELECT max(journalid) FROM journal"))
for i in range(1, max_journalid + 1, BATCH_SIZE):
context.bind.execute(
text(
"UPDATE journal SET settings = regexp_replace(settings, '[hf]', '', 'g')"
" || (CASE WHEN hidden THEN 'h' ELSE '' END)"
" || (CASE WHEN friends_only THEN 'f' ELSE '' END)"
" WHERE ((settings ~ 'h') != hidden OR (settings ~ 'f') != friends_only)"
" AND (journalid BETWEEN :start AND :end)"
),
{"start": i, "end": i + BATCH_SIZE - 1},
)
context.bind.execute(
text(
"UPDATE journal SET settings = regexp_replace(settings, '[hf]', '', 'g')"
" || (CASE WHEN hidden THEN 'h' ELSE '' END)"
" || (CASE WHEN friends_only THEN 'f' ELSE '' END)"
" WHERE ((settings ~ 'h') != hidden OR (settings ~ 'f') != friends_only)"
),
)
| true
| true
|
f71a2f238671395b100919c093a517ccf04d98ac
| 2,876
|
py
|
Python
|
resolwe_bio/processes/slamdunk/alleyoop_utrrates.py
|
plojyon/resolwe-bio
|
45d001a78fcc387b5e3239a34c9da7f40d789022
|
[
"Apache-2.0"
] | 12
|
2015-12-07T18:29:27.000Z
|
2022-03-16T08:00:18.000Z
|
resolwe_bio/processes/slamdunk/alleyoop_utrrates.py
|
plojyon/resolwe-bio
|
45d001a78fcc387b5e3239a34c9da7f40d789022
|
[
"Apache-2.0"
] | 480
|
2015-11-20T21:46:43.000Z
|
2022-03-28T12:40:57.000Z
|
resolwe_bio/processes/slamdunk/alleyoop_utrrates.py
|
plojyon/resolwe-bio
|
45d001a78fcc387b5e3239a34c9da7f40d789022
|
[
"Apache-2.0"
] | 45
|
2015-11-19T14:54:07.000Z
|
2022-02-13T21:36:50.000Z
|
"""Run Alleyoop utrrates tool on Slamdunk results."""
import os
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
IntegerField,
Process,
StringField,
)
class AlleyoopUtrRates(Process):
"""Run Alleyoop utrrates."""
slug = "alleyoop-utr-rates"
process_type = "data:alleyoop:utrrates"
name = "Alleyoop UTR Rates"
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/slamdunk:2.0.0"},
},
"resources": {
"cores": 1,
"memory": 16384,
},
}
entity = {
"type": "sample",
}
category = "Slamdunk"
data_name = '{{ slamdunk|sample_name|default("?") }}'
version = "1.2.1"
class Input:
"""Input fields for AlleyoopUtrRates."""
        ref_seq = DataField(
            "seq:nucleotide", label="FASTA file containing sequences for aligning"
)
regions = DataField(
"bed", label="BED file with coordinates of regions of interest"
)
slamdunk = DataField("alignment:bam:slamdunk", label="Slamdunk results")
read_length = IntegerField(
label="Maximum read length",
description="Maximum length of reads in the input FASTQ file",
default=150,
)
class Output:
"""Output fields to process AlleyoopUtrRates."""
report = FileField(
label="Tab-separated file containing conversion rates on each region of interest"
)
plot = FileField(label="Region of interest conversion rate plot")
species = StringField(label="Species")
build = StringField(label="Build")
def run(self, inputs, outputs):
"""Run analysis."""
basename = os.path.basename(inputs.slamdunk.output.bam.path)
assert basename.endswith(".bam")
name = basename[:-4]
args = [
"-o",
"utrrates",
"-r",
inputs.ref_seq.output.fasta.path,
"-b",
inputs.regions.output.bed.path,
"-l",
inputs.read_length,
]
return_code, _, _ = Cmd["alleyoop"]["utrrates"][args][
inputs.slamdunk.output.bam.path
] & TEE(retcode=None)
if return_code:
self.error("Alleyoop utrrates analysis failed.")
rates_file = os.path.join("utrrates", f"{name}_mutationrates_utr.csv")
rates_file_renamed = os.path.join("utrrates", f"{name}_mutationrates.txt")
os.rename(rates_file, rates_file_renamed)
outputs.report = rates_file_renamed
outputs.plot = os.path.join("utrrates", f"{name}_mutationrates_utr.pdf")
outputs.species = inputs.slamdunk.output.species
outputs.build = inputs.slamdunk.output.build
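# Illustrative note (not part of the original process): with the default
# read_length of 150, the command assembled in run() above is equivalent to
# roughly the following invocation (paths are placeholders for the staged
# inputs):
#   alleyoop utrrates -o utrrates -r ref_seq.fasta -b regions.bed -l 150 sample.bam
# after which {name}_mutationrates_utr.csv is renamed to {name}_mutationrates.txt
# and reported as the output table.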
| 30.273684
| 93
| 0.585883
|
import os
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
IntegerField,
Process,
StringField,
)
class AlleyoopUtrRates(Process):
slug = "alleyoop-utr-rates"
process_type = "data:alleyoop:utrrates"
name = "Alleyoop UTR Rates"
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/slamdunk:2.0.0"},
},
"resources": {
"cores": 1,
"memory": 16384,
},
}
entity = {
"type": "sample",
}
category = "Slamdunk"
data_name = '{{ slamdunk|sample_name|default("?") }}'
version = "1.2.1"
class Input:
        ref_seq = DataField(
            "seq:nucleotide", label="FASTA file containing sequences for aligning"
)
regions = DataField(
"bed", label="BED file with coordinates of regions of interest"
)
slamdunk = DataField("alignment:bam:slamdunk", label="Slamdunk results")
read_length = IntegerField(
label="Maximum read length",
description="Maximum length of reads in the input FASTQ file",
default=150,
)
class Output:
report = FileField(
label="Tab-separated file containing conversion rates on each region of interest"
)
plot = FileField(label="Region of interest conversion rate plot")
species = StringField(label="Species")
build = StringField(label="Build")
def run(self, inputs, outputs):
basename = os.path.basename(inputs.slamdunk.output.bam.path)
assert basename.endswith(".bam")
name = basename[:-4]
args = [
"-o",
"utrrates",
"-r",
inputs.ref_seq.output.fasta.path,
"-b",
inputs.regions.output.bed.path,
"-l",
inputs.read_length,
]
return_code, _, _ = Cmd["alleyoop"]["utrrates"][args][
inputs.slamdunk.output.bam.path
] & TEE(retcode=None)
if return_code:
self.error("Alleyoop utrrates analysis failed.")
rates_file = os.path.join("utrrates", f"{name}_mutationrates_utr.csv")
rates_file_renamed = os.path.join("utrrates", f"{name}_mutationrates.txt")
os.rename(rates_file, rates_file_renamed)
outputs.report = rates_file_renamed
outputs.plot = os.path.join("utrrates", f"{name}_mutationrates_utr.pdf")
outputs.species = inputs.slamdunk.output.species
outputs.build = inputs.slamdunk.output.build
| true
| true
|
f71a2fbd3261e086d9f3bcb7623757c304921595
| 3,328
|
py
|
Python
|
fixture/orm.py
|
IKeiran/FPT-Sinyakov
|
08c5121d84c394bcee91d087ac2d14581179d2fd
|
[
"Apache-2.0"
] | null | null | null |
fixture/orm.py
|
IKeiran/FPT-Sinyakov
|
08c5121d84c394bcee91d087ac2d14581179d2fd
|
[
"Apache-2.0"
] | null | null | null |
fixture/orm.py
|
IKeiran/FPT-Sinyakov
|
08c5121d84c394bcee91d087ac2d14581179d2fd
|
[
"Apache-2.0"
] | null | null | null |
from pony.orm import *
from datetime import datetime
from model.contact import Contact
from model.group import Group
from pymysql.converters import decoders
class ORMFixtue:
db = Database()
class ORMGroup(db.Entity):
_table_ = 'group_list'
id = PrimaryKey(int, column='group_id')
name = Optional(str, column='group_name')
header = Optional(str, column='group_header')
footer = Optional(str, column='group_footer')
contacts = Set(lambda: ORMFixtue.ORMContact, table='address_in_groups', column='id', reverse='groups', lazy=True)
class ORMContact(db.Entity):
_table_ = 'addressbook'
id = PrimaryKey(int, column='id')
first_name = Optional(str, column='firstname')
last_name = Optional(str, column='lastname')
address = Optional(str, column='address')
home_phone = Optional(str, column='home')
mobile_phone = Optional(str, column='mobile')
work_phone = Optional(str, column='work')
email_prime = Optional(str, column='email')
email_secondary = Optional(str, column='email2')
email_third = Optional(str, column='email3')
deprecated = Optional(datetime, column='deprecated')
        groups = Set(lambda: ORMFixtue.ORMGroup, table='address_in_groups', column='group_id', reverse='contacts', lazy=True)
def __init__(self, host, name, user, password):
self.db.bind('mysql', host=host, database=name, user=user, password=password, conv=decoders)
self.db.generate_mapping()
def convert_groups_to_model(self, groups):
def convert(group):
return Group(id=str(group.id), name=group.name, header=group.header, footer=group.footer)
return list(map(convert, groups))
@db_session
def get_group_list(self):
return self.convert_groups_to_model(select(g for g in ORMFixtue.ORMGroup))
def convert_contacts_to_model(self, contacts):
def convert(contact):
result = Contact(id=str(contact.id),
first_name=contact.first_name, last_name=contact.last_name, adress=contact.address,
home_phone=contact.home_phone, mobile_phone=contact.mobile_phone, work_phone=contact.work_phone,
email_prime=contact.email_prime, email_secondary=contact.email_secondary, email_third=contact.email_third)
return result
return list(map(convert, contacts))
@db_session
def get_contact_list(self):
return self.convert_contacts_to_model(select(c for c in ORMFixtue.ORMContact if c.deprecated is None))
@db_session
def get_orm_group(self, group):
return list(select(g for g in ORMFixtue.ORMGroup if g.id == group.id))[0]
@db_session
def get_contacts_in_group(self, group):
orm_group = self.get_orm_group(group)
return self.convert_contacts_to_model(orm_group.contacts)
@db_session
def get_contacts_not_in_group(self, group):
orm_group = self.get_orm_group(group)
return self.convert_contacts_to_model(
select(c for c in ORMFixtue.ORMContact if c.deprecated is None and orm_group not in c.groups))
# @db_session
# def get_contact_group_boundry(self):
# return list(select(d for d in ORMFixtue.ORMBoundary))
| 41.6
| 133
| 0.679688
|
from pony.orm import *
from datetime import datetime
from model.contact import Contact
from model.group import Group
from pymysql.converters import decoders
class ORMFixtue:
db = Database()
class ORMGroup(db.Entity):
_table_ = 'group_list'
id = PrimaryKey(int, column='group_id')
name = Optional(str, column='group_name')
header = Optional(str, column='group_header')
footer = Optional(str, column='group_footer')
contacts = Set(lambda: ORMFixtue.ORMContact, table='address_in_groups', column='id', reverse='groups', lazy=True)
class ORMContact(db.Entity):
_table_ = 'addressbook'
id = PrimaryKey(int, column='id')
first_name = Optional(str, column='firstname')
last_name = Optional(str, column='lastname')
address = Optional(str, column='address')
home_phone = Optional(str, column='home')
mobile_phone = Optional(str, column='mobile')
work_phone = Optional(str, column='work')
email_prime = Optional(str, column='email')
email_secondary = Optional(str, column='email2')
email_third = Optional(str, column='email3')
deprecated = Optional(datetime, column='deprecated')
        groups = Set(lambda: ORMFixtue.ORMGroup, table='address_in_groups', column='group_id', reverse='contacts', lazy=True)
def __init__(self, host, name, user, password):
self.db.bind('mysql', host=host, database=name, user=user, password=password, conv=decoders)
self.db.generate_mapping()
def convert_groups_to_model(self, groups):
def convert(group):
return Group(id=str(group.id), name=group.name, header=group.header, footer=group.footer)
return list(map(convert, groups))
@db_session
def get_group_list(self):
return self.convert_groups_to_model(select(g for g in ORMFixtue.ORMGroup))
def convert_contacts_to_model(self, contacts):
def convert(contact):
result = Contact(id=str(contact.id),
first_name=contact.first_name, last_name=contact.last_name, adress=contact.address,
home_phone=contact.home_phone, mobile_phone=contact.mobile_phone, work_phone=contact.work_phone,
email_prime=contact.email_prime, email_secondary=contact.email_secondary, email_third=contact.email_third)
return result
return list(map(convert, contacts))
@db_session
def get_contact_list(self):
return self.convert_contacts_to_model(select(c for c in ORMFixtue.ORMContact if c.deprecated is None))
@db_session
def get_orm_group(self, group):
return list(select(g for g in ORMFixtue.ORMGroup if g.id == group.id))[0]
@db_session
def get_contacts_in_group(self, group):
orm_group = self.get_orm_group(group)
return self.convert_contacts_to_model(orm_group.contacts)
@db_session
def get_contacts_not_in_group(self, group):
orm_group = self.get_orm_group(group)
return self.convert_contacts_to_model(
select(c for c in ORMFixtue.ORMContact if c.deprecated is None and orm_group not in c.groups))
| true
| true
|
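A minimal usage sketch of the ORMFixtue fixture above, assuming a reachable MySQL server with the addressbook schema; the connection values are hypothetical:

# Pony binds the shared Database object once, inside __init__.
orm = ORMFixtue(host="127.0.0.1", name="addressbook", user="root", password="secret")
for group in orm.get_group_list():
    contacts = orm.get_contacts_in_group(group)       # members of the group
    outsiders = orm.get_contacts_not_in_group(group)  # non-deprecated non-members
    print(group.name, [c.first_name for c in contacts], len(outsiders))

Each accessor runs inside its own @db_session and converts the ORM entities into the plain Group and Contact model objects, so the results stay usable after the session closes.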
f71a300263267957f62029ccbbaaa9d0a69f7565
| 5,677
|
py
|
Python
|
selfdrive/car/chrysler/carstate.py
|
choongsoo/openpilot
|
3441ee566669f40ffaac622b0ef025e5da570af1
|
[
"MIT"
] | 1
|
2022-03-31T05:07:44.000Z
|
2022-03-31T05:07:44.000Z
|
selfdrive/car/chrysler/carstate.py
|
choongsoo/openpilot
|
3441ee566669f40ffaac622b0ef025e5da570af1
|
[
"MIT"
] | null | null | null |
selfdrive/car/chrysler/carstate.py
|
choongsoo/openpilot
|
3441ee566669f40ffaac622b0ef025e5da570af1
|
[
"MIT"
] | null | null | null |
from cereal import car
from common.conversions import Conversions as CV
from opendbc.can.parser import CANParser
from opendbc.can.can_define import CANDefine
from selfdrive.car.interfaces import CarStateBase
from selfdrive.car.chrysler.values import DBC, STEER_THRESHOLD
class CarState(CarStateBase):
def __init__(self, CP):
super().__init__(CP)
can_define = CANDefine(DBC[CP.carFingerprint]["pt"])
self.shifter_values = can_define.dv["GEAR"]["PRNDL"]
def update(self, cp, cp_cam):
ret = car.CarState.new_message()
self.frame = int(cp.vl["EPS_STATUS"]["COUNTER"])
ret.doorOpen = any([cp.vl["BCM_1"]["DOOR_OPEN_FL"],
cp.vl["BCM_1"]["DOOR_OPEN_FR"],
cp.vl["BCM_1"]["DOOR_OPEN_RL"],
cp.vl["BCM_1"]["DOOR_OPEN_RR"]])
ret.seatbeltUnlatched = cp.vl["SEATBELT_STATUS"]["SEATBELT_DRIVER_UNLATCHED"] == 1
# brake pedal
ret.brake = 0
ret.brakePressed = cp.vl["ESP_1"]['Brake_Pedal_State'] == 1 # Physical brake pedal switch
# gas pedal
ret.gas = cp.vl["ECM_5"]["Accelerator_Position"]
ret.gasPressed = ret.gas > 1e-5
ret.espDisabled = (cp.vl["TRACTION_BUTTON"]["TRACTION_OFF"] == 1)
ret.wheelSpeeds = self.get_wheel_speeds(
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_FL"],
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_FR"],
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_RL"],
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_RR"],
unit=1,
)
ret.vEgoRaw = (cp.vl["SPEED_1"]["SPEED_LEFT"] + cp.vl["SPEED_1"]["SPEED_RIGHT"]) / 2.
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.standstill = not ret.vEgoRaw > 0.001
ret.leftBlinker = cp.vl["STEERING_LEVERS"]["TURN_SIGNALS"] == 1
ret.rightBlinker = cp.vl["STEERING_LEVERS"]["TURN_SIGNALS"] == 2
ret.steeringAngleDeg = cp.vl["STEERING"]["STEER_ANGLE"]
ret.steeringRateDeg = cp.vl["STEERING"]["STEERING_RATE"]
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(cp.vl["GEAR"]["PRNDL"], None))
ret.cruiseState.available = cp.vl["DAS_3"]["ACC_AVAILABLE"] == 1 # ACC is white
ret.cruiseState.enabled = cp.vl["DAS_3"]["ACC_ACTIVE"] == 1 # ACC is green
ret.cruiseState.speed = cp.vl["DASHBOARD"]["ACC_SPEED_CONFIG_KPH"] * CV.KPH_TO_MS
# CRUISE_STATE is a three bit msg, 0 is off, 1 and 2 are Non-ACC mode, 3 and 4 are ACC mode, find if there are other states too
ret.cruiseState.nonAdaptive = cp.vl["DASHBOARD"]["CRUISE_STATE"] in (1, 2)
ret.accFaulted = cp.vl["DAS_3"]["ACC_FAULTED"] != 0
ret.steeringTorque = cp.vl["EPS_STATUS"]["TORQUE_DRIVER"]
ret.steeringTorqueEps = cp.vl["EPS_STATUS"]["TORQUE_MOTOR"]
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD
steer_state = cp.vl["EPS_STATUS"]["LKAS_STATE"]
ret.steerFaultPermanent = steer_state == 4 or (steer_state == 0 and ret.vEgo > self.CP.minSteerSpeed)
ret.genericToggle = bool(cp.vl["STEERING_LEVERS"]["HIGH_BEAM_FLASH"])
if self.CP.enableBsm:
ret.leftBlindspot = cp.vl["BLIND_SPOT_WARNINGS"]["BLIND_SPOT_LEFT"] == 1
ret.rightBlindspot = cp.vl["BLIND_SPOT_WARNINGS"]["BLIND_SPOT_RIGHT"] == 1
self.lkas_counter = cp_cam.vl["LKAS_COMMAND"]["COUNTER"]
self.lkas_car_model = cp_cam.vl["LKAS_HUD"]["CAR_MODEL"]
self.lkas_status_ok = cp_cam.vl["LKAS_HEARTBIT"]["LKAS_STATUS_OK"]
self.button_counter = cp.vl["WHEEL_BUTTONS"]["COUNTER"]
return ret
@staticmethod
def get_can_parser(CP):
signals = [
# sig_name, sig_address
("PRNDL", "GEAR"),
("DOOR_OPEN_FL", "BCM_1"),
("DOOR_OPEN_FR", "BCM_1"),
("DOOR_OPEN_RL", "BCM_1"),
("DOOR_OPEN_RR", "BCM_1"),
("Brake_Pedal_State", "ESP_1"),
("Accelerator_Position", "ECM_5"),
("SPEED_LEFT", "SPEED_1"),
("SPEED_RIGHT", "SPEED_1"),
("WHEEL_SPEED_FL", "WHEEL_SPEEDS"),
("WHEEL_SPEED_RR", "WHEEL_SPEEDS"),
("WHEEL_SPEED_RL", "WHEEL_SPEEDS"),
("WHEEL_SPEED_FR", "WHEEL_SPEEDS"),
("STEER_ANGLE", "STEERING"),
("STEERING_RATE", "STEERING"),
("TURN_SIGNALS", "STEERING_LEVERS"),
("ACC_AVAILABLE", "DAS_3"),
("ACC_ACTIVE", "DAS_3"),
("ACC_FAULTED", "DAS_3"),
("HIGH_BEAM_FLASH", "STEERING_LEVERS"),
("ACC_SPEED_CONFIG_KPH", "DASHBOARD"),
("CRUISE_STATE", "DASHBOARD"),
("TORQUE_DRIVER", "EPS_STATUS"),
("TORQUE_MOTOR", "EPS_STATUS"),
("LKAS_STATE", "EPS_STATUS"),
("COUNTER", "EPS_STATUS",),
("TRACTION_OFF", "TRACTION_BUTTON"),
("SEATBELT_DRIVER_UNLATCHED", "SEATBELT_STATUS"),
("COUNTER", "WHEEL_BUTTONS"),
]
checks = [
# sig_address, frequency
("ESP_1", 50),
("EPS_STATUS", 100),
("SPEED_1", 100),
("WHEEL_SPEEDS", 50),
("STEERING", 100),
("DAS_3", 50),
("GEAR", 50),
("ECM_5", 50),
("WHEEL_BUTTONS", 50),
("DASHBOARD", 15),
("STEERING_LEVERS", 10),
("SEATBELT_STATUS", 2),
("BCM_1", 1),
("TRACTION_BUTTON", 1),
]
if CP.enableBsm:
signals += [
("BLIND_SPOT_RIGHT", "BLIND_SPOT_WARNINGS"),
("BLIND_SPOT_LEFT", "BLIND_SPOT_WARNINGS"),
]
checks.append(("BLIND_SPOT_WARNINGS", 2))
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 0)
@staticmethod
def get_cam_can_parser(CP):
signals = [
# sig_name, sig_address
("COUNTER", "LKAS_COMMAND"),
("CAR_MODEL", "LKAS_HUD"),
("LKAS_STATUS_OK", "LKAS_HEARTBIT")
]
checks = [
("LKAS_COMMAND", 100),
("LKAS_HEARTBIT", 10),
("LKAS_HUD", 4),
]
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 2)
| 36.159236
| 131
| 0.630791
|
from cereal import car
from common.conversions import Conversions as CV
from opendbc.can.parser import CANParser
from opendbc.can.can_define import CANDefine
from selfdrive.car.interfaces import CarStateBase
from selfdrive.car.chrysler.values import DBC, STEER_THRESHOLD
class CarState(CarStateBase):
def __init__(self, CP):
super().__init__(CP)
can_define = CANDefine(DBC[CP.carFingerprint]["pt"])
self.shifter_values = can_define.dv["GEAR"]["PRNDL"]
def update(self, cp, cp_cam):
ret = car.CarState.new_message()
self.frame = int(cp.vl["EPS_STATUS"]["COUNTER"])
ret.doorOpen = any([cp.vl["BCM_1"]["DOOR_OPEN_FL"],
cp.vl["BCM_1"]["DOOR_OPEN_FR"],
cp.vl["BCM_1"]["DOOR_OPEN_RL"],
cp.vl["BCM_1"]["DOOR_OPEN_RR"]])
ret.seatbeltUnlatched = cp.vl["SEATBELT_STATUS"]["SEATBELT_DRIVER_UNLATCHED"] == 1
ret.brake = 0
ret.brakePressed = cp.vl["ESP_1"]['Brake_Pedal_State'] == 1
ret.gas = cp.vl["ECM_5"]["Accelerator_Position"]
ret.gasPressed = ret.gas > 1e-5
ret.espDisabled = (cp.vl["TRACTION_BUTTON"]["TRACTION_OFF"] == 1)
ret.wheelSpeeds = self.get_wheel_speeds(
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_FL"],
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_FR"],
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_RL"],
cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_RR"],
unit=1,
)
ret.vEgoRaw = (cp.vl["SPEED_1"]["SPEED_LEFT"] + cp.vl["SPEED_1"]["SPEED_RIGHT"]) / 2.
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.standstill = not ret.vEgoRaw > 0.001
ret.leftBlinker = cp.vl["STEERING_LEVERS"]["TURN_SIGNALS"] == 1
ret.rightBlinker = cp.vl["STEERING_LEVERS"]["TURN_SIGNALS"] == 2
ret.steeringAngleDeg = cp.vl["STEERING"]["STEER_ANGLE"]
ret.steeringRateDeg = cp.vl["STEERING"]["STEERING_RATE"]
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(cp.vl["GEAR"]["PRNDL"], None))
ret.cruiseState.available = cp.vl["DAS_3"]["ACC_AVAILABLE"] == 1
ret.cruiseState.enabled = cp.vl["DAS_3"]["ACC_ACTIVE"] == 1
ret.cruiseState.speed = cp.vl["DASHBOARD"]["ACC_SPEED_CONFIG_KPH"] * CV.KPH_TO_MS
ret.cruiseState.nonAdaptive = cp.vl["DASHBOARD"]["CRUISE_STATE"] in (1, 2)
ret.accFaulted = cp.vl["DAS_3"]["ACC_FAULTED"] != 0
ret.steeringTorque = cp.vl["EPS_STATUS"]["TORQUE_DRIVER"]
ret.steeringTorqueEps = cp.vl["EPS_STATUS"]["TORQUE_MOTOR"]
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD
steer_state = cp.vl["EPS_STATUS"]["LKAS_STATE"]
ret.steerFaultPermanent = steer_state == 4 or (steer_state == 0 and ret.vEgo > self.CP.minSteerSpeed)
ret.genericToggle = bool(cp.vl["STEERING_LEVERS"]["HIGH_BEAM_FLASH"])
if self.CP.enableBsm:
ret.leftBlindspot = cp.vl["BLIND_SPOT_WARNINGS"]["BLIND_SPOT_LEFT"] == 1
ret.rightBlindspot = cp.vl["BLIND_SPOT_WARNINGS"]["BLIND_SPOT_RIGHT"] == 1
self.lkas_counter = cp_cam.vl["LKAS_COMMAND"]["COUNTER"]
self.lkas_car_model = cp_cam.vl["LKAS_HUD"]["CAR_MODEL"]
self.lkas_status_ok = cp_cam.vl["LKAS_HEARTBIT"]["LKAS_STATUS_OK"]
self.button_counter = cp.vl["WHEEL_BUTTONS"]["COUNTER"]
return ret
@staticmethod
def get_can_parser(CP):
signals = [
("PRNDL", "GEAR"),
("DOOR_OPEN_FL", "BCM_1"),
("DOOR_OPEN_FR", "BCM_1"),
("DOOR_OPEN_RL", "BCM_1"),
("DOOR_OPEN_RR", "BCM_1"),
("Brake_Pedal_State", "ESP_1"),
("Accelerator_Position", "ECM_5"),
("SPEED_LEFT", "SPEED_1"),
("SPEED_RIGHT", "SPEED_1"),
("WHEEL_SPEED_FL", "WHEEL_SPEEDS"),
("WHEEL_SPEED_RR", "WHEEL_SPEEDS"),
("WHEEL_SPEED_RL", "WHEEL_SPEEDS"),
("WHEEL_SPEED_FR", "WHEEL_SPEEDS"),
("STEER_ANGLE", "STEERING"),
("STEERING_RATE", "STEERING"),
("TURN_SIGNALS", "STEERING_LEVERS"),
("ACC_AVAILABLE", "DAS_3"),
("ACC_ACTIVE", "DAS_3"),
("ACC_FAULTED", "DAS_3"),
("HIGH_BEAM_FLASH", "STEERING_LEVERS"),
("ACC_SPEED_CONFIG_KPH", "DASHBOARD"),
("CRUISE_STATE", "DASHBOARD"),
("TORQUE_DRIVER", "EPS_STATUS"),
("TORQUE_MOTOR", "EPS_STATUS"),
("LKAS_STATE", "EPS_STATUS"),
("COUNTER", "EPS_STATUS",),
("TRACTION_OFF", "TRACTION_BUTTON"),
("SEATBELT_DRIVER_UNLATCHED", "SEATBELT_STATUS"),
("COUNTER", "WHEEL_BUTTONS"),
]
checks = [
("ESP_1", 50),
("EPS_STATUS", 100),
("SPEED_1", 100),
("WHEEL_SPEEDS", 50),
("STEERING", 100),
("DAS_3", 50),
("GEAR", 50),
("ECM_5", 50),
("WHEEL_BUTTONS", 50),
("DASHBOARD", 15),
("STEERING_LEVERS", 10),
("SEATBELT_STATUS", 2),
("BCM_1", 1),
("TRACTION_BUTTON", 1),
]
if CP.enableBsm:
signals += [
("BLIND_SPOT_RIGHT", "BLIND_SPOT_WARNINGS"),
("BLIND_SPOT_LEFT", "BLIND_SPOT_WARNINGS"),
]
checks.append(("BLIND_SPOT_WARNINGS", 2))
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 0)
@staticmethod
def get_cam_can_parser(CP):
signals = [
("COUNTER", "LKAS_COMMAND"),
("CAR_MODEL", "LKAS_HUD"),
("LKAS_STATUS_OK", "LKAS_HEARTBIT")
]
checks = [
("LKAS_COMMAND", 100),
("LKAS_HEARTBIT", 10),
("LKAS_HUD", 4),
]
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 2)
| true
| true
|
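The in-line comment in update() above documents CRUISE_STATE as a three-bit field: 0 is off, 1 and 2 are conventional (non-adaptive) cruise, 3 and 4 are ACC. A compact restatement of that decoding, kept separate from the CAN plumbing; values outside 0-4 are unobserved per that comment:

def decode_cruise_state(cruise_state: int) -> dict:
    # Mirrors ret.cruiseState.nonAdaptive above; the "acc" flag follows the
    # same comment and is an illustration, not a field the class sets.
    return {
        "non_adaptive": cruise_state in (1, 2),
        "acc": cruise_state in (3, 4),
    }

assert decode_cruise_state(2) == {"non_adaptive": True, "acc": False}
assert decode_cruise_state(3) == {"non_adaptive": False, "acc": True}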
f71a301d080276930f713a265069db17067d03cb
| 43
|
py
|
Python
|
linguistics/bert/__init__.py
|
idin/mercurius
|
48a4ed7843fb5d1946ef8051f23da7b32ab52ca3
|
[
"MIT"
] | 7
|
2019-02-24T16:56:46.000Z
|
2022-01-30T03:26:49.000Z
|
linguistics/bert/__init__.py
|
idin/mercurius
|
48a4ed7843fb5d1946ef8051f23da7b32ab52ca3
|
[
"MIT"
] | 1
|
2020-07-14T21:00:57.000Z
|
2021-02-25T07:12:11.000Z
|
linguistics/bert/__init__.py
|
idin/linguistics
|
ab9568d81b225928beab353174fd97ccb0fe369c
|
[
"MIT"
] | null | null | null |
from .BertVectorizer import BertVectorizer
| 21.5
| 42
| 0.883721
|
from .BertVectorizer import BertVectorizer
| true
| true
|
f71a30533b6634f0a1e795ab1b2cb53461019bfe
| 1,928
|
py
|
Python
|
upvote/gae/lib/bit9/monitoring.py
|
iwikmai/upvote
|
77bb200d0e35a28cc5aed98ceee8e234998814b6
|
[
"Apache-2.0"
] | 453
|
2017-10-24T15:29:44.000Z
|
2021-09-27T23:21:20.000Z
|
upvote/gae/lib/bit9/monitoring.py
|
iwikmai/upvote
|
77bb200d0e35a28cc5aed98ceee8e234998814b6
|
[
"Apache-2.0"
] | 58
|
2018-03-23T21:19:16.000Z
|
2021-05-23T20:06:05.000Z
|
upvote/gae/lib/bit9/monitoring.py
|
iwikmai/upvote
|
77bb200d0e35a28cc5aed98ceee8e234998814b6
|
[
"Apache-2.0"
] | 36
|
2018-03-23T21:25:54.000Z
|
2021-09-27T23:21:24.000Z
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Monitoring metrics for the bit9_api AppEngine module."""
import six
from upvote.gae.utils import monitoring_utils
from upvote.monitoring import metrics
# Remove once everything is PY3, where long == int
if six.PY3:
long = int # pylint: disable=redefined-builtin, invalid-name
events_to_pull = monitoring_utils.Metric(metrics.BIT9_API.EVENTS_TO_PULL, long)
events_pulled = monitoring_utils.Counter(metrics.BIT9_API.EVENTS_PULLED)
events_to_process = monitoring_utils.Metric(
metrics.BIT9_API.EVENTS_TO_PROCESS, long)
events_processed = monitoring_utils.Counter(metrics.BIT9_API.EVENTS_PROCESSED)
events_skipped = monitoring_utils.Counter(metrics.BIT9_API.EVENTS_SKIPPED)
pending_changes = monitoring_utils.Metric(metrics.BIT9_API.PENDING_CHANGES, long)
# Bit9 integration metrics
bit9_logins = monitoring_utils.SuccessFailureCounter(metrics.BIT9_API.BIT9_LOGINS)
bit9_qps = monitoring_utils.Counter(metrics.BIT9_API.BIT9_QPS)
bit9_requests = monitoring_utils.Counter(
metrics.BIT9_API.BIT9_REQUESTS,
fields=[('http_method', str), ('api_object', str), ('http_status', int)])
bit9_latency = monitoring_utils.LatencyMetric(
metrics.BIT9_API.BIT9_LATENCY,
fields=[('http_method', str), ('api_object', str)])
file_instances_missing = monitoring_utils.Counter(
metrics.BIT9_API.FILE_INSTANCES_MISSING)
| 41.913043
| 82
| 0.795643
|
import six
from upvote.gae.utils import monitoring_utils
from upvote.monitoring import metrics
if six.PY3:
long = int
events_to_pull = monitoring_utils.Metric(metrics.BIT9_API.EVENTS_TO_PULL, long)
events_pulled = monitoring_utils.Counter(metrics.BIT9_API.EVENTS_PULLED)
events_to_process = monitoring_utils.Metric(
metrics.BIT9_API.EVENTS_TO_PROCESS, long)
events_processed = monitoring_utils.Counter(metrics.BIT9_API.EVENTS_PROCESSED)
events_skipped = monitoring_utils.Counter(metrics.BIT9_API.EVENTS_SKIPPED)
pending_changes = monitoring_utils.Metric(metrics.BIT9_API.PENDING_CHANGES, long)
bit9_logins = monitoring_utils.SuccessFailureCounter(metrics.BIT9_API.BIT9_LOGINS)
bit9_qps = monitoring_utils.Counter(metrics.BIT9_API.BIT9_QPS)
bit9_requests = monitoring_utils.Counter(
metrics.BIT9_API.BIT9_REQUESTS,
fields=[('http_method', str), ('api_object', str), ('http_status', int)])
bit9_latency = monitoring_utils.LatencyMetric(
metrics.BIT9_API.BIT9_LATENCY,
fields=[('http_method', str), ('api_object', str)])
file_instances_missing = monitoring_utils.Counter(
metrics.BIT9_API.FILE_INSTANCES_MISSING)
| true
| true
|
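The six.PY3 shim above is the standard bridge for the Python 2 long type that Python 3 removed; a self-contained restatement of the same pattern:

import six

if six.PY3:
    long = int  # rebinding keeps long-typed metric declarations valid on both interpreters

# On either interpreter the result is an arbitrary-precision integer.
assert long("9999999999") * long("9999999999") > 0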
f71a332a571fb8fd40a02f9f22795f51a43552c4
| 4,280
|
py
|
Python
|
single_query_extract.py
|
Gguinet/semisupervised-alignment
|
4f914c2e95ef69fa3aefe312fb9b12e482c6f0b5
|
[
"MIT"
] | 2
|
2021-01-16T14:12:21.000Z
|
2021-12-31T10:15:39.000Z
|
single_query_extract.py
|
Gguinet/semisupervised-alignment
|
4f914c2e95ef69fa3aefe312fb9b12e482c6f0b5
|
[
"MIT"
] | null | null | null |
single_query_extract.py
|
Gguinet/semisupervised-alignment
|
4f914c2e95ef69fa3aefe312fb9b12e482c6f0b5
|
[
"MIT"
] | 1
|
2021-03-06T15:52:49.000Z
|
2021-03-06T15:52:49.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Modifications for Guinet et al.
import io
import warnings
import numpy as np
import argparse
from utils import *
from query_aux import *
# Disable warnings for Meta-features
warnings.filterwarnings("ignore")
# to use bool for parsing
def str2bool(v):
"""Parse String to bool
Args:
v: String or Bool
Returns:
bool
Raises:
ArgumentTypeError: If v is not a String nor a bool
"""
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
parser = argparse.ArgumentParser(description="Extraction of queries simplified")
parser.add_argument(
"--src_emb", type=str, default="", help="Load source embeddings for training"
)
parser.add_argument(
"--tgt_emb", type=str, default="", help="Load target embeddings for validation"
)
parser.add_argument(
"--filename", type=str, default="", help="Filename of lightsvm files extracted"
)
parser.add_argument(
"--center", action="store_true", help="whether to center embeddings or not"
)
parser.add_argument(
"--dico", type=str, default="", help="Dictionary for query extraction"
)
parser.add_argument("--maxload", type=int, default=200000)
parser.add_argument(
"--query_relevance_type",
type=str,
default="",
help="Type of query relevance: binary or continuous",
)
parser.add_argument("--query_size", type=int, default=10, help="Size of the query")
parser.add_argument(
"--add_csls_coord",
type=str2bool,
default=True,
help="Whether to add to query coord CSLS distance",
)
parser.add_argument(
"--k_csls",
type=int,
default=10,
help="Number of coord in query for CSLS distance (from 0 to k)",
)
parser.add_argument(
"--testing_query",
type=str2bool,
default=False,
help="Whether to impose the ground truth traduction presence in the query",
)
parser.add_argument(
"--add_word_coord",
type=str2bool,
default=False,
help="Whether to add to query coord word embedding",
)
parser.add_argument(
"--discard_empty_query",
type=str2bool,
default=False,
help="Whether to remove query without the right traduction or not",
)
parser.add_argument(
"--use_csls",
type=str2bool,
default=False,
help="Whether to use CSLS distance or CosineSim",
)
parser.add_argument(
"--add_query_coord",
type=str2bool,
default=False,
help="Whether to add to query coord query word embedding",
)
parser.add_argument(
"--add_meta_features",
type=str2bool,
default=True,
help="Whether to add to meta-features of the 2 clouds (source and target)",
)
parser.add_argument(
"--center_meta_features",
type=str2bool,
default=True,
help="Whether to add to center the meta-features of the target clouds",
)
parser.add_argument(
"--nn_size_meta_features",
type=int,
default=10,
help="Number of neighbors to use when computing meta-features",
)
params = parser.parse_args()
###### MAIN ######
query_extractor = (
compute_binary_distance
if params.query_relevance_type == "binary"
else compute_embedding_distance
)
print("Extraction of queries alignment on %s" % params.dico)
words_tgt, x_tgt = load_vectors(
params.tgt_emb, maxload=params.maxload, center=params.center
)
words_src, x_src = load_vectors(
params.src_emb, maxload=params.maxload, center=params.center
)
print("Loading and extracting data")
src2tgt, lexicon_size = load_lexicon(params.dico, words_src, words_tgt)
query_extractor(
x_src,
x_tgt,
params.filename,
src2tgt,
add_csls_coord=params.add_csls_coord,
k_csls=params.k_csls,
testing_query=params.testing_query,
discard_empty_query=params.discard_empty_query,
add_word_coord=params.add_word_coord,
add_query_coord=params.add_query_coord,
add_meta_features=params.add_meta_features,
center_meta_features=params.center_meta_features,
nn_size_meta_features=params.nn_size_meta_features,
query_size=params.query_size,
use_csls=params.use_csls
)
print("Query file extracted")
| 25.628743
| 83
| 0.700935
|
import io
import warnings
import numpy as np
import argparse
from utils import *
from query_aux import *
warnings.filterwarnings("ignore")
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
parser = argparse.ArgumentParser(description="Extraction of queries simplified")
parser.add_argument(
"--src_emb", type=str, default="", help="Load source embeddings for training"
)
parser.add_argument(
"--tgt_emb", type=str, default="", help="Load target embeddings for validation"
)
parser.add_argument(
"--filename", type=str, default="", help="Filename of lightsvm files extracted"
)
parser.add_argument(
"--center", action="store_true", help="whether to center embeddings or not"
)
parser.add_argument(
"--dico", type=str, default="", help="Dictionary for query extraction"
)
parser.add_argument("--maxload", type=int, default=200000)
parser.add_argument(
"--query_relevance_type",
type=str,
default="",
help="Type of query relevance: binary or continuous",
)
parser.add_argument("--query_size", type=int, default=10, help="Size of the query")
parser.add_argument(
"--add_csls_coord",
type=str2bool,
default=True,
help="Whether to add to query coord CSLS distance",
)
parser.add_argument(
"--k_csls",
type=int,
default=10,
help="Number of coord in query for CSLS distance (from 0 to k)",
)
parser.add_argument(
"--testing_query",
type=str2bool,
default=False,
help="Whether to impose the ground truth traduction presence in the query",
)
parser.add_argument(
"--add_word_coord",
type=str2bool,
default=False,
help="Whether to add to query coord word embedding",
)
parser.add_argument(
"--discard_empty_query",
type=str2bool,
default=False,
help="Whether to remove query without the right traduction or not",
)
parser.add_argument(
"--use_csls",
type=str2bool,
default=False,
help="Whether to use CSLS distance or CosineSim",
)
parser.add_argument(
"--add_query_coord",
type=str2bool,
default=False,
help="Whether to add to query coord query word embedding",
)
parser.add_argument(
"--add_meta_features",
type=str2bool,
default=True,
help="Whether to add to meta-features of the 2 clouds (source and target)",
)
parser.add_argument(
"--center_meta_features",
type=str2bool,
default=True,
help="Whether to add to center the meta-features of the target clouds",
)
parser.add_argument(
"--nn_size_meta_features",
type=int,
default=10,
help="Number of neighbors to use when computing meta-features",
)
params = parser.parse_args()
"
else compute_embedding_distance
)
print("Extraction of queries alignment on %s" % params.dico)
words_tgt, x_tgt = load_vectors(
params.tgt_emb, maxload=params.maxload, center=params.center
)
words_src, x_src = load_vectors(
params.src_emb, maxload=params.maxload, center=params.center
)
print("Loading and extracting data")
src2tgt, lexicon_size = load_lexicon(params.dico, words_src, words_tgt)
query_extractor(
x_src,
x_tgt,
params.filename,
src2tgt,
add_csls_coord=params.add_csls_coord,
k_csls=params.k_csls,
testing_query=params.testing_query,
discard_empty_query=params.discard_empty_query,
add_word_coord=params.add_word_coord,
add_query_coord=params.add_query_coord,
add_meta_features=params.add_meta_features,
center_meta_features=params.center_meta_features,
nn_size_meta_features=params.nn_size_meta_features,
query_size=params.query_size,
use_csls=params.use_csls
)
print("Query file extracted")
| true
| true
|
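The str2bool helper above exists because argparse's type=bool treats any non-empty string (including "false") as truthy. A quick behavioural check, assuming str2bool from the module above is in scope:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--use_csls", type=str2bool, default=False)
assert parser.parse_args(["--use_csls", "no"]).use_csls is False
assert parser.parse_args(["--use_csls", "True"]).use_csls is True
assert str2bool(True) is True  # bools pass through unchanged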
f71a33492bc89ba75ddffd485b3bbc63fcd86dc9
| 29,388
|
py
|
Python
|
source/deepsecurity/api/mac_lists_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:09.000Z
|
2021-10-30T16:40:09.000Z
|
source/deepsecurity/api/mac_lists_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-07-28T20:19:03.000Z
|
2021-07-28T20:19:03.000Z
|
source/deepsecurity/api/mac_lists_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:02.000Z
|
2021-10-30T16:40:02.000Z
|
# coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from deepsecurity.api_client import ApiClient
class MACListsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_mac_list(self, mac_list, api_version, **kwargs): # noqa: E501
"""Create a MAC List # noqa: E501
Create a new MAC list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_mac_list(mac_list, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MacList mac_list: The settings of the new MAC list. (required)
:param str api_version: The version of the api being called. (required)
:return: MacList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_mac_list_with_http_info(mac_list, api_version, **kwargs) # noqa: E501
else:
(data) = self.create_mac_list_with_http_info(mac_list, api_version, **kwargs) # noqa: E501
return data
def create_mac_list_with_http_info(self, mac_list, api_version, **kwargs): # noqa: E501
"""Create a MAC List # noqa: E501
Create a new MAC list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_mac_list_with_http_info(mac_list, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MacList mac_list: The settings of the new MAC list. (required)
:param str api_version: The version of the api being called. (required)
:return: MacList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['mac_list', 'api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_mac_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'mac_list' is set
if ('mac_list' not in params or
params['mac_list'] is None):
raise ValueError("Missing the required parameter `mac_list` when calling `create_mac_list`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `create_mac_list`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'mac_list' in params:
body_params = params['mac_list']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/maclists', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MacList', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
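    # Usage sketch for the wrappers above (client construction and 'v1' are
    # hypothetical): calling without async_req returns the MacList directly,
    # while async_req=True returns a thread whose .get() yields the same result.
    #
    #   api = MACListsApi(ApiClient())
    #   created = api.create_mac_list(new_mac_list, api_version='v1')
    #   thread = api.create_mac_list(new_mac_list, api_version='v1', async_req=True)
    #   created = thread.get()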
def delete_mac_list(self, mac_list_id, api_version, **kwargs): # noqa: E501
"""Delete a MAC List # noqa: E501
Delete a MAC list by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_mac_list(mac_list_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int mac_list_id: The ID number of the MAC list to delete. (required)
:param str api_version: The version of the api being called. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_mac_list_with_http_info(mac_list_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.delete_mac_list_with_http_info(mac_list_id, api_version, **kwargs) # noqa: E501
return data
def delete_mac_list_with_http_info(self, mac_list_id, api_version, **kwargs): # noqa: E501
"""Delete a MAC List # noqa: E501
Delete a MAC list by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_mac_list_with_http_info(mac_list_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int mac_list_id: The ID number of the MAC list to delete. (required)
:param str api_version: The version of the api being called. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['mac_list_id', 'api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_mac_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'mac_list_id' is set
if ('mac_list_id' not in params or
params['mac_list_id'] is None):
raise ValueError("Missing the required parameter `mac_list_id` when calling `delete_mac_list`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `delete_mac_list`") # noqa: E501
if 'mac_list_id' in params and not re.search('\\d+', str(params['mac_list_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `mac_list_id` when calling `delete_mac_list`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'mac_list_id' in params:
path_params['macListID'] = params['mac_list_id'] # noqa: E501
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/maclists/{macListID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def describe_mac_list(self, mac_list_id, api_version, **kwargs): # noqa: E501
"""Describe a MAC List # noqa: E501
Describe a MAC list by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_mac_list(mac_list_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int mac_list_id: The ID number of the MAC list to describe. (required)
:param str api_version: The version of the api being called. (required)
:return: MacList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.describe_mac_list_with_http_info(mac_list_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.describe_mac_list_with_http_info(mac_list_id, api_version, **kwargs) # noqa: E501
return data
def describe_mac_list_with_http_info(self, mac_list_id, api_version, **kwargs): # noqa: E501
"""Describe a MAC List # noqa: E501
Describe a MAC list by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_mac_list_with_http_info(mac_list_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int mac_list_id: The ID number of the MAC list to describe. (required)
:param str api_version: The version of the api being called. (required)
:return: MacList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['mac_list_id', 'api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method describe_mac_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'mac_list_id' is set
if ('mac_list_id' not in params or
params['mac_list_id'] is None):
raise ValueError("Missing the required parameter `mac_list_id` when calling `describe_mac_list`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `describe_mac_list`") # noqa: E501
if 'mac_list_id' in params and not re.search('\\d+', str(params['mac_list_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `mac_list_id` when calling `describe_mac_list`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'mac_list_id' in params:
path_params['macListID'] = params['mac_list_id'] # noqa: E501
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/maclists/{macListID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MacList', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_mac_lists(self, api_version, **kwargs): # noqa: E501
"""List MAC Lists # noqa: E501
Lists all MAC lists. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_mac_lists(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:return: MacLists
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_mac_lists_with_http_info(api_version, **kwargs) # noqa: E501
else:
(data) = self.list_mac_lists_with_http_info(api_version, **kwargs) # noqa: E501
return data
def list_mac_lists_with_http_info(self, api_version, **kwargs): # noqa: E501
"""List MAC Lists # noqa: E501
Lists all MAC lists. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_mac_lists_with_http_info(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:return: MacLists
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_mac_lists" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `list_mac_lists`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/maclists', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MacLists', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def modify_mac_list(self, mac_list_id, mac_list, api_version, **kwargs): # noqa: E501
"""Modify a MAC List # noqa: E501
Modify a MAC list by ID. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_mac_list(mac_list_id, mac_list, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int mac_list_id: The ID number of the MAC list to modify. (required)
:param MacList mac_list: The settings of the MAC list to modify. (required)
:param str api_version: The version of the api being called. (required)
:return: MacList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.modify_mac_list_with_http_info(mac_list_id, mac_list, api_version, **kwargs) # noqa: E501
else:
(data) = self.modify_mac_list_with_http_info(mac_list_id, mac_list, api_version, **kwargs) # noqa: E501
return data
def modify_mac_list_with_http_info(self, mac_list_id, mac_list, api_version, **kwargs): # noqa: E501
"""Modify a MAC List # noqa: E501
Modify a MAC list by ID. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_mac_list_with_http_info(mac_list_id, mac_list, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int mac_list_id: The ID number of the MAC list to modify. (required)
:param MacList mac_list: The settings of the MAC list to modify. (required)
:param str api_version: The version of the api being called. (required)
:return: MacList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['mac_list_id', 'mac_list', 'api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method modify_mac_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'mac_list_id' is set
if ('mac_list_id' not in params or
params['mac_list_id'] is None):
raise ValueError("Missing the required parameter `mac_list_id` when calling `modify_mac_list`") # noqa: E501
# verify the required parameter 'mac_list' is set
if ('mac_list' not in params or
params['mac_list'] is None):
raise ValueError("Missing the required parameter `mac_list` when calling `modify_mac_list`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `modify_mac_list`") # noqa: E501
if 'mac_list_id' in params and not re.search('\\d+', str(params['mac_list_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `mac_list_id` when calling `modify_mac_list`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'mac_list_id' in params:
path_params['macListID'] = params['mac_list_id'] # noqa: E501
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'mac_list' in params:
body_params = params['mac_list']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/maclists/{macListID}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MacList', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_mac_lists(self, api_version, **kwargs): # noqa: E501
"""Search MAC Lists # noqa: E501
Search for MAC lists using optional filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_mac_lists(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:param SearchFilter search_filter: A collection of options used to filter the search results.
:return: MacLists
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_mac_lists_with_http_info(api_version, **kwargs) # noqa: E501
else:
(data) = self.search_mac_lists_with_http_info(api_version, **kwargs) # noqa: E501
return data
def search_mac_lists_with_http_info(self, api_version, **kwargs): # noqa: E501
"""Search MAC Lists # noqa: E501
Search for MAC lists using optional filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_mac_lists_with_http_info(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:param SearchFilter search_filter: A collection of options used to filter the search results.
:return: MacLists
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_version', 'search_filter'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_mac_lists" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `search_mac_lists`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'search_filter' in params:
body_params = params['search_filter']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/maclists/search', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MacLists', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 43.281296
| 311
| 0.605928
|
from __future__ import absolute_import
import re
import six
from deepsecurity.api_client import ApiClient
class MACListsApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_mac_list(self, mac_list, api_version, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_mac_list_with_http_info(mac_list, api_version, **kwargs)
else:
(data) = self.create_mac_list_with_http_info(mac_list, api_version, **kwargs)
return data
def create_mac_list_with_http_info(self, mac_list, api_version, **kwargs):
all_params = ['mac_list', 'api_version']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_mac_list" % key
)
params[key] = val
del params['kwargs']
if ('mac_list' not in params or
params['mac_list'] is None):
raise ValueError("Missing the required parameter `mac_list` when calling `create_mac_list`")
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `create_mac_list`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version']
form_params = []
local_var_files = {}
body_params = None
if 'mac_list' in params:
body_params = params['mac_list']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['DefaultAuthentication']
return self.api_client.call_api(
'/maclists', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MacList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_mac_list(self, mac_list_id, api_version, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_mac_list_with_http_info(mac_list_id, api_version, **kwargs)
else:
(data) = self.delete_mac_list_with_http_info(mac_list_id, api_version, **kwargs)
return data
def delete_mac_list_with_http_info(self, mac_list_id, api_version, **kwargs):
all_params = ['mac_list_id', 'api_version']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_mac_list" % key
)
params[key] = val
del params['kwargs']
if ('mac_list_id' not in params or
params['mac_list_id'] is None):
raise ValueError("Missing the required parameter `mac_list_id` when calling `delete_mac_list`")
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `delete_mac_list`")
if 'mac_list_id' in params and not re.search('\\d+', str(params['mac_list_id'])):
raise ValueError("Invalid value for parameter `mac_list_id` when calling `delete_mac_list`, must conform to the pattern `/\\d+/`")
collection_formats = {}
path_params = {}
if 'mac_list_id' in params:
path_params['macListID'] = params['mac_list_id']
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['DefaultAuthentication']
return self.api_client.call_api(
'/maclists/{macListID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def describe_mac_list(self, mac_list_id, api_version, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.describe_mac_list_with_http_info(mac_list_id, api_version, **kwargs)
else:
(data) = self.describe_mac_list_with_http_info(mac_list_id, api_version, **kwargs)
return data
def describe_mac_list_with_http_info(self, mac_list_id, api_version, **kwargs):
all_params = ['mac_list_id', 'api_version']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method describe_mac_list" % key
)
params[key] = val
del params['kwargs']
if ('mac_list_id' not in params or
params['mac_list_id'] is None):
raise ValueError("Missing the required parameter `mac_list_id` when calling `describe_mac_list`")
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `describe_mac_list`")
if 'mac_list_id' in params and not re.search('\\d+', str(params['mac_list_id'])):
raise ValueError("Invalid value for parameter `mac_list_id` when calling `describe_mac_list`, must conform to the pattern `/\\d+/`")
collection_formats = {}
path_params = {}
if 'mac_list_id' in params:
path_params['macListID'] = params['mac_list_id']
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['DefaultAuthentication']
return self.api_client.call_api(
'/maclists/{macListID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MacList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_mac_lists(self, api_version, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_mac_lists_with_http_info(api_version, **kwargs)
else:
(data) = self.list_mac_lists_with_http_info(api_version, **kwargs)
return data
def list_mac_lists_with_http_info(self, api_version, **kwargs):
all_params = ['api_version']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_mac_lists" % key
)
params[key] = val
del params['kwargs']
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `list_mac_lists`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['DefaultAuthentication']
return self.api_client.call_api(
'/maclists', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MacLists',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def modify_mac_list(self, mac_list_id, mac_list, api_version, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.modify_mac_list_with_http_info(mac_list_id, mac_list, api_version, **kwargs)
else:
(data) = self.modify_mac_list_with_http_info(mac_list_id, mac_list, api_version, **kwargs)
return data
def modify_mac_list_with_http_info(self, mac_list_id, mac_list, api_version, **kwargs):
all_params = ['mac_list_id', 'mac_list', 'api_version']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method modify_mac_list" % key
)
params[key] = val
del params['kwargs']
if ('mac_list_id' not in params or
params['mac_list_id'] is None):
raise ValueError("Missing the required parameter `mac_list_id` when calling `modify_mac_list`")
if ('mac_list' not in params or
params['mac_list'] is None):
raise ValueError("Missing the required parameter `mac_list` when calling `modify_mac_list`")
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `modify_mac_list`")
if 'mac_list_id' in params and not re.search('\\d+', str(params['mac_list_id'])):
raise ValueError("Invalid value for parameter `mac_list_id` when calling `modify_mac_list`, must conform to the pattern `/\\d+/`")
collection_formats = {}
path_params = {}
if 'mac_list_id' in params:
path_params['macListID'] = params['mac_list_id']
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version']
form_params = []
local_var_files = {}
body_params = None
if 'mac_list' in params:
body_params = params['mac_list']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['DefaultAuthentication']
return self.api_client.call_api(
'/maclists/{macListID}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MacList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_mac_lists(self, api_version, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_mac_lists_with_http_info(api_version, **kwargs)
else:
(data) = self.search_mac_lists_with_http_info(api_version, **kwargs)
return data
def search_mac_lists_with_http_info(self, api_version, **kwargs):
all_params = ['api_version', 'search_filter']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_mac_lists" % key
)
params[key] = val
del params['kwargs']
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `search_mac_lists`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version']
form_params = []
local_var_files = {}
body_params = None
if 'search_filter' in params:
body_params = params['search_filter']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['DefaultAuthentication']
return self.api_client.call_api(
'/maclists/search', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MacLists',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
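    # Usage sketch (assumptions: these generated methods belong to an API class
    # whose name is not shown in this excerpt -- `MacListsApi` below is a
    # hypothetical stand-in -- and `client` is an already-configured ApiClient):
    #   api = MacListsApi(client)
    #   lists = api.list_mac_lists('v1')        # GET /maclists
    #   one = api.describe_mac_list(42, 'v1')   # GET /maclists/{macListID}
    #   api.delete_mac_list(42, 'v1')           # DELETE /maclists/{macListID}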
| true
| true
|
f71a3354afd52b38a1b508cdd629a00d472d8746
| 2,651
|
py
|
Python
|
tests/test_logger.py
|
agraubert/agutil
|
d9a568df01959ed985c9c8e77bdd501ac13bdbbf
|
[
"MIT"
] | 3
|
2017-06-05T15:46:22.000Z
|
2019-05-22T21:26:54.000Z
|
tests/test_logger.py
|
agraubert/agutil
|
d9a568df01959ed985c9c8e77bdd501ac13bdbbf
|
[
"MIT"
] | 93
|
2016-06-22T18:57:47.000Z
|
2022-02-14T10:50:27.000Z
|
tests/test_logger.py
|
agraubert/agutil
|
d9a568df01959ed985c9c8e77bdd501ac13bdbbf
|
[
"MIT"
] | null | null | null |
import unittest
import unittest.mock
import os
from py_compile import compile
import sys
import random
import time
import tempfile
from filecmp import cmp
def make_random_string(length=25, lower=0, upper=255):
return "".join(chr(random.randint(lower,upper)) for i in range(length))
def tempname():
(handle, name) = tempfile.mkstemp()
os.close(handle)
return name
class test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.script_path = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)
),
"agutil",
"src",
"logger.py"
)
cls.data_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data',
'logger'
)
sys.path.append(os.path.dirname(os.path.dirname(cls.script_path)))
random.seed()
def test_compilation(self):
compiled_path = compile(self.script_path)
self.assertTrue(compiled_path)
@unittest.skipIf(sys.platform.startswith('win'), "Tempfile cannot be used in this way on Windows")
def test_basic_logging(self):
import agutil.src.logger
time_mock = unittest.mock.Mock(side_effect = lambda fmt, time=0:fmt)
agutil.src.logger.time.strftime = time_mock
output_file = tempname()
log = agutil.src.logger.Logger(output_file, loglevel=agutil.src.logger.Logger.LOGLEVEL_DETAIL)
log.log("Test message")
log.log("More messages!", sender="me")
log.log("OH NO! This one's an error!", "Foo", "ERROR")
foo_bound = log.bindToSender("Foo")
log.mute("Foo", "Bar")
foo_bound("Message 1")
foo_bound("Message 2")
log.log("This should appear in the log, but not the dump", "Bar", "WARN")
foo_bound("Message 3")
log.unmute("Foo")
log.log("I've been unmuted!", "Foo")
log.log("This should be a warning", "Anyone", "BLORG")
time.sleep(.2)
log.addChannel("BLORG", 15)
log.setChannelCollection("BLORG", True)
log.log("This should be seen", "Anyone", "BLORG")
log.setChannelCollection("WARN", False)
log.setChannelCollection("WARN", True)
time.sleep(.2)
log.log("This should appear in the dump", "Bar", "WARN")
time.sleep(.1)
self.assertFalse(log.close())
self.assertTrue(cmp(
output_file,
os.path.join(
self.data_path,
'logger_compare.txt'
)
))
os.remove(output_file)
| 32.728395
| 102
| 0.590343
|
import unittest
import unittest.mock
import os
from py_compile import compile
import sys
import random
import time
import tempfile
from filecmp import cmp
def make_random_string(length=25, lower=0, upper=255):
return "".join(chr(random.randint(lower,upper)) for i in range(length))
def tempname():
(handle, name) = tempfile.mkstemp()
os.close(handle)
return name
class test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.script_path = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)
),
"agutil",
"src",
"logger.py"
)
cls.data_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data',
'logger'
)
sys.path.append(os.path.dirname(os.path.dirname(cls.script_path)))
random.seed()
def test_compilation(self):
compiled_path = compile(self.script_path)
self.assertTrue(compiled_path)
@unittest.skipIf(sys.platform.startswith('win'), "Tempfile cannot be used in this way on Windows")
def test_basic_logging(self):
import agutil.src.logger
time_mock = unittest.mock.Mock(side_effect = lambda fmt, time=0:fmt)
agutil.src.logger.time.strftime = time_mock
output_file = tempname()
log = agutil.src.logger.Logger(output_file, loglevel=agutil.src.logger.Logger.LOGLEVEL_DETAIL)
log.log("Test message")
log.log("More messages!", sender="me")
log.log("OH NO! This one's an error!", "Foo", "ERROR")
foo_bound = log.bindToSender("Foo")
log.mute("Foo", "Bar")
foo_bound("Message 1")
foo_bound("Message 2")
log.log("This should appear in the log, but not the dump", "Bar", "WARN")
foo_bound("Message 3")
log.unmute("Foo")
log.log("I've been unmuted!", "Foo")
log.log("This should be a warning", "Anyone", "BLORG")
time.sleep(.2)
log.addChannel("BLORG", 15)
log.setChannelCollection("BLORG", True)
log.log("This should be seen", "Anyone", "BLORG")
log.setChannelCollection("WARN", False)
log.setChannelCollection("WARN", True)
time.sleep(.2)
log.log("This should appear in the dump", "Bar", "WARN")
time.sleep(.1)
self.assertFalse(log.close())
self.assertTrue(cmp(
output_file,
os.path.join(
self.data_path,
'logger_compare.txt'
)
))
os.remove(output_file)
| true
| true
|
f71a33a61a60a199f194543768784c8caef1eda7
| 7,886
|
py
|
Python
|
python/pm4pyPlus.py
|
rivei/pm4py_with_dash
|
05ed524c11b44932783864a4465d400ea1300910
|
[
"MIT"
] | null | null | null |
python/pm4pyPlus.py
|
rivei/pm4py_with_dash
|
05ed524c11b44932783864a4465d400ea1300910
|
[
"MIT"
] | null | null | null |
python/pm4pyPlus.py
|
rivei/pm4py_with_dash
|
05ed524c11b44932783864a4465d400ea1300910
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 1 22:17:20 2019
@author: Wei
"""
#from dash_app import default_log as log
import pandas as pd
import numpy as np
#import pytz
from datetime import datetime, tzinfo,timedelta
from pm4py.statistics.traces.log import case_statistics
from pm4py.algo.filtering.log.attributes import attributes_filter
MAX_TRACES = 9999
def filtered_log_df(log, top_trace_n = MAX_TRACES):
# if top_trace_n == MAX_TRACES:
# traces_with_count = case_statistics.get_variant_statistics(log) #parameters=("max_variants_to_return":5)
# #df = pd.DataFrame.from_dict([dict(x) for x in traces_with_count])
# df = pd.DataFrame()
# df.columns = ['caseid','actid','actseq','resid','ts','sT']
# else:
n_cases = 0
caseid = []
actid = []
actseq = []
resid = []
ts = []
startTime = []
for case in log:
actidx = 0
startT = case[0]['time:timestamp'].timestamp()
for event in case:
caseid.append(n_cases)
actid.append(event['concept:name'])
actseq.append(actidx)
resid.append(event['org:resource'])
ts.append(event['time:timestamp'].timestamp())
startTime.append(event['time:timestamp'].timestamp() - startT)
actidx = actidx + 1
n_cases = n_cases + 1
df = pd.DataFrame({'caseid': caseid,
'actid':actid,
'actseq':actseq,
'resid':resid,
'ts':ts,
'sT': startTime})
df['preid'] = df['actid'].shift(1)
df['preid'] = df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1)
return df
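# Illustrative shape of the frame returned by filtered_log_df (values are
# hypothetical): 'preid' holds the previous activity within a case, with
# 'START' marking the first event of each case:
#   caseid  actid  actseq  preid
#   0       A      0       START
#   0       B      1       A
#   0       C      2       B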
def n_cases(log, top_trace_n = MAX_TRACES):
if top_trace_n == MAX_TRACES:
df = filtered_log_df(log)
else:
df = filtered_log_df(log, top_trace_n)
return len(df['caseid'].unique())
def n_events(log):
df = filtered_log_df(log)
return len(df)
def n_activities(log):
df = filtered_log_df(log)
return len(df['actid'].unique())
def n_resources(log):
df = filtered_log_df(log)
return len(df['resid'].unique())
def n_traces(log, top_trace_n = MAX_TRACES):
if top_trace_n == MAX_TRACES:
traces_with_count = case_statistics.get_variant_statistics(log) #parameters=("max_variants_to_return":5)
else:
traces_with_count = case_statistics.get_variant_statistics(log, parameters={"max_variants_to_return":top_trace_n})
df = pd.DataFrame.from_dict([dict(x) for x in traces_with_count])
return len(df)
def acts_df(log):
activities = attributes_filter.get_attribute_values(log, "concept:name")
actid = []
cnt = []
for act0 in activities.items():
actid.append(act0[0])
cnt.append(act0[1])
return pd.DataFrame({'id':actid, 'cnt':cnt})
def traces_df(log):
traces = case_statistics.get_variant_statistics(log)
tid = []
actid = []
actseq = []
cnt = []
n_traces = 0
for trace in traces:
actidx = 0
acts = trace['variant']
for s in acts.split(','):
tid.append(n_traces)
actid.append(s)
actseq.append(actidx)
cnt.append(trace['count'])
actidx = actidx+1
n_traces = n_traces + 1
trace_df = pd.DataFrame({'id': tid, 'actid': actid, 'actseq':actseq, 'cnt':cnt})
trace_df['preid'] = trace_df['actid'].shift(1)
trace_df['preid'] = trace_df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1)
trace_df['pre_post'] = trace_df.apply(lambda row: row['preid']+"@@"+row['actid'], axis = 1)
# def actid2num(sactid, df):
# nactid = -1
# for i in range(0, len(df)):
# if df['id'][i] == sactid:
# nactid = i/len(df)
# return nactid
#
# act_df = acts_df(log)
# trace_df['nactid'] = trace_df['actid'].apply(lambda i:actid2num(i, act_df))
return trace_df
def sort_df(log):
df = filtered_log_df(log)
dur = np.zeros(len(df))
evS = 0
evE = -1
for i in range(0, len(df)):
if df['actseq'][i] == 0:
evS = i
if i < len(df) - 1:
if df['actseq'][i + 1] == 0:
evE = i
else:
evE = i
if evE >= evS:
for j in range(evS, evE+1):
dur[j] = df['sT'][evE-1]
df['dur'] = dur
sort_df = df.sort_values(by=['dur','caseid', 'actseq'], ascending = [0,1,1])
sortid = 0
sid = np.zeros(len(sort_df))
for i in range(1, len(sort_df)):
if i < len(sort_df) - 1:
if sort_df.iloc[i,:]['caseid'] != sort_df.iloc[i-1,:]['caseid']:
sortid = sortid + 1
sid[i] = sortid
sort_df['sid'] = sid
return sort_df
def mtx_df(log):
df = traces_df(log)
prelist = (df['preid'].unique())
actlist = (df['actid'].unique())
dff = pd.DataFrame(columns=prelist,index = actlist)
# dff.columns = actlist
# dff.index = prelist
mtxdf1 = df.groupby('pre_post')['cnt'].sum() #agg(['sum','count','mean'])
#mtxdf1['abs'] = mtxdf1['sum']/mtxdf1['count']
# mtxdf= pd.DataFrame({'pre_post':mtxdf1.index, 'cnt': list(mtxdf1)})
for s in mtxdf1.index:
a = s.split("@@")
if len(a) != 2:
print(a[0], a[1])
else:
dff[a[0]][a[1]] = mtxdf1[s]
return dff
#
#activities = log_attributes_filter.get_attribute_values(log, "concept:name")
#actid = []
#cnt = []
#for act0 in activities.items():
# actid.append(act0[0])
# cnt.append(act0[1])
#
#act_df = pd.DataFrame({'id':actid, 'cnt':cnt})
#
#n_activities = len(act_df)
#
#from pm4py.statistics.traces.log import case_statistics
#traces = case_statistics.get_variant_statistics(log)#, parameters={"max_variants_to_return": 5})
#
##acts = []
##cnt = []
##tid = []
##idx = 0
##for trace in traces:
## tid.append(idx)
## acts.append(trace['variant'])
## cnt.append(trace['count'])
## idx = idx + 1
##
##trace_df = pd.DataFrame({'id': tid, 'acts': acts, 'cnt':cnt})
##n_traces = len(trace_df)
#
#tid = []
#actid = []
#actseq = []
#cnt = []
#n_traces = 0
#for trace in traces:
# actidx = 0
# acts = trace['variant']
# for s in acts.split(','):
# tid.append(n_traces)
# actid.append(s)
# actseq.append(actidx)
# cnt.append(trace['count'])
# actidx = actidx+1
# n_traces = n_traces + 1
#
#trace_df = pd.DataFrame({'id': tid, 'actid': actid, 'actseq':actseq, 'cnt':cnt})
#trace_df['preid'] = trace_df['actid'].shift(1)
#trace_df['preid'] = trace_df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1)
##trace_df['postid'] = trace_df['actid'].shift(1)
##trace_df['postid'] = trace_df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1)
#
#trace_df['pre_post'] = trace_df.apply(lambda row: row['preid']+"-"+row['actid'], axis = 1)
#
#def actid2num(sactid, df):
# nactid = -1
# for i in range(0, len(df)):
# if df['id'][i] == sactid:
# nactid = i/len(df)
# return nactid
#
##actid2num("Confirmation of receipt", act_df)
#
#trace_df['nactid'] = trace_df['actid'].apply(lambda i:actid2num(i, act_df))
#
## matrix
#df['pre_post'] = df.apply(lambda row: row['preid']+"-"+row['actid'], axis = 1)
##mtxdf1 = pd.DataFrame({'ant':df['preid'],'con':df})
#mtxdf1 = df[df['preid']!='START'].groupby('pre_post')['caseid'].count() #agg(['sum','count','mean'])
##mtxdf1['abs'] = mtxdf1['sum']/mtxdf1['count']
#mtxdf= pd.DataFrame({'pre_post':mtxdf1.index, 'cnt': list(mtxdf1)})
#
##roles Detection: related to resource vs activity?
##from pm4py.algo.enhancement.roles import factory as roles_factory
##roles = roles_factory.apply(log)
#aaa
| 30.565891
| 122
| 0.578113
|
import pandas as pd
import numpy as np
from datetime import datetime, tzinfo,timedelta
from pm4py.statistics.traces.log import case_statistics
from pm4py.algo.filtering.log.attributes import attributes_filter
MAX_TRACES = 9999
def filtered_log_df(log, top_trace_n = MAX_TRACES):
    n_cases = 0
    caseid = []
    actid = []
    actseq = []
    resid = []
    ts = []
    startTime = []
for case in log:
actidx = 0
startT = case[0]['time:timestamp'].timestamp()
for event in case:
caseid.append(n_cases)
actid.append(event['concept:name'])
actseq.append(actidx)
resid.append(event['org:resource'])
ts.append(event['time:timestamp'].timestamp())
startTime.append(event['time:timestamp'].timestamp() - startT)
actidx = actidx + 1
n_cases = n_cases + 1
df = pd.DataFrame({'caseid': caseid,
'actid':actid,
'actseq':actseq,
'resid':resid,
'ts':ts,
'sT': startTime})
df['preid'] = df['actid'].shift(1)
df['preid'] = df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1)
return df
def n_cases(log, top_trace_n = MAX_TRACES):
if top_trace_n == MAX_TRACES:
df = filtered_log_df(log)
else:
df = filtered_log_df(log, top_trace_n)
return len(df['caseid'].unique())
def n_events(log):
df = filtered_log_df(log)
return len(df)
def n_activities(log):
df = filtered_log_df(log)
return len(df['actid'].unique())
def n_resources(log):
df = filtered_log_df(log)
return len(df['resid'].unique())
def n_traces(log, top_trace_n = MAX_TRACES):
if top_trace_n == MAX_TRACES:
traces_with_count = case_statistics.get_variant_statistics(log)
else:
traces_with_count = case_statistics.get_variant_statistics(log, parameters={"max_variants_to_return":top_trace_n})
df = pd.DataFrame.from_dict([dict(x) for x in traces_with_count])
return len(df)
def acts_df(log):
activities = attributes_filter.get_attribute_values(log, "concept:name")
actid = []
cnt = []
for act0 in activities.items():
actid.append(act0[0])
cnt.append(act0[1])
return pd.DataFrame({'id':actid, 'cnt':cnt})
def traces_df(log):
traces = case_statistics.get_variant_statistics(log)
tid = []
actid = []
actseq = []
cnt = []
n_traces = 0
for trace in traces:
actidx = 0
acts = trace['variant']
for s in acts.split(','):
tid.append(n_traces)
actid.append(s)
actseq.append(actidx)
cnt.append(trace['count'])
actidx = actidx+1
n_traces = n_traces + 1
trace_df = pd.DataFrame({'id': tid, 'actid': actid, 'actseq':actseq, 'cnt':cnt})
trace_df['preid'] = trace_df['actid'].shift(1)
trace_df['preid'] = trace_df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1)
trace_df['pre_post'] = trace_df.apply(lambda row: row['preid']+"@@"+row['actid'], axis = 1)
return trace_df
def sort_df(log):
df = filtered_log_df(log)
dur = np.zeros(len(df))
evS = 0
evE = -1
for i in range(0, len(df)):
if df['actseq'][i] == 0:
evS = i
if i < len(df) - 1:
if df['actseq'][i + 1] == 0:
evE = i
else:
evE = i
if evE >= evS:
for j in range(evS, evE+1):
dur[j] = df['sT'][evE-1]
df['dur'] = dur
sort_df = df.sort_values(by=['dur','caseid', 'actseq'], ascending = [0,1,1])
sortid = 0
sid = np.zeros(len(sort_df))
for i in range(1, len(sort_df)):
if i < len(sort_df) - 1:
if sort_df.iloc[i,:]['caseid'] != sort_df.iloc[i-1,:]['caseid']:
sortid = sortid + 1
sid[i] = sortid
sort_df['sid'] = sid
return sort_df
def mtx_df(log):
df = traces_df(log)
prelist = (df['preid'].unique())
actlist = (df['actid'].unique())
dff = pd.DataFrame(columns=prelist,index = actlist)
mtxdf1 = df.groupby('pre_post')['cnt'].sum()
for s in mtxdf1.index:
a = s.split("@@")
if len(a) != 2:
print(a[0], a[1])
else:
dff[a[0]][a[1]] = mtxdf1[s]
return dff
| true
| true
|
f71a348d15db2579bb6b6dd7bce60ef5fc4a8a65
| 4,854
|
py
|
Python
|
pypureclient/flasharray/FA_2_8/models/active_directory.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flasharray/FA_2_8/models/active_directory.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flasharray/FA_2_8/models/active_directory.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_8 import models
class ActiveDirectory(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'computer_name': 'str',
'directory_servers': 'list[str]',
'domain': 'str',
'kerberos_servers': 'list[str]'
}
attribute_map = {
'name': 'name',
'computer_name': 'computer_name',
'directory_servers': 'directory_servers',
'domain': 'domain',
'kerberos_servers': 'kerberos_servers'
}
required_args = {
}
def __init__(
self,
name=None, # type: str
computer_name=None, # type: str
directory_servers=None, # type: List[str]
domain=None, # type: str
kerberos_servers=None, # type: List[str]
):
"""
Keyword args:
name (str): A locally unique, system-generated name. The name cannot be modified.
computer_name (str): The name of the computer account in the Active Directory domain.
directory_servers (list[str]): A list of directory servers used for lookups related to user authorization. Servers must be specified in FQDN format. All specified servers must be registered to the domain appropriately in the configured DNS of the array and are only communicated with over the secure LDAP (LDAPS) protocol. If this field is `null`, the servers are resolved for the domain in DNS.
domain (str): The Active Directory domain joined.
kerberos_servers (list[str]): A list of key distribution servers to use for Kerberos protocol. Servers must be specified in FQDN format. All specified servers must be registered to the domain appropriately in the configured DNS of the array. If this field is `null`, the servers are resolved for the domain in DNS.
"""
if name is not None:
self.name = name
if computer_name is not None:
self.computer_name = computer_name
if directory_servers is not None:
self.directory_servers = directory_servers
if domain is not None:
self.domain = domain
if kerberos_servers is not None:
self.kerberos_servers = kerberos_servers
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ActiveDirectory`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ActiveDirectory, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ActiveDirectory):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
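# Usage sketch (all values below are illustrative only):
#   ad = ActiveDirectory(computer_name='array01', domain='ad.example.com')
#   ad.to_dict()   # {'computer_name': 'array01', 'domain': 'ad.example.com'}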
| 35.691176
| 407
| 0.592707
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_8 import models
class ActiveDirectory(object):
swagger_types = {
'name': 'str',
'computer_name': 'str',
'directory_servers': 'list[str]',
'domain': 'str',
'kerberos_servers': 'list[str]'
}
attribute_map = {
'name': 'name',
'computer_name': 'computer_name',
'directory_servers': 'directory_servers',
'domain': 'domain',
'kerberos_servers': 'kerberos_servers'
}
required_args = {
}
def __init__(
self,
name=None,
computer_name=None,
directory_servers=None,
domain=None,
kerberos_servers=None,
):
if name is not None:
self.name = name
if computer_name is not None:
self.computer_name = computer_name
if directory_servers is not None:
self.directory_servers = directory_servers
if domain is not None:
self.domain = domain
if kerberos_servers is not None:
self.kerberos_servers = kerberos_servers
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ActiveDirectory`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ActiveDirectory, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ActiveDirectory):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f71a3506e2c79b16c7a1c6ca335f47af41777dc9
| 2,781
|
py
|
Python
|
antz/io.py
|
jmschrei/antz
|
74c901f543279b1904f2db9f3a70e5dcc7ade182
|
[
"MIT"
] | 3
|
2015-05-10T16:00:20.000Z
|
2016-06-22T22:03:05.000Z
|
antz/io.py
|
jmschrei/antz
|
74c901f543279b1904f2db9f3a70e5dcc7ade182
|
[
"MIT"
] | null | null | null |
antz/io.py
|
jmschrei/antz
|
74c901f543279b1904f2db9f3a70e5dcc7ade182
|
[
"MIT"
] | null | null | null |
# io.py
# Contact: Jacob Schreiber
# jmschr@cs.washington.edu
'''
This script focuses on data input and output, and currently supports the
following files:
* FastA
'''
from seq import *
class FastA( object ):
'''
This is a FastA file. It can contain many DNA, RNA, or Protein
sequences in it. This can be read in or written out.
'''
def __init__( self, sequences ):
'''
If sequences are passed in, they should be as the DNA, RNA, or protein
objects, so that all metadata is written out as well.
'''
self.sequences = sequences
def __str__( self ):
'''
String representation of the FastA
'''
return '\n'.join( sequence.to_fasta() for sequence in self.sequences )
def to_file( self, filename, attrs=None ):
'''
Write out a FastA file. Attrs specifies the attributes you want to
write out as well, in that order. Since any data can be stored in these
objects, it allows you to pick both what you want to write out, and
in what order. If nothing is provided, nothing is written out.
'''
with open( filename, 'w' ) as outfile:
# Write out each stored sequence
for sequence in self.sequences:
outfile.write( sequence.to_fasta( attrs ) )
@classmethod
def from_file( cls, filename, attrs=None, delimiter=' ', seqType=None ):
'''
		Read in a FastA file. Give names to each delimited item in the
		comments by specifying their attributes in order. Specify the seqType
		as the class object or string.
'''
if isinstance( seqType, str ):
if seqType.lower() == 'protein':
seqType = Protein
elif seqType.lower() == 'rna':
seqType = RNA
elif seqType.lower() == 'dna':
seqType = DNA
else:
seqType = Sequence
seqType = seqType or Sequence
sequences = []
with open( filename, 'r' ) as infile:
comments, sequence = None, ''
# Go through the file line by line
for line in infile:
# If the next line starts with a >, it means that the previous
# sequence has come to an end.
if line.startswith( '>' ):
# If a sequence has been found, create and append the
# sequence object
if sequence != '':
comments = comments.split( delimiter )
attributes = { attr: comment for attr, comment in zip( attrs, comments ) }
sequences.append( seqType( sequence, **attributes ) )
# Now get the comment, removing the > and any newlines
comments = line[1:].strip('\r\n')
# Reset the sequence
sequence = ''
else:
# Otherwise, append the sequence line to the growing
# sequence
sequence += line.strip('\r\n')
comments = comments.split( delimiter )
attributes = { attr: comment for attr, comment in zip( attrs, comments )}
sequences.append( seqType( sequence, **attributes ) )
return cls( sequences )
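# Usage sketch (assumption: 'seqs.fa' is a hypothetical FastA file whose
# comment lines look like '>seq1 description'):
#   fasta = FastA.from_file('seqs.fa', attrs=['id', 'description'], seqType='dna')
#   fasta.to_file('copy.fa', attrs=['id', 'description'])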
| 28.670103
| 80
| 0.665948
|
from seq import *
class FastA( object ):
def __init__( self, sequences ):
self.sequences = sequences
def __str__( self ):
return '\n'.join( sequence.to_fasta() for sequence in self.sequences )
def to_file( self, filename, attrs=None ):
with open( filename, 'w' ) as outfile:
for sequence in self.sequences:
outfile.write( sequence.to_fasta( attrs ) )
@classmethod
def from_file( cls, filename, attrs=None, delimiter=' ', seqType=None ):
if isinstance( seqType, str ):
if seqType.lower() == 'protein':
seqType = Protein
elif seqType.lower() == 'rna':
seqType = RNA
elif seqType.lower() == 'dna':
seqType = DNA
else:
seqType = Sequence
seqType = seqType or Sequence
sequences = []
with open( filename, 'r' ) as infile:
comments, sequence = None, ''
for line in infile:
if line.startswith( '>' ):
if sequence != '':
comments = comments.split( delimiter )
attributes = { attr: comment for attr, comment in zip( attrs, comments ) }
sequences.append( seqType( sequence, **attributes ) )
comments = line[1:].strip('\r\n')
sequence = ''
else:
sequence += line.strip('\r\n')
comments = comments.split( delimiter )
attributes = { attr: comment for attr, comment in zip( attrs, comments )}
sequences.append( seqType( sequence, **attributes ) )
return cls( sequences )
| true
| true
|
f71a3706a5e1e09a9b5ac6542d63281e2cb4bab7
| 1,370
|
py
|
Python
|
tests/test_platform_api.py
|
jain-aayush1123/here-location-services-python
|
11ad5ef8273b4f243c43bc00ebd470f725b980bc
|
[
"Apache-2.0"
] | 16
|
2021-02-15T13:49:29.000Z
|
2022-03-29T10:34:43.000Z
|
tests/test_platform_api.py
|
jain-aayush1123/here-location-services-python
|
11ad5ef8273b4f243c43bc00ebd470f725b980bc
|
[
"Apache-2.0"
] | 8
|
2021-02-27T18:40:46.000Z
|
2021-10-03T15:49:27.000Z
|
tests/test_platform_api.py
|
jain-aayush1123/here-location-services-python
|
11ad5ef8273b4f243c43bc00ebd470f725b980bc
|
[
"Apache-2.0"
] | 11
|
2021-02-16T04:58:08.000Z
|
2022-02-21T20:51:55.000Z
|
# Copyright (C) 2019-2021 HERE Europe B.V.
# SPDX-License-Identifier: Apache-2.0
"""This module will test platform api module."""
import pytest
from requests_oauthlib import OAuth1
from here_location_services.platform.apis.aaa_oauth2_api import AAAOauth2Api
from here_location_services.platform.apis.api import Api as PlaformApi
from here_location_services.utils import get_apikey
from tests.conftest import get_mock_response
LS_API_KEY = get_apikey()
def test_api_headers_property():
api = PlaformApi(access_token="dummy")
assert api.headers == {"Authorization": "Bearer dummy"}
def test_mock_request_post(mocker):
mocker.patch("requests.post", return_value=True)
api = PlaformApi(access_token="dummy")
resp = api.post("dummy_url", data={"foo": "bar"})
assert resp is True
def test_mock_request_scoped_access_token_exception(mocker):
reason = "This is mock reason"
text = "This is mock text"
mock_response = get_mock_response(500, reason, text)
mocker.patch("here_location_services.platform.apis.api.Api.post", return_value=mock_response)
aaa_api = AAAOauth2Api(base_url="dummy")
oauth = OAuth1(
"dummy_key",
client_secret="dummy_secret",
signature_method="HMAC-SHA256",
)
with pytest.raises(Exception):
aaa_api.request_scoped_access_token(oauth=oauth, data="dummy_data")
| 34.25
| 97
| 0.750365
|
import pytest
from requests_oauthlib import OAuth1
from here_location_services.platform.apis.aaa_oauth2_api import AAAOauth2Api
from here_location_services.platform.apis.api import Api as PlaformApi
from here_location_services.utils import get_apikey
from tests.conftest import get_mock_response
LS_API_KEY = get_apikey()
def test_api_headers_property():
api = PlaformApi(access_token="dummy")
assert api.headers == {"Authorization": "Bearer dummy"}
def test_mock_request_post(mocker):
mocker.patch("requests.post", return_value=True)
api = PlaformApi(access_token="dummy")
resp = api.post("dummy_url", data={"foo": "bar"})
assert resp is True
def test_mock_request_scoped_access_token_exception(mocker):
reason = "This is mock reason"
text = "This is mock text"
mock_response = get_mock_response(500, reason, text)
mocker.patch("here_location_services.platform.apis.api.Api.post", return_value=mock_response)
aaa_api = AAAOauth2Api(base_url="dummy")
oauth = OAuth1(
"dummy_key",
client_secret="dummy_secret",
signature_method="HMAC-SHA256",
)
with pytest.raises(Exception):
aaa_api.request_scoped_access_token(oauth=oauth, data="dummy_data")
| true
| true
|
f71a37cbfdc3fa96ea44404d682a0922befa7d2d
| 13,580
|
py
|
Python
|
scripts/blame_opt.py
|
regehr/yarpgen
|
025a8cb90df018578c892ec82051ddf74388ec2f
|
[
"Apache-2.0"
] | null | null | null |
scripts/blame_opt.py
|
regehr/yarpgen
|
025a8cb90df018578c892ec82051ddf74388ec2f
|
[
"Apache-2.0"
] | null | null | null |
scripts/blame_opt.py
|
regehr/yarpgen
|
025a8cb90df018578c892ec82051ddf74388ec2f
|
[
"Apache-2.0"
] | 1
|
2021-03-02T08:54:02.000Z
|
2021-03-02T08:54:02.000Z
|
#!/usr/bin/python3
###############################################################################
#
# Copyright (c) 2015-2020, Intel Corporation
# Copyright (c) 2019-2020, University of Utah
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
"""
Experimental script for automatically sorting errors based on the failed optimization phase
"""
###############################################################################
import logging
import os
import re
import common
import gen_test_makefile
import run_gen
icc_blame_opts = ["-from_rtn=0 -to_rtn=", "-num_opt=", "-num-case="]
icc_opt_patterns = ["\(\d+\)", "\(\d+\)\s*\n", "DO ANOTHER.*\(\d+\)"]
icc_opt_name_prefix = "DOING\s*\[\w*\]\s*"
icc_opt_name_suffix = "\s*\(\d*\)\s*\(last opt\)"
icx_blame_opts = ["-mllvm -opt-bisect-limit="]
icx_opt_patterns = ["BISECT: running pass \(\d+\)"]
icx_opt_name_prefix = "BISECT: running pass \(\d+\) "
icx_opt_name_suffix = " \(.*\)"
clang_blame_opts = ["-mllvm -opt-bisect-limit="]
clang_opt_patterns = ["BISECT: running pass \(\d+\)"]
clang_opt_name_prefix = "BISECT: running pass \(\d+\) "
clang_opt_name_suffix = " \(.*\)"
dpcpp_gpu_blame_opts = ["IGC_ShaderDumpEnableAll=1 IGC_ShaderDisableOptPassesAfter="]
dpcpp_gpu_patterns = ["Skipping optimization pass: .* (threshold: \(\d+\))."]
dpcpp_gpu_opt_name_prefix = "Skipping optimization pass: '"
dpcpp_gpu_opt_name_suffix = "' \(.*\)."
compilers_blame_opts = {"icc": icc_blame_opts, "icx": icx_blame_opts, "clang": clang_blame_opts, "dpcpp": dpcpp_gpu_blame_opts}
compilers_blame_patterns = {"icc": icc_opt_patterns, "icx": icx_opt_patterns, "clang": clang_opt_patterns, "dpcpp": dpcpp_gpu_patterns}
compilers_opt_name_cutter = {"icc": [icc_opt_name_prefix, icc_opt_name_suffix], \
"icx": [icx_opt_name_prefix, icx_opt_name_suffix], \
"clang": [clang_opt_name_prefix, clang_opt_name_suffix], \
"dpcpp": [dpcpp_gpu_opt_name_prefix, dpcpp_gpu_opt_name_suffix]}
blame_test_makefile_name = "Blame_Makefile"
###############################################################################
def get_next_step(start, end, current, fail_flag):
if fail_flag:
next_start = start
next_current = (current - start) // 2 + start
next_end = current
else:
next_start = current
next_current = (end - current) // 2 + current
next_end = end
return next_start, next_end, next_current
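# Bisection example (values illustrative): a failing run keeps the lower half
# of the search range, a passing run moves it to the upper half:
#   get_next_step(0, 100, 100, True)    # -> (0, 100, 50)
#   get_next_step(0, 100, 50, False)    # -> (50, 100, 75)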
def dump_exec_output(msg, ret_code, output, err_output, time_expired, num):
common.log_msg(logging.DEBUG, msg + " (process " + str(num) + ")")
common.log_msg(logging.DEBUG, "Ret code: " + str(ret_code) + " | process " + str(num))
common.log_msg(logging.DEBUG, "Time exp: " + str(time_expired) + " | process " + str(num))
common.log_msg(logging.DEBUG, "Output: " + str(output, "utf-8") + " | process " + str(num))
common.log_msg(logging.DEBUG, "Err output: " + str(err_output, "utf-8") + " | process " + str(num))
def execute_blame_phase(valid_res, fail_target, inject_str, num, phase_num):
gen_test_makefile.gen_makefile(
out_file_name = blame_test_makefile_name,
force = True,
config_file = None,
only_target = fail_target,
inject_blame_opt = inject_str + "-1" if fail_target.specs.name != "dpcpp" else None,
inject_blame_env = inject_str + "1" if fail_target.specs.name == "dpcpp" else None)
ret_code, output, err_output, time_expired, elapsed_time = \
common.run_cmd(["make", "-f", blame_test_makefile_name, fail_target.name], run_gen.compiler_timeout, num)
if fail_target.specs.name == "dpcpp":
ret_code, output, err_output, time_expired, elapsed_time = \
common.run_cmd(["make", "-f", blame_test_makefile_name, "run_" + fail_target.name], run_gen.compiler_timeout, num)
opt_num_regex = re.compile(compilers_blame_patterns[fail_target.specs.name][phase_num])
try:
if fail_target.specs.name == "dpcpp":
max_opt_num = 250
else:
matches = opt_num_regex.findall(str(err_output, "utf-8"))
# Some icc phases may not support going to phase "2", i.e. drilling down to num_case level,
# in this case we are done.
if phase_num == 2 and not matches:
return str(-1)
max_opt_num_str = matches[-1]
remove_brackets_pattern = re.compile("\d+")
max_opt_num = int(remove_brackets_pattern.findall(max_opt_num_str)[-1])
common.log_msg(logging.DEBUG, "Max opt num (process " + str(num) + "): " + str(max_opt_num))
except IndexError:
common.log_msg(logging.ERROR, "Can't decode max opt number using \"" + compilers_blame_patterns[fail_target.specs.name][phase_num]
+ "\" regexp (phase " + str(phase_num) + ") in the following output:\n" + str(err_output, "utf-8")
+ " (process " + str(num) + "): ")
raise
start_opt = 0
end_opt = max_opt_num
cur_opt = max_opt_num
failed_flag = True
time_to_finish = False
while not time_to_finish:
start_opt, end_opt, cur_opt = get_next_step(start_opt, end_opt, cur_opt, failed_flag)
common.log_msg(logging.DEBUG, "Previous failed (process " + str(num) + "): " + str(failed_flag))
failed_flag = False
eff = ((start_opt + 1) >= cur_opt) # Earliest fail was found
common.log_msg(logging.DEBUG, "Trying opt (process " + str(num) + "): " + str(start_opt) + "/" + str(cur_opt) + "/" + str(end_opt))
gen_test_makefile.gen_makefile(
out_file_name = blame_test_makefile_name,
force = True,
config_file = None,
only_target = fail_target,
inject_blame_opt = inject_str + str(cur_opt) if fail_target.specs.name != "dpcpp" else None,
inject_blame_env = inject_str + str(cur_opt) if fail_target.specs.name == "dpcpp" else None)
ret_code, output, err_output, time_expired, elapsed_time = \
common.run_cmd(["make", "-f", blame_test_makefile_name, fail_target.name], run_gen.compiler_timeout, num)
if time_expired or ret_code != 0:
dump_exec_output("Compilation failed", ret_code, output, err_output, time_expired, num)
failed_flag = True
if not eff:
continue
else:
break
ret_code, output, err_output, time_expired, elapsed_time = \
common.run_cmd(["make", "-f", blame_test_makefile_name, "run_" + fail_target.name], run_gen.run_timeout, num)
if time_expired or ret_code != 0:
dump_exec_output("Execution failed", ret_code, output, err_output, time_expired, num)
failed_flag = True
if not eff:
continue
else:
break
if str(output, "utf-8").split()[-1] != valid_res:
common.log_msg(logging.DEBUG, "Output differs (process " + str(num) + "): " + str(output, "utf-8").split()[-1] + " vs " + valid_res + " (expected)")
failed_flag = True
if not eff:
continue
else:
break
time_to_finish = (eff and failed_flag) or (eff and not failed_flag and (cur_opt == (end_opt - 1)))
common.log_msg(logging.DEBUG, "Time to finish (process " + str(num) + "): " + str(time_to_finish))
if not failed_flag:
common.log_msg(logging.DEBUG, "Swapping current and end opt (process " + str(num) + ")")
cur_opt = end_opt
common.log_msg(logging.DEBUG, "Finished blame phase, result: " + str(inject_str) + str(cur_opt) + " (process " + str(num) + ")")
return cur_opt
def blame(fail_dir, valid_res, fail_target, out_dir, lock, num, inplace):
blame_str = ""
stdout = stderr = b""
if not re.search("-O0", fail_target.args):
blame_opts = compilers_blame_opts[fail_target.specs.name]
phase_num = 0
blame_phase_num = 0
# Do blaming
try:
for i in blame_opts:
blame_str += i
blame_phase_num = execute_blame_phase(valid_res, fail_target, blame_str, num, phase_num)
if fail_target.specs.name == "dpcpp":
                # Special case because the triaging mechanism is different and there's only one level of triaging.
blame_str += str(blame_phase_num-1)
else:
blame_str += str(blame_phase_num)
blame_str += " "
phase_num += 1
except:
common.log_msg(logging.ERROR, "Something went wrong while executing blame_opt.py on " + str(fail_dir))
return False
# Wrap up results
gen_test_makefile.gen_makefile(
out_file_name = blame_test_makefile_name,
force = True,
config_file = None,
only_target = fail_target,
inject_blame_opt = blame_str if fail_target.specs.name != "dpcpp" else None,
inject_blame_env = blame_str if fail_target.specs.name == "dpcpp" else None)
ret_code, stdout, stderr, time_expired, elapsed_time = \
common.run_cmd(["make", "-f", blame_test_makefile_name, fail_target.name], run_gen.compiler_timeout, num)
if fail_target.specs.name == "dpcpp":
ret_code, stdout, stderr, time_expired, elapsed_time = \
common.run_cmd(["make", "-f", blame_test_makefile_name, "run_" + fail_target.name], run_gen.compiler_timeout, num)
if fail_target.specs.name != "dpcpp":
opt_name_pattern = re.compile(compilers_opt_name_cutter[fail_target.specs.name][0] + ".*" +
compilers_opt_name_cutter[fail_target.specs.name][1])
opt_name = opt_name_pattern.findall(str(stderr, "utf-8"))[-1]
opt_name = re.sub(compilers_opt_name_cutter[fail_target.specs.name][0], "", opt_name)
opt_name = re.sub(compilers_opt_name_cutter[fail_target.specs.name][1], "", opt_name)
real_opt_name = opt_name
opt_name = opt_name.replace(" ", "_")
else:
if blame_phase_num == 1:
                # It's a special case for DPC++. 1 means that triaging failed, and no specific phase can be blamed.
real_opt_name = opt_name = "FailedToBlame"
else:
opt_name_pattern = re.compile(compilers_opt_name_cutter[fail_target.specs.name][0] + ".*" +
compilers_opt_name_cutter[fail_target.specs.name][1])
opt_name = opt_name_pattern.findall(str(stderr, "utf-8"))[0]
opt_name = re.sub(compilers_opt_name_cutter[fail_target.specs.name][0], "", opt_name)
opt_name = re.sub(compilers_opt_name_cutter[fail_target.specs.name][1], "", opt_name)
real_opt_name = opt_name
opt_name = opt_name.replace(" ", "_")
else:
real_opt_name = opt_name = "O0_bug"
common.run_cmd(["make", "-f", blame_test_makefile_name, "clean"], run_gen.compiler_timeout, num)
seed_dir = os.path.basename(os.path.normpath(fail_dir))
# Create log files in different places depending on "inplace" switch.
if not inplace:
full_out_path = os.path.join(os.path.join(out_dir, opt_name), seed_dir)
common.copy_test_to_out(fail_dir, full_out_path, lock)
else:
full_out_path = "."
# Write to log
with open(os.path.join(full_out_path, "log.txt"), "a") as log_file:
log_file.write("\nBlaming for " + fail_target.name + " optset was done.\n")
log_file.write("Optimization to blame: " + real_opt_name + "\n")
log_file.write("Blame opts: " + blame_str + "\n\n")
log_file.write("Details of blaming run:\n")
log_file.write("=== Compiler log ==================================================\n")
log_file.write(str(stdout, "utf-8"))
log_file.write("=== Compiler err ==================================================\n")
log_file.write(str(stderr, "utf-8"))
log_file.write("=== Compiler end ==================================================\n")
common.log_msg(logging.DEBUG, "Done blaming")
    # Inplace mode requires the blaming string to be communicated back to the caller
if not inplace:
return True
else:
return real_opt_name
def prepare_env_and_blame(fail_dir, valid_res, fail_target, out_dir, lock, num, inplace=False):
common.log_msg(logging.DEBUG, "Blaming target: " + fail_target.name + " | " + fail_target.specs.name)
os.chdir(fail_dir)
if fail_target.specs.name not in compilers_blame_opts:
common.log_msg(logging.DEBUG, "We can't blame " + fail_target.name + " (process " + str(num) + ")")
return False
return blame(fail_dir, valid_res, fail_target, out_dir, lock, num, inplace)
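# Usage sketch (assumptions: `target` is a gen_test_makefile compiler target
# whose specs.name appears in compilers_blame_opts, `lock` is a multiprocessing
# lock guarding result copies, and '12345' is the hypothetical checksum printed
# by the passing run):
#   prepare_env_and_blame('/path/to/fail_dir', '12345', target, '/path/to/out',
#                         lock, num=0)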
| 48.848921
| 160
| 0.60891
| true
| true
|
|
f71a37e5c9f3342edb98fd5bc2f1279f8371e8c8
| 27,693
|
py
|
Python
|
src/python/turicreate/data_structures/sketch.py
|
pappasG/turicreate
|
494e313957a6c01333628b182a7d5bc6efea18f8
|
[
"BSD-3-Clause"
] | null | null | null |
src/python/turicreate/data_structures/sketch.py
|
pappasG/turicreate
|
494e313957a6c01333628b182a7d5bc6efea18f8
|
[
"BSD-3-Clause"
] | null | null | null |
src/python/turicreate/data_structures/sketch.py
|
pappasG/turicreate
|
494e313957a6c01333628b182a7d5bc6efea18f8
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
Efficiently compute the approximate statistics over an SArray.
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
from .._cython.cy_sketch import UnitySketchProxy
from .._cython.context import debug_trace as cython_context
from .sarray import SArray
from .sframe import SFrame
import operator
from math import sqrt
__all__ = ['Sketch']
class Sketch(object):
"""
The Sketch object contains a sketch of a single SArray (a column of an
SFrame). Using a sketch representation of an SArray, many approximate and
exact statistics can be computed very quickly.
To construct a Sketch object, the following methods are equivalent:
>>> my_sarray = turicreate.SArray([1,2,3,4,5])
>>> sketch = turicreate.Sketch(my_sarray)
>>> sketch = my_sarray.summary()
Typically, the SArray is a column of an SFrame:
>>> my_sframe = turicreate.SFrame({'column1': [1,2,3]})
>>> sketch = turicreate.Sketch(my_sframe['column1'])
>>> sketch = my_sframe['column1'].summary()
The sketch computation is fast, with complexity approximately linear in the
length of the SArray. After the Sketch is computed, all queryable functions
are performed nearly instantly.
A sketch can compute the following information depending on the dtype of the
SArray:
For numeric columns, the following information is provided exactly:
- length (:func:`~turicreate.Sketch.size`)
- number of missing Values (:func:`~turicreate.Sketch.num_missing`)
- minimum value (:func:`~turicreate.Sketch.min`)
- maximum value (:func:`~turicreate.Sketch.max`)
- mean (:func:`~turicreate.Sketch.mean`)
- variance (:func:`~turicreate.Sketch.var`)
- standard deviation (:func:`~turicreate.Sketch.std`)
And the following information is provided approximately:
- number of unique values (:func:`~turicreate.Sketch.num_unique`)
- quantiles (:func:`~turicreate.Sketch.quantile`)
- frequent items (:func:`~turicreate.Sketch.frequent_items`)
- frequency count for any value (:func:`~turicreate.Sketch.frequency_count`)
For non-numeric columns(str), the following information is provided exactly:
- length (:func:`~turicreate.Sketch.size`)
- number of missing values (:func:`~turicreate.Sketch.num_missing`)
And the following information is provided approximately:
- number of unique Values (:func:`~turicreate.Sketch.num_unique`)
- frequent items (:func:`~turicreate.Sketch.frequent_items`)
- frequency count of any value (:func:`~turicreate.Sketch.frequency_count`)
    For SArray of type list or array, there is a sub sketch for all sub elements.
    The sub sketch flattens all list/array values and then computes the sketch
    summary over the flattened values. The element sub sketch may be retrieved
    through:
- element_summary(:func:`~turicreate.Sketch.element_summary`)
For SArray of type dict, there are sub sketches for both dict key and value.
The sub sketch may be retrieved through:
- dict_key_summary(:func:`~turicreate.Sketch.dict_key_summary`)
- dict_value_summary(:func:`~turicreate.Sketch.dict_value_summary`)
    For an SArray of type dict, the user can also pass in a list of dictionary
    keys to the summary function; this generates one sub sketch for each key.
For example:
>>> sa = turicreate.SArray([{'a':1, 'b':2}, {'a':3}])
>>> sketch = sa.summary(sub_sketch_keys=["a", "b"])
Then the sub summary may be retrieved by:
>>> sketch.element_sub_sketch()
or to get subset keys:
>>> sketch.element_sub_sketch(["a"])
    Similarly, for an SArray of type vector (array), the user can also pass in
    a list of integers, which are indices into the vector, to get sub sketches.
    For example:
>>> sa = turicreate.SArray([[100,200,300,400,500], [100,200,300], [400,500]])
>>> sketch = sa.summary(sub_sketch_keys=[1,3,5])
Then the sub summary may be retrieved by:
>>> sketch.element_sub_sketch()
    or, for a subset of keys:
    >>> sketch.element_sub_sketch([1,3])
Please see the individual function documentation for detail about each of
these statistics.
Parameters
----------
array : SArray
Array to generate sketch summary.
background : boolean
If True, the sketch construction will return immediately and the
sketch will be constructed in the background. While this is going on,
the sketch can be queried incrementally, but at a performance penalty.
Defaults to False.
References
----------
- Wikipedia. `Streaming algorithms. <http://en.wikipedia.org/wiki/Streaming_algorithm>`_
- Charikar, et al. (2002) `Finding frequent items in data streams.
<https://www.cs.rutgers.edu/~farach/pubs/FrequentStream.pdf>`_
- Cormode, G. and Muthukrishnan, S. (2004) `An Improved Data Stream Summary:
The Count-Min Sketch and its Applications.
<http://dimacs.rutgers.edu/~graham/pubs/papers/cm-latin.pdf>`_
"""
def __init__(self, array=None, background=False, sub_sketch_keys=[], _proxy=None):
"""__init__(array)
Construct a new Sketch from an SArray.
Parameters
----------
array : SArray
Array to sketch.
background : boolean, optional
            If True, run the sketch in the background. The state of the sketch
            may be queried by calling (:func:`~turicreate.Sketch.sketch_ready`).
            Defaults to False.
sub_sketch_keys : list
            The list of sub sketches to calculate. For an SArray of dictionary
            type, each key needs to be a string; for an SArray of vector (array)
            type, each key needs to be a positive integer.
"""
if (_proxy):
self.__proxy__ = _proxy
else:
self.__proxy__ = UnitySketchProxy()
if not isinstance(array, SArray):
raise TypeError("Sketch object can only be constructed from SArrays")
self.__proxy__.construct_from_sarray(array.__proxy__, background, sub_sketch_keys)
def __repr__(self):
"""
Emits a brief summary of all the statistics as a string.
"""
fields = [
['size', 'Length' , 'Yes'],
['min', 'Min' , 'Yes'],
['max', 'Max' , 'Yes'],
['mean', 'Mean' , 'Yes'],
['sum', 'Sum' , 'Yes'],
['var', 'Variance' , 'Yes'],
['std', 'Standard Deviation' , 'Yes'],
            ['num_missing', '# Missing Values' , 'Yes'],
['num_unique', '# unique values', 'No' ]
]
s = '\n'
result = []
for field in fields:
try:
method_to_call = getattr(self, field[0])
result.append([field[1], str(method_to_call()), field[2]])
except:
pass
sf = SArray(result).unpack(column_name_prefix = "")
sf.rename({'0': 'item', '1':'value', '2': 'is exact'}, inplace=True)
s += sf.__str__(footer=False)
s += "\n"
s += "\nMost frequent items:\n"
frequent = self.frequent_items()
# convert to string key
frequent_strkeys = {}
for key in frequent:
strkey = str(key)
if strkey in frequent_strkeys:
frequent_strkeys[strkey] += frequent[key]
else:
frequent_strkeys[strkey] = frequent[key]
sorted_freq = sorted(frequent_strkeys.items(), key=operator.itemgetter(1), reverse=True)
if len(sorted_freq) == 0:
s += " -- All elements appear with less than 0.01% frequency -- \n"
else:
sorted_freq = sorted_freq[:10]
sf = SFrame()
sf['value'] = [elem[0] for elem in sorted_freq]
sf['count'] = [elem[1] for elem in sorted_freq]
s += sf.__str__(footer=False) + "\n"
s += "\n"
try:
# print quantiles
self.quantile(0) # XXX: is this necessary?
s += "Quantiles: \n"
sf = SFrame()
for q in [0.0,0.01,0.05,0.25,0.5,0.75,0.95,0.99,1.00]:
sf.add_column(SArray([self.quantile(q)]), str(int(q * 100)) + '%', inplace=True)
s += sf.__str__(footer=False) + "\n"
except:
pass
try:
t_k = self.dict_key_summary()
t_v = self.dict_value_summary()
s += "\n******** Dictionary Element Key Summary ********\n"
s += t_k.__repr__()
s += "\n******** Dictionary Element Value Summary ********\n"
s += t_v.__repr__() + '\n'
except:
pass
try:
t_k = self.element_summary()
s += "\n******** Element Summary ********\n"
s += t_k.__repr__() + '\n'
except:
pass
return s.expandtabs(8)
def __str__(self):
"""
Emits a brief summary of all the statistics as a string.
"""
return self.__repr__()
def size(self):
"""
Returns the size of the input SArray.
Returns
-------
out : int
The number of elements of the input SArray.
"""
with cython_context():
return int(self.__proxy__.size())
def max(self):
"""
Returns the maximum value in the SArray. Returns *nan* on an empty
array. Throws an exception if called on an SArray with non-numeric type.
Raises
------
RuntimeError
Throws an exception if the SArray is a non-numeric type.
Returns
-------
out : type of SArray
Maximum value of SArray. Returns nan if the SArray is empty.
"""
with cython_context():
return self.__proxy__.max()
def min(self):
"""
Returns the minimum value in the SArray. Returns *nan* on an empty
array. Throws an exception if called on an SArray with non-numeric type.
Raises
------
RuntimeError
If the sarray is a non-numeric type.
Returns
-------
out : type of SArray
Minimum value of SArray. Returns nan if the sarray is empty.
"""
with cython_context():
return self.__proxy__.min()
def sum(self):
"""
Returns the sum of all the values in the SArray. Returns 0 on an empty
array. Throws an exception if called on an sarray with non-numeric type.
Will overflow without warning.
Raises
------
RuntimeError
If the sarray is a non-numeric type.
Returns
-------
out : type of SArray
Sum of all values in SArray. Returns 0 if the SArray is empty.
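        Examples
        --------
        A small sanity-check sketch (the sum is exact for numeric SArrays):
        >>> sa = turicreate.SArray([1, 2, 3])
        >>> sa.summary().sum()  # expected: 6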
"""
with cython_context():
return self.__proxy__.sum()
def mean(self):
"""
Returns the mean of the values in the SArray. Returns 0 on an empty
array. Throws an exception if called on an SArray with non-numeric type.
Raises
------
RuntimeError
If the sarray is a non-numeric type.
Returns
-------
out : float
Mean of all values in SArray. Returns 0 if the sarray is empty.
"""
with cython_context():
return self.__proxy__.mean()
def std(self):
"""
Returns the standard deviation of the values in the SArray. Returns 0 on
an empty array. Throws an exception if called on an SArray with
non-numeric type.
Returns
-------
out : float
The standard deviation of all the values. Returns 0 if the sarray is
empty.
Raises
------
RuntimeError
If the sarray is a non-numeric type.
"""
return sqrt(self.var())
def var(self):
"""
Returns the variance of the values in the sarray. Returns 0 on an empty
array. Throws an exception if called on an SArray with non-numeric type.
Raises
------
RuntimeError
If the sarray is a non-numeric type.
Returns
-------
out : float
The variance of all the values. Returns 0 if the SArray is empty.
"""
with cython_context():
return self.__proxy__.var()
def num_missing(self):
"""
        Returns the number of missing (i.e. None) values in the SArray.
Return 0 on an empty SArray.
Returns
-------
out : int
The number of missing values in the SArray.
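        Examples
        --------
        A small illustration (missing-value counts are exact):
        >>> sa = turicreate.SArray([1, None, 3])
        >>> sa.summary().num_missing()  # expected: 2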
"""
with cython_context():
return int(self.__proxy__.num_undefined())
def num_unique(self):
"""
Returns a sketched estimate of the number of unique values in the
        SArray based on the HyperLogLog sketch.
Returns
-------
out : float
An estimate of the number of unique values in the SArray.
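        Examples
        --------
        A small illustration (the result is an estimate, not an exact count):
        >>> sa = turicreate.SArray([1, 2, 2, 3])
        >>> sa.summary().num_unique()  # expected to be close to 3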
"""
with cython_context():
return int(self.__proxy__.num_unique())
def frequent_items(self):
"""
Returns a sketched estimate of the most frequent elements in the SArray
based on the SpaceSaving sketch. It is only guaranteed that all
elements which appear in more than 0.01% rows of the array will
appear in the set of returned elements. However, other elements may
also appear in the result. The item counts are estimated using
the CountSketch.
Missing values are not taken into account when computing frequent items.
If this function returns no elements, it means that all elements appear
with less than 0.01% occurrence.
Returns
-------
out : dict
A dictionary mapping items and their estimated occurrence frequencies.
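        Examples
        --------
        A small illustration (counts are estimates from the sketch):
        >>> sa = turicreate.SArray(['a', 'a', 'b'])
        >>> sa.summary().frequent_items()  # e.g. {'a': 2, 'b': 1}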
"""
with cython_context():
return self.__proxy__.frequent_items()
def quantile(self, quantile_val):
"""
Returns a sketched estimate of the value at a particular quantile
between 0.0 and 1.0. The quantile is guaranteed to be accurate within
1%: meaning that if you ask for the 0.55 quantile, the returned value is
guaranteed to be between the true 0.54 quantile and the true 0.56
quantile. The quantiles are only defined for numeric arrays and this
function will throw an exception if called on a sketch constructed for a
non-numeric column.
Parameters
----------
quantile_val : float
A value between 0.0 and 1.0 inclusive. Values below 0.0 will be
interpreted as 0.0. Values above 1.0 will be interpreted as 1.0.
Raises
------
RuntimeError
If the sarray is a non-numeric type.
Returns
-------
out : float | str
An estimate of the value at a quantile.
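        Examples
        --------
        A small illustration (accurate to within 1%, as described above):
        >>> sa = turicreate.SArray(list(range(1000)))
        >>> sa.summary().quantile(0.5)  # roughly 500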
"""
with cython_context():
return self.__proxy__.get_quantile(quantile_val)
def frequency_count(self, element):
"""
Returns a sketched estimate of the number of occurrences of a given
element. This estimate is based on the count sketch. The element type
must be of the same type as the input SArray. Throws an exception if
element is of the incorrect type.
Parameters
----------
element : val
An element of the same type as the SArray.
Raises
------
RuntimeError
Throws an exception if element is of the incorrect type.
Returns
-------
out : int
An estimate of the number of occurrences of the element.
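        Examples
        --------
        A small illustration (the count is an estimate from the count sketch):
        >>> sa = turicreate.SArray([1, 1, 2, 3])
        >>> sa.summary().frequency_count(1)  # expected: about 2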
"""
with cython_context():
return int(self.__proxy__.frequency_count(element))
def sketch_ready(self):
"""
Returns True if the sketch has been executed on all the data.
If the sketch is created with background == False (default), this will
always return True. Otherwise, this will return False until the sketch
is ready.
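        Examples
        --------
        A hypothetical polling loop for a background sketch:
        >>> sk = sa.summary(background=True)
        >>> while not sk.sketch_ready():
        ...     pass  # the partial sketch may still be queried meanwhile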
"""
with cython_context():
return self.__proxy__.sketch_ready()
def num_elements_processed(self):
"""
Returns the number of elements processed so far.
If the sketch is created with background == False (default), this will
always return the length of the input array. Otherwise, this will
return the number of elements processed so far.
"""
with cython_context():
return self.__proxy__.num_elements_processed()
def element_length_summary(self):
"""
        Returns the sketch summary for the element length. This is only valid
        for a sketch constructed from an SArray of type list/array/dict;
        raises a RuntimeError otherwise.
Examples
--------
>>> sa = turicreate.SArray([[j for j in range(i)] for i in range(1,1000)])
>>> sa.summary().element_length_summary()
+--------------------+---------------+----------+
| item | value | is exact |
+--------------------+---------------+----------+
| Length | 999 | Yes |
| Min | 1.0 | Yes |
| Max | 999.0 | Yes |
| Mean | 500.0 | Yes |
| Sum | 499500.0 | Yes |
| Variance | 83166.6666667 | Yes |
| Standard Deviation | 288.386314978 | Yes |
| # Missing Values | 0 | Yes |
| # unique values | 992 | No |
+--------------------+---------------+----------+
Most frequent items:
+-------+---+---+---+---+---+---+---+---+---+----+
| value | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
+-------+---+---+---+---+---+---+---+---+---+----+
| count | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
+-------+---+---+---+---+---+---+---+---+---+----+
Quantiles:
+-----+------+------+-------+-------+-------+-------+-------+-------+
| 0% | 1% | 5% | 25% | 50% | 75% | 95% | 99% | 100% |
+-----+------+------+-------+-------+-------+-------+-------+-------+
| 1.0 | 10.0 | 50.0 | 250.0 | 500.0 | 750.0 | 950.0 | 990.0 | 999.0 |
+-----+------+------+-------+-------+-------+-------+-------+-------+
Returns
-------
out : Sketch
            A new sketch object summarizing the element lengths of the current SArray.
"""
with cython_context():
return Sketch(_proxy = self.__proxy__.element_length_summary())
def dict_key_summary(self):
"""
        Returns the sketch summary for all dictionary keys. This is only valid
        for a sketch object created from an SArray of dict type. Dictionary keys
        are converted to strings before the sketch summary is computed.
Examples
--------
>>> sa = turicreate.SArray([{'I':1, 'love': 2}, {'nature':3, 'beauty':4}])
>>> sa.summary().dict_key_summary()
+------------------+-------+----------+
| item | value | is exact |
+------------------+-------+----------+
| Length | 4 | Yes |
| # Missing Values | 0 | Yes |
| # unique values | 4 | No |
+------------------+-------+----------+
Most frequent items:
+-------+---+------+--------+--------+
| value | I | love | beauty | nature |
+-------+---+------+--------+--------+
| count | 1 | 1 | 1 | 1 |
+-------+---+------+--------+--------+
"""
with cython_context():
return Sketch(_proxy = self.__proxy__.dict_key_summary())
def dict_value_summary(self):
"""
        Returns the sketch summary for all dictionary values. This is only
        valid for a sketch object created from an SArray of dict type.
        The type of the value summary is inferred from the first set of values.
Examples
--------
>>> sa = turicreate.SArray([{'I':1, 'love': 2}, {'nature':3, 'beauty':4}])
>>> sa.summary().dict_value_summary()
+--------------------+---------------+----------+
| item | value | is exact |
+--------------------+---------------+----------+
| Length | 4 | Yes |
| Min | 1.0 | Yes |
| Max | 4.0 | Yes |
| Mean | 2.5 | Yes |
| Sum | 10.0 | Yes |
| Variance | 1.25 | Yes |
| Standard Deviation | 1.11803398875 | Yes |
| # Missing Values | 0 | Yes |
| # unique values | 4 | No |
+--------------------+---------------+----------+
Most frequent items:
+-------+-----+-----+-----+-----+
| value | 1.0 | 2.0 | 3.0 | 4.0 |
+-------+-----+-----+-----+-----+
| count | 1 | 1 | 1 | 1 |
+-------+-----+-----+-----+-----+
Quantiles:
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 0% | 1% | 5% | 25% | 50% | 75% | 95% | 99% | 100% |
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 1.0 | 1.0 | 1.0 | 2.0 | 3.0 | 4.0 | 4.0 | 4.0 | 4.0 |
+-----+-----+-----+-----+-----+-----+-----+-----+------+
"""
with cython_context():
return Sketch(_proxy = self.__proxy__.dict_value_summary())
def element_summary(self):
"""
        Returns the sketch summary for all element values. This is only valid
        for a sketch object created from an SArray of list or vector (array)
        type. For an SArray of list type, all list values are treated as
        strings for the sketch summary. For an SArray of vector type, the
        sketch summary is computed over FLOAT values.
Examples
--------
>>> sa = turicreate.SArray([[1,2,3], [4,5]])
>>> sa.summary().element_summary()
+--------------------+---------------+----------+
| item | value | is exact |
+--------------------+---------------+----------+
| Length | 5 | Yes |
| Min | 1.0 | Yes |
| Max | 5.0 | Yes |
| Mean | 3.0 | Yes |
| Sum | 15.0 | Yes |
| Variance | 2.0 | Yes |
| Standard Deviation | 1.41421356237 | Yes |
| # Missing Values | 0 | Yes |
| # unique values | 5 | No |
+--------------------+---------------+----------+
Most frequent items:
+-------+-----+-----+-----+-----+-----+
| value | 1.0 | 2.0 | 3.0 | 4.0 | 5.0 |
+-------+-----+-----+-----+-----+-----+
| count | 1 | 1 | 1 | 1 | 1 |
+-------+-----+-----+-----+-----+-----+
Quantiles:
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 0% | 1% | 5% | 25% | 50% | 75% | 95% | 99% | 100% |
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 1.0 | 1.0 | 1.0 | 2.0 | 3.0 | 4.0 | 5.0 | 5.0 | 5.0 |
+-----+-----+-----+-----+-----+-----+-----+-----+------+
"""
with cython_context():
return Sketch(_proxy = self.__proxy__.element_summary())
def element_sub_sketch(self, keys = None):
"""
        Returns the sketch summary for the given set of keys. This is only
        applicable for a sketch summary created from an SArray of array or
        dict type.
        For a dict SArray, the keys are the keys in the dict values.
        For an array SArray, the keys are indices into the array values.
        The keys must be passed to the original summary() call in order to
        be retrievable later.
Parameters
        ----------
        keys : list of str | str | list of int | int
            The list of dictionary keys or array indices for which to get
            sub-sketches. If not given, all available sub-sketches are
            retrieved.
Returns
-------
        out : dict
            A dictionary mapping each key (index) to the sketch summary for
            that key (index).
Examples
--------
>>> sa = turicreate.SArray([{'a':1, 'b':2}, {'a':4, 'd':1}])
>>> s = sa.summary(sub_sketch_keys=['a','b'])
>>> s.element_sub_sketch(['a'])
{'a':
+--------------------+-------+----------+
| item | value | is exact |
+--------------------+-------+----------+
| Length | 2 | Yes |
| Min | 1.0 | Yes |
| Max | 4.0 | Yes |
| Mean | 2.5 | Yes |
| Sum | 5.0 | Yes |
| Variance | 2.25 | Yes |
| Standard Deviation | 1.5 | Yes |
| # Missing Values | 0 | Yes |
| # unique values | 2 | No |
+--------------------+-------+----------+
Most frequent items:
+-------+-----+-----+
| value | 1.0 | 4.0 |
+-------+-----+-----+
| count | 1 | 1 |
+-------+-----+-----+
Quantiles:
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 0% | 1% | 5% | 25% | 50% | 75% | 95% | 99% | 100% |
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 1.0 | 1.0 | 1.0 | 1.0 | 4.0 | 4.0 | 4.0 | 4.0 | 4.0 |
+-----+-----+-----+-----+-----+-----+-----+-----+------+}
"""
single_val = False
if keys is None:
keys = []
else:
if not isinstance(keys, list):
single_val = True
keys = [keys]
value_types = set([type(i) for i in keys])
if (len(value_types) > 1):
raise ValueError("All keys should have the same type.")
with cython_context():
ret_sketches = self.__proxy__.element_sub_sketch(keys)
ret = {}
# check return key matches input key
for key in keys:
if key not in ret_sketches:
raise KeyError("Cannot retrieve element sub sketch for key '" + str(key) + "'. Element sub sketch can only be retrieved when the summary object was created using the 'sub_sketch_keys' option.")
for key in ret_sketches:
ret[key] = Sketch(_proxy = ret_sketches[key])
if single_val:
return ret[keys[0]]
else:
return ret
def cancel(self):
"""
Cancels a background sketch computation immediately if one is ongoing.
Does nothing otherwise.
Examples
--------
        >>> s = sa.summary(background=True)
>>> s.cancel()
"""
with cython_context():
self.__proxy__.cancel()
| 36.728117
| 209
| 0.502799
|
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
from .._cython.cy_sketch import UnitySketchProxy
from .._cython.context import debug_trace as cython_context
from .sarray import SArray
from .sframe import SFrame
import operator
from math import sqrt
__all__ = ['Sketch']
class Sketch(object):
def __init__(self, array=None, background=False, sub_sketch_keys=[], _proxy=None):
if (_proxy):
self.__proxy__ = _proxy
else:
self.__proxy__ = UnitySketchProxy()
if not isinstance(array, SArray):
raise TypeError("Sketch object can only be constructed from SArrays")
self.__proxy__.construct_from_sarray(array.__proxy__, background, sub_sketch_keys)
def __repr__(self):
fields = [
['size', 'Length' , 'Yes'],
['min', 'Min' , 'Yes'],
['max', 'Max' , 'Yes'],
['mean', 'Mean' , 'Yes'],
['sum', 'Sum' , 'Yes'],
['var', 'Variance' , 'Yes'],
['std', 'Standard Deviation' , 'Yes'],
['num_missing', '# Missing Values' , 'Yes',],
['num_unique', '# unique values', 'No' ]
]
s = '\n'
result = []
for field in fields:
try:
method_to_call = getattr(self, field[0])
result.append([field[1], str(method_to_call()), field[2]])
except:
pass
sf = SArray(result).unpack(column_name_prefix = "")
sf.rename({'0': 'item', '1':'value', '2': 'is exact'}, inplace=True)
s += sf.__str__(footer=False)
s += "\n"
s += "\nMost frequent items:\n"
frequent = self.frequent_items()
frequent_strkeys = {}
for key in frequent:
strkey = str(key)
if strkey in frequent_strkeys:
frequent_strkeys[strkey] += frequent[key]
else:
frequent_strkeys[strkey] = frequent[key]
sorted_freq = sorted(frequent_strkeys.items(), key=operator.itemgetter(1), reverse=True)
if len(sorted_freq) == 0:
s += " -- All elements appear with less than 0.01% frequency -- \n"
else:
sorted_freq = sorted_freq[:10]
sf = SFrame()
sf['value'] = [elem[0] for elem in sorted_freq]
sf['count'] = [elem[1] for elem in sorted_freq]
s += sf.__str__(footer=False) + "\n"
s += "\n"
try:
self.quantile(0)
s += "Quantiles: \n"
sf = SFrame()
for q in [0.0,0.01,0.05,0.25,0.5,0.75,0.95,0.99,1.00]:
sf.add_column(SArray([self.quantile(q)]), str(int(q * 100)) + '%', inplace=True)
s += sf.__str__(footer=False) + "\n"
except:
pass
try:
t_k = self.dict_key_summary()
t_v = self.dict_value_summary()
s += "\n******** Dictionary Element Key Summary ********\n"
s += t_k.__repr__()
s += "\n******** Dictionary Element Value Summary ********\n"
s += t_v.__repr__() + '\n'
except:
pass
try:
t_k = self.element_summary()
s += "\n******** Element Summary ********\n"
s += t_k.__repr__() + '\n'
except:
pass
return s.expandtabs(8)
def __str__(self):
return self.__repr__()
def size(self):
with cython_context():
return int(self.__proxy__.size())
def max(self):
with cython_context():
return self.__proxy__.max()
def min(self):
with cython_context():
return self.__proxy__.min()
def sum(self):
with cython_context():
return self.__proxy__.sum()
def mean(self):
with cython_context():
return self.__proxy__.mean()
def std(self):
return sqrt(self.var())
def var(self):
with cython_context():
return self.__proxy__.var()
def num_missing(self):
with cython_context():
return int(self.__proxy__.num_undefined())
def num_unique(self):
with cython_context():
return int(self.__proxy__.num_unique())
def frequent_items(self):
with cython_context():
return self.__proxy__.frequent_items()
def quantile(self, quantile_val):
with cython_context():
return self.__proxy__.get_quantile(quantile_val)
def frequency_count(self, element):
with cython_context():
return int(self.__proxy__.frequency_count(element))
def sketch_ready(self):
with cython_context():
return self.__proxy__.sketch_ready()
def num_elements_processed(self):
with cython_context():
return self.__proxy__.num_elements_processed()
def element_length_summary(self):
with cython_context():
return Sketch(_proxy = self.__proxy__.element_length_summary())
def dict_key_summary(self):
with cython_context():
return Sketch(_proxy = self.__proxy__.dict_key_summary())
def dict_value_summary(self):
with cython_context():
return Sketch(_proxy = self.__proxy__.dict_value_summary())
def element_summary(self):
with cython_context():
return Sketch(_proxy = self.__proxy__.element_summary())
def element_sub_sketch(self, keys = None):
single_val = False
if keys is None:
keys = []
else:
if not isinstance(keys, list):
single_val = True
keys = [keys]
value_types = set([type(i) for i in keys])
if (len(value_types) > 1):
raise ValueError("All keys should have the same type.")
with cython_context():
ret_sketches = self.__proxy__.element_sub_sketch(keys)
ret = {}
for key in keys:
if key not in ret_sketches:
raise KeyError("Cannot retrieve element sub sketch for key '" + str(key) + "'. Element sub sketch can only be retrieved when the summary object was created using the 'sub_sketch_keys' option.")
for key in ret_sketches:
ret[key] = Sketch(_proxy = ret_sketches[key])
if single_val:
return ret[keys[0]]
else:
return ret
def cancel(self):
with cython_context():
self.__proxy__.cancel()
| true
| true
|
f71a380e5b2adadd88bb74e831433cb584917dad
| 965
|
py
|
Python
|
docs/source/rules/examples/REQ-E004/tester.py
|
yyang08/swagger-spec-compatibility
|
e7a6ba6fc53c6a8a92ba26016219a595a8cecbbe
|
[
"Apache-2.0"
] | 18
|
2019-04-30T21:07:30.000Z
|
2021-12-16T17:56:08.000Z
|
docs/source/rules/examples/REQ-E004/tester.py
|
yyang08/swagger-spec-compatibility
|
e7a6ba6fc53c6a8a92ba26016219a595a8cecbbe
|
[
"Apache-2.0"
] | 30
|
2019-02-26T11:25:44.000Z
|
2021-04-16T00:12:11.000Z
|
docs/source/rules/examples/REQ-E004/tester.py
|
yyang08/swagger-spec-compatibility
|
e7a6ba6fc53c6a8a92ba26016219a595a8cecbbe
|
[
"Apache-2.0"
] | 6
|
2019-02-25T22:12:29.000Z
|
2020-12-23T00:24:48.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os.path import abspath
from bravado.client import SwaggerClient
from jsonschema import ValidationError
from six.moves.urllib.parse import urljoin
from six.moves.urllib.request import pathname2url
old_client = SwaggerClient.from_url(
spec_url=urljoin('file:', pathname2url(abspath('old.yaml'))),
)
new_client = SwaggerClient.from_url(
spec_url=urljoin('file:', pathname2url(abspath('new.yaml'))),
)
object_to_send = {'property_1': 'v1', 'property_2': 'v2', 'property_3': 'v3'}
print('Calling the post endpoint with the old client: Succeeded')
old_client.endpoint.post_endpoint(body=object_to_send)
print('Calling the post endpoint with the new client: Failed')
try:
new_client.endpoint.post_endpoint(body=object_to_send)
raise RuntimeError('An error was expected')
except ValidationError:
pass
| 31.129032
| 77
| 0.779275
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os.path import abspath
from bravado.client import SwaggerClient
from jsonschema import ValidationError
from six.moves.urllib.parse import urljoin
from six.moves.urllib.request import pathname2url
old_client = SwaggerClient.from_url(
spec_url=urljoin('file:', pathname2url(abspath('old.yaml'))),
)
new_client = SwaggerClient.from_url(
spec_url=urljoin('file:', pathname2url(abspath('new.yaml'))),
)
object_to_send = {'property_1': 'v1', 'property_2': 'v2', 'property_3': 'v3'}
print('Calling the post endpoint with the old client: Succeeded')
old_client.endpoint.post_endpoint(body=object_to_send)
print('Calling the post endpoint with the new client: Failed')
try:
new_client.endpoint.post_endpoint(body=object_to_send)
raise RuntimeError('An error was expected')
except ValidationError:
pass
| true
| true
|
f71a3828e5ab1b447e9e0f5e00e3b95d8c4e7d7e
| 3,496
|
py
|
Python
|
examples/upload_a_chapter.py
|
PythonCoderAS/Hondana
|
14a7db9837bbe78212c462f845278777c246e3bf
|
[
"MIT"
] | 19
|
2021-07-21T01:25:06.000Z
|
2022-03-14T21:22:45.000Z
|
examples/upload_a_chapter.py
|
PythonCoderAS/Hondana
|
14a7db9837bbe78212c462f845278777c246e3bf
|
[
"MIT"
] | 5
|
2021-12-05T22:21:59.000Z
|
2022-03-18T16:30:24.000Z
|
examples/upload_a_chapter.py
|
PythonCoderAS/Hondana
|
14a7db9837bbe78212c462f845278777c246e3bf
|
[
"MIT"
] | 12
|
2021-07-17T18:26:33.000Z
|
2022-03-21T19:57:46.000Z
|
"""
This example shows three different ways to perform this task.
Please examine all three to find a method you like.
If you ask me: I prefer the first.
"""
import asyncio
import hondana
# Create your client, you must be authorised to upload a chapter.
client = hondana.Client(username="my username", password="my password")
async def main():
"""
In this example we are going to upload a chapter to the MangaDex API.
"""
# Define your chapter details
chapter = "1"
volume = "1"
translated_language = "en"
title = "..."
scanlator_groups = ["..."]
# Get the manga we are going to upload a chapter for.
manga = await client.view_manga("...")
# let's open up some images, and store their ``bytes`` in memory
## NOTE: The order of this list is important, this is the order in which the pages will be presented in the finished upload.
## Please ensure you order this correctly.
files: list[bytes] = []
# Open our upload session
async with client.upload_session(
manga,
volume=volume,
chapter=chapter,
title=title,
translated_language=translated_language,
scanlator_groups=scanlator_groups,
) as upload_session:
# First we upload the bytes we stored in memory, adhering to the earlier note.
await upload_session.upload_images(files)
# Then we choose to commit that data, which returns a valid ``hondana.Chapter`` instance.
chapter = await upload_session.commit()
## You can also choose not to commit manually, exiting this context manager will commit for you, and discard the returned chapter data.
async def alternative_main():
# Define your chapter details
chapter = "1"
volume = "1"
translated_language = "en"
title = "..."
scanlator_groups = ["..."]
# This will create and return an instance of ``hondana.ChapterUpload``
## You can also use a manga ID, or a ``hondana.Manga`` instance as the first parameter
upload_session = client.upload_session(
"...",
volume=volume,
chapter=chapter,
title=title,
translated_language=translated_language,
scanlator_groups=scanlator_groups,
)
# I recommend the context manager method, since the session checking and committing are done for you.
await upload_session._check_for_session()
# Create and upload your images.
## NOTE: The order of this list is important, this is the order in which the pages will be presented in the finished upload.
## Please ensure you order this correctly.
images: list[bytes] = []
await upload_session.upload_images(images)
## NOTE: You **MUST** commit when not using the context manager.
chapter = await upload_session.commit()
async def other_alternative_main():
# Define your chapter details
chapter = "1"
volume = "1"
translated_language = "en"
title = "..."
scanlator_groups = ["..."]
# Create and upload your images.
## NOTE: The order of this list is important, this is the order in which the pages will be presented in the finished upload.
## Please ensure you order this correctly.
images: list[bytes] = []
chapter = await client.upload_chapter(
"...",
volume=volume,
chapter=chapter,
title=title,
translated_language=translated_language,
images=images,
scanlator_groups=scanlator_groups,
)
asyncio.run(main())
| 30.4
| 139
| 0.670767
|
import asyncio
import hondana
client = hondana.Client(username="my username", password="my password")
async def main():
chapter = "1"
volume = "1"
translated_language = "en"
title = "..."
scanlator_groups = ["..."]
manga = await client.view_manga("...")
## NOTE: The order of this list is important, this is the order in which the pages will be presented in the finished upload.
## Please ensure you order this correctly.
files: list[bytes] = []
# Open our upload session
async with client.upload_session(
manga,
volume=volume,
chapter=chapter,
title=title,
translated_language=translated_language,
scanlator_groups=scanlator_groups,
) as upload_session:
# First we upload the bytes we stored in memory, adhering to the earlier note.
await upload_session.upload_images(files)
# Then we choose to commit that data, which returns a valid ``hondana.Chapter`` instance.
chapter = await upload_session.commit()
## You can also choose not to commit manually, exiting this context manager will commit for you, and discard the returned chapter data.
async def alternative_main():
# Define your chapter details
chapter = "1"
volume = "1"
translated_language = "en"
title = "..."
scanlator_groups = ["..."]
# This will create and return an instance of ``hondana.ChapterUpload``
## You can also use a manga ID, or a ``hondana.Manga`` instance as the first parameter
upload_session = client.upload_session(
"...",
volume=volume,
chapter=chapter,
title=title,
translated_language=translated_language,
scanlator_groups=scanlator_groups,
)
# I recommend the context manager method, since the session checking and committing are done for you.
await upload_session._check_for_session()
# Create and upload your images.
## NOTE: The order of this list is important, this is the order in which the pages will be presented in the finished upload.
## Please ensure you order this correctly.
images: list[bytes] = []
await upload_session.upload_images(images)
## NOTE: You **MUST** commit when not using the context manager.
chapter = await upload_session.commit()
async def other_alternative_main():
# Define your chapter details
chapter = "1"
volume = "1"
translated_language = "en"
title = "..."
scanlator_groups = ["..."]
# Create and upload your images.
## NOTE: The order of this list is important, this is the order in which the pages will be presented in the finished upload.
## Please ensure you order this correctly.
images: list[bytes] = []
chapter = await client.upload_chapter(
"...",
volume=volume,
chapter=chapter,
title=title,
translated_language=translated_language,
images=images,
scanlator_groups=scanlator_groups,
)
asyncio.run(main())
| true
| true
|
f71a387c3ff2cd382f14cdd92eec52461942a18f
| 945
|
py
|
Python
|
questions/q354_water_overflow/code.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | null | null | null |
questions/q354_water_overflow/code.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | 1
|
2021-05-15T07:56:51.000Z
|
2021-05-15T07:56:51.000Z
|
questions/q354_water_overflow/code.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | null | null | null |
class Solution:
def waterOverflow(self, K, R, C):
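        # Simulate a glass pyramid: K units are poured into the top glass.
        # Each glass holds 1 unit; any excess splits evenly between the two
        # glasses directly below it. The answer is the amount of water in the
        # glass at row R, column C (both 1-indexed).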
if R <= 0 or C <= 0 or C > R :
return 0
table = [[K]]
i = 0
while True :
table.append([0]*(i+2))
flag = True
for j in range(i+1) :
if table[i][j] > 1 :
val = (table[i][j] - 1) / 2
table[i][j] = 1
table[i+1][j] += val
table[i+1][j+1] += val
flag = False
if flag or i > (R-1) :
break
i += 1
if table[R-1][C-1] == int(table[R-1][C-1]) :
return int(table[R-1][C-1])
return round(table[R-1][C-1], 6)
if __name__ == '__main__':
    t = int(input())
    for _ in range(t):
        K, R, C = map(int, input().split())
        ob = Solution()
        print(ob.waterOverflow(K, R, C))
| 26.25
| 52
| 0.359788
|
class Solution:
def waterOverflow(self, K, R, C):
if R <= 0 or C <= 0 or C > R :
return 0
table = [[K]]
i = 0
while True :
table.append([0]*(i+2))
flag = True
for j in range(i+1) :
if table[i][j] > 1 :
val = (table[i][j] - 1) / 2
table[i][j] = 1
table[i+1][j] += val
table[i+1][j+1] += val
flag = False
if flag or i > (R-1) :
break
i += 1
if table[R-1][C-1] == int(table[R-1][C-1]) :
return int(table[R-1][C-1])
return round(table[R-1][C-1], 6)
if __name__ == '__main__':
    t = int(input())
    for _ in range(t):
        K, R, C = map(int, input().split())
        ob = Solution()
        print(ob.waterOverflow(K, R, C))
| true
| true
|
f71a389b852f7333755362f2c4739c7e128d3163
| 173
|
py
|
Python
|
LR/production/test.py
|
whz-NJ/PersonalRecommendation
|
4887209270f052d6d39bb35ee0c90498496849d8
|
[
"Apache-2.0"
] | null | null | null |
LR/production/test.py
|
whz-NJ/PersonalRecommendation
|
4887209270f052d6d39bb35ee0c90498496849d8
|
[
"Apache-2.0"
] | null | null | null |
LR/production/test.py
|
whz-NJ/PersonalRecommendation
|
4887209270f052d6d39bb35ee0c90498496849d8
|
[
"Apache-2.0"
] | null | null | null |
# Check how many columns the file content has
if __name__ == "__main__":
fp = open("../data/lr_coef")
count = 0
for line in fp:
item = line.strip().split(",")
print (len(item))
| 19.222222
| 38
| 0.531792
|
if __name__ == "__main__":
fp = open("../data/lr_coef")
count = 0
for line in fp:
item = line.strip().split(",")
print (len(item))
| true
| true
|
f71a3969c7a14edff97577d65dbc459028956dcc
| 654
|
py
|
Python
|
projects/migrations/0017_project_user.py
|
Tuitoek/Awwards
|
090b4a0dc7ea3b0b733d61732fca4554baba5e90
|
[
"MIT"
] | null | null | null |
projects/migrations/0017_project_user.py
|
Tuitoek/Awwards
|
090b4a0dc7ea3b0b733d61732fca4554baba5e90
|
[
"MIT"
] | null | null | null |
projects/migrations/0017_project_user.py
|
Tuitoek/Awwards
|
090b4a0dc7ea3b0b733d61732fca4554baba5e90
|
[
"MIT"
] | 1
|
2021-09-21T12:52:12.000Z
|
2021-09-21T12:52:12.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-20 14:32
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('projects', '0016_auto_20190320_1731'),
]
operations = [
migrations.AddField(
model_name='project',
name='user',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| 27.25
| 124
| 0.683486
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('projects', '0016_auto_20190320_1731'),
]
operations = [
migrations.AddField(
model_name='project',
name='user',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| true
| true
|
f71a397e2dbddfef3306743b9d7789a6cc7dd8b2
| 46,025
|
py
|
Python
|
selfdrive/car/hyundai/values.py
|
yunbong2/multi-076
|
5079eab33fbc69097e38cd8aced3c904c11c9bc8
|
[
"MIT"
] | null | null | null |
selfdrive/car/hyundai/values.py
|
yunbong2/multi-076
|
5079eab33fbc69097e38cd8aced3c904c11c9bc8
|
[
"MIT"
] | null | null | null |
selfdrive/car/hyundai/values.py
|
yunbong2/multi-076
|
5079eab33fbc69097e38cd8aced3c904c11c9bc8
|
[
"MIT"
] | 5
|
2020-09-28T06:36:56.000Z
|
2020-09-29T13:26:03.000Z
|
from cereal import car
from selfdrive.car import dbc_dict
from common.params import Params
Ecu = car.CarParams.Ecu
# Steer torque limits
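# (Assumed semantics, following the usual openpilot convention: STEER_MAX caps
#  the torque command, the DELTA values bound its per-frame rate of change, and
#  the DRIVER_* values control how driver steering input relaxes the limit.)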
class SteerLimitParams:
STEER_MAX = 280 # 409 is the max, 255 is stock
STEER_DELTA_UP = 5
STEER_DELTA_DOWN = 5
STEER_DRIVER_ALLOWANCE = 50
STEER_DRIVER_MULTIPLIER = 2
STEER_DRIVER_FACTOR = 1
class CAR:
AVANTE = "HYUNDAI AVANTE"
SONATA = "HYUNDAI SONATA"
SONATA_HEV = "HYUNDAI SONATA Hybrid"
SONATA_TURBO = "HYUNDAI SONATA Turbo"
GRANDEUR = "HYUNDAI GRANDEUR"
GRANDEUR_HEV = "HYUNDAI GRANDEUR Hybrid"
GENESIS = "GENESIS"
SANTAFE = "HYUNDAI SANTAFE"
KONA = "HYUNDAI KONA"
KONA_HEV = "HYUNDAI KONA Hybrid"
KONA_EV = "HYUNDAI KONA ELECTRIC"
IONIQ_HEV = "HYUNDAI IONIQ HYBRID"
IONIQ_EV = "HYUNDAI IONIQ ELECTRIC"
K5 = "KIA K5"
K5_HEV = "KIA K5 Hybrid"
K7 = "KIA K7"
K7_HEV = "KIA K7 Hybrid"
STINGER = "KIA STINGER"
SORENTO = "KIA SORENTO"
NIRO_HEV = "KIA NIRO Hybrid"
NIRO_EV = "KIA NIRO ELECTRIC"
NEXO = "HYUNDAI NEXO"
MOHAVE = "KIA MOHAVE"
I30 = "HYUNDAI I30"
SELTOS = "KIA SELTOS"
PALISADE = "HYUNDAI PALISADE"
class Buttons:
NONE = 0
RES_ACCEL = 1
SET_DECEL = 2
GAP_DIST = 3
CANCEL = 4
params = Params()
fingerprint_issued_fix = params.get("FingerprintIssuedFix", encoding='utf8') == "1"
if fingerprint_issued_fix:
FINGERPRINTS = {
CAR.AVANTE: [{}],
CAR.SONATA: [{}],
CAR.SONATA_HEV: [{}],
CAR.SONATA_TURBO: [{}],
CAR.GRANDEUR: [{}],
CAR.GRANDEUR_HEV: [{}],
CAR.GENESIS: [{}],
CAR.SANTAFE: [{}],
CAR.KONA: [{}],
CAR.KONA_HEV: [{}],
CAR.KONA_EV: [{}],
CAR.IONIQ_HEV: [{}],
CAR.IONIQ_EV: [{}],
CAR.K5: [{}],
CAR.K5_HEV: [{}],
CAR.K7: [{}],
CAR.K7_HEV: [{}],
CAR.STINGER: [{}],
CAR.NIRO_HEV: [{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1535: 8},
{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1470: 8, 1535: 8},
{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1470: 8, 1535: 8},
{68: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1535: 8},
{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1292: 8, 1345: 8, 1363: 8, 1419: 8, 1429: 8, 1448: 8, 1456: 4},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.NIRO_EV: [{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8}],
CAR.NEXO: [{}],
CAR.MOHAVE: [{}],
CAR.I30: [{}],
CAR.SELTOS: [{}],
CAR.PALISADE: [{}],
CAR.SORENTO: [{}],
}
else:
FINGERPRINTS = {
CAR.AVANTE: [{66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 608: 8, 790: 8, 809: 8, 832: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1345: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1427: 6, 1440: 8, 1456: 4, 1472: 8, 1491: 8, 1530: 8}],
CAR.SONATA: [{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8},
{64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 625: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1371: 8, 1407: 8, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1460: 8, 1470: 8, 1472: 8, 1491: 8, 1530: 8, 1990: 8, 1998: 8, 2016: 8, 2024: 8},
{66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 608: 8, 790: 8, 809: 8, 832: 8, 899: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1460: 8, 1470: 8, 1472: 8, 1491: 8, 1530: 8}],
CAR.SONATA_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 593: 8, 688: 5, 832: 7, 881: 8, 882: 8, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1345: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8}],
CAR.SONATA_TURBO: [{64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1371: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1460: 8, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 2015: 8, 2024: 8},
{66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1460: 8, 1470: 8, 1471: 8, 1472: 8, 1491: 8, 1530: 8, 1532: 5, 2016: 8, 2024: 8},
{64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1460: 8, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1905: 8, 1913: 8, 1990: 8, 1998: 8, 2006: 8, 2014: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8}],
CAR.GRANDEUR: [{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8}],
CAR.GRANDEUR_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1185: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.GENESIS: [{67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1268: 8, 1280: 1, 1281: 3, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1437: 8, 1456: 4},
{67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1378: 4, 1379: 8, 1384: 5, 1407: 8, 1425: 2, 1427: 6, 1437: 8, 1456: 4},
{67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1437: 8, 1456: 4}],
CAR.SANTAFE: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 764: 8, 809: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1186: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 912: 7, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8, 1628: 8, 1629: 8, 1630: 8, 1631: 8, 1674: 8, 1675: 8, 1676: 8, 1677: 8, 1791: 8, 2015: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 8, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8, 1479: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1186: 2, 1191: 2, 1210: 8, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1911: 8}],
CAR.KONA: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 3, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1156: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1193: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8}],
CAR.KONA_HEV: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 3, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1990: 8, 1998: 8, 2001: 8, 2004: 8, 2009: 8, 2012: 8, 2015: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1138: 4, 1151: 6, 1155: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 8, 1379: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.KONA_EV: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 3, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1990: 8, 1998: 8, 2001: 8, 2004: 8, 2009: 8, 2012: 8, 2015: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 549: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8}],
CAR.IONIQ_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.IONIQ_EV: [{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1425: 2, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 545: 8, 546: 8, 548: 8, 549: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 546: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8}],
CAR.K5: [{64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1186: 2, 1191: 2, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 1952: 8, 1960: 8, 1988: 8, 1996: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8},
{64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1268: 8, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1491: 8, 1492: 8}],
CAR.K5_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1236: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 909: 8, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1420: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.K7: [{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 608: 8, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8}],
CAR.K7_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1096: 8, 1102: 8, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1210: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1379: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.STINGER: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 4, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1437: 8, 1456: 4, 1470: 8}],
CAR.NIRO_HEV: [{}],
CAR.NIRO_EV: [{}],
CAR.NEXO: [{127: 8, 145: 8, 146: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 512: 6, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 908: 8, 909: 8, 912: 7, 916: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1174: 8, 1180: 8, 1183: 8, 1186: 2, 1191: 2, 1192: 8, 1193: 8, 1210: 8, 1219: 8, 1220: 8, 1222: 6, 1223: 8, 1224: 8, 1227: 8, 1230: 6, 1231: 6, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1297: 8, 1298: 8, 1305: 8, 1312: 8, 1315: 8, 1316: 8, 1322: 8, 1324: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1437: 8, 1456: 4, 1460: 8, 1470: 8, 1484: 8, 1507: 8, 1520: 8, 1535: 8},
{127: 8, 145: 8, 146: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 512: 6, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 908: 8, 909: 8, 912: 7, 916: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1174: 8, 1180: 8, 1183: 8, 1186: 2, 1191: 2, 1192: 8, 1193: 8, 1210: 8, 1219: 8, 1220: 8, 1222: 6, 1223: 8, 1224: 8, 1227: 8, 1230: 6, 1231: 6, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1297: 8, 1298: 8, 1305: 8, 1312: 8, 1315: 8, 1316: 8, 1322: 8, 1324: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1437: 8, 1456: 4, 1460: 8, 1470: 8, 1484: 8, 1507: 8, 1520: 8, 1535: 8}],
CAR.MOHAVE: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8}],
CAR.I30: [{67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 608: 8, 790: 8, 809: 8, 832: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8}],
CAR.SELTOS: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 905: 8, 909: 8, 910: 5, 911: 5, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1114: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1446: 8, 1456: 4, 1470: 8, 1485: 8, 1911: 8}],
CAR.PALISADE: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 549: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 2000: 8, 2005: 8, 2008: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8}],
CAR.SORENTO: [{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1384: 8, 1407: 8, 1411: 8, 1419: 8, 1425: 2, 1427: 6, 1444: 8, 1456: 4, 1470: 8, 1489: 1},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 8, 1168: 7, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 548: 8, 550: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 8, 1168: 7, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 8, 1168: 7, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 608: 8, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1384: 5, 1407: 8, 1411: 8, 1419: 8, 1427: 6, 1437: 8, 1444: 8, 1456: 4, 1470: 8, 1489: 1, 1990: 8, 1998: 8}],
}
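# Illustrative sketch (hypothetical helper, not part of the original file):
# fingerprinting matches the observed {CAN address: payload length} map against
# each candidate dict above; a candidate survives only if every observed
# message appears in it with the same length.
def matches_fingerprint(observed, candidate):
  return all(candidate.get(addr) == length for addr, length in observed.items())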
ECU_FINGERPRINT = {
Ecu.fwdCamera: [832, 1156, 1191, 1342]
}
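# Illustrative sketch (hypothetical helper): ECU_FINGERPRINT lists the message
# IDs an ECU is expected to broadcast, so the presence of e.g. the forward
# camera can be inferred by checking that all of its IDs were seen on the bus.
def ecu_present(ecu, observed):
  return all(addr in observed for addr in ECU_FINGERPRINT.get(ecu, []))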
CHECKSUM = {
"crc8": [CAR.SANTAFE, CAR.SONATA, CAR.PALISADE],
"6B": [CAR.SORENTO, CAR.GENESIS],
}
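# Illustrative sketch (assumption about downstream use): CHECKSUM groups cars by
# the checksum style of their LKAS messages, so consumers typically branch on
# membership, e.g.:
def checksum_style(candidate):
  if candidate in CHECKSUM["crc8"]:
    return "crc8"
  if candidate in CHECKSUM["6B"]:
    return "6B"
  return None  # remaining cars fall through to the default scheme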
FEATURES = {
"use_cluster_gears": [CAR.KONA, CAR.GRANDEUR, CAR.K7, CAR.MOHAVE, CAR.I30, CAR.AVANTE], # Use Cluster for Gear Selection, rather than Transmission
"use_tcu_gears": [CAR.K5, CAR.SONATA, CAR.SONATA_TURBO], # Use TCU Message for Gear Selection
"use_elect_gears": [CAR.K5_HEV, CAR.SONATA_HEV, CAR.GRANDEUR_HEV, CAR.IONIQ_HEV, CAR.IONIQ_EV, CAR.NIRO_HEV, CAR.KONA_HEV, CAR.KONA_EV, CAR.NIRO_EV, CAR.NEXO], # Use TCU Message for Gear Selection
}
EV_HYBRID = [CAR.K5_HEV, CAR.SONATA_HEV, CAR.GRANDEUR_HEV, CAR.IONIQ_HEV, CAR.IONIQ_EV, CAR.NIRO_HEV, CAR.KONA_HEV, CAR.KONA_EV, CAR.NIRO_EV, CAR.NEXO]
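# Illustrative sketch (hypothetical helper): FEATURES and EV_HYBRID are consumed
# as plain membership tests, e.g. to decide which CAN message carries the
# current gear for a given car.
def gear_source(candidate):
  if candidate in FEATURES["use_cluster_gears"]:
    return "cluster"
  if candidate in FEATURES["use_tcu_gears"]:
    return "tcu"
  if candidate in FEATURES["use_elect_gears"]:
    return "elect"
  return "transmission"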
DBC = {
CAR.AVANTE: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA_TURBO: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS: dbc_dict('hyundai_kia_generic', None),
CAR.SANTAFE: dbc_dict('hyundai_kia_generic', None),
CAR.KONA: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_EV: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_EV: dbc_dict('hyundai_kia_generic', None),
CAR.K5: dbc_dict('hyundai_kia_generic', None),
CAR.K5_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.K7: dbc_dict('hyundai_kia_generic', None),
CAR.K7_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.STINGER: dbc_dict('hyundai_kia_generic', None),
CAR.NIRO_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.NIRO_EV: dbc_dict('hyundai_kia_generic', None),
CAR.NEXO: dbc_dict('hyundai_kia_generic', None),
CAR.MOHAVE: dbc_dict('hyundai_kia_generic', None),
CAR.I30: dbc_dict('hyundai_kia_generic', None),
CAR.SELTOS: dbc_dict('hyundai_kia_generic', None),
CAR.PALISADE: dbc_dict('hyundai_kia_generic', None),
CAR.SORENTO: dbc_dict('hyundai_kia_generic', None),
}
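# Note (assumption about dbc_dict, shown for orientation only): in typical
# openpilot versions dbc_dict(pt, radar) returns a dict keyed by 'pt' and
# 'radar', so every car here resolves to the same powertrain DBC:
#   DBC[candidate]['pt']  # -> 'hyundai_kia_generic'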
STEER_THRESHOLD = 360
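# Illustrative sketch (hypothetical variable name): STEER_THRESHOLD is the
# driver torque magnitude, in steering-sensor units, above which the driver is
# treated as overriding lateral control:
def steering_pressed(driver_torque):
  return abs(driver_torque) > STEER_THRESHOLD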
from cereal import car
from selfdrive.car import dbc_dict
from common.params import Params
Ecu = car.CarParams.Ecu
class SteerLimitParams:
STEER_MAX = 280
STEER_DELTA_UP = 5
STEER_DELTA_DOWN = 5
STEER_DRIVER_ALLOWANCE = 50
STEER_DRIVER_MULTIPLIER = 2
STEER_DRIVER_FACTOR = 1
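# Illustrative sketch (hedged): these limits are normally fed to openpilot's
# standard steer-torque rate limiter; a simplified, self-contained version
# follows. 'apply_torque' is the requested torque, 'last' the previous frame's
# output, 'driver_torque' the measured driver input (hypothetical names).
def _clip(x, lo, hi):
  return max(lo, min(hi, x))

def limit_steer_torque(apply_torque, last, driver_torque, p=SteerLimitParams):
  # shrink the allowed band as driver torque exceeds the allowance
  driver_max = p.STEER_MAX + (p.STEER_DRIVER_ALLOWANCE + driver_torque * p.STEER_DRIVER_FACTOR) * p.STEER_DRIVER_MULTIPLIER
  driver_min = -p.STEER_MAX + (-p.STEER_DRIVER_ALLOWANCE + driver_torque * p.STEER_DRIVER_FACTOR) * p.STEER_DRIVER_MULTIPLIER
  apply_torque = _clip(apply_torque, min(max(-p.STEER_MAX, driver_min), 0), max(min(p.STEER_MAX, driver_max), 0))
  # ramp up by at most STEER_DELTA_UP per frame, release by STEER_DELTA_DOWN
  if last > 0:
    apply_torque = _clip(apply_torque, max(last - p.STEER_DELTA_DOWN, -p.STEER_DELTA_UP), last + p.STEER_DELTA_UP)
  else:
    apply_torque = _clip(apply_torque, last - p.STEER_DELTA_UP, min(last + p.STEER_DELTA_DOWN, p.STEER_DELTA_UP))
  return int(round(float(apply_torque)))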
class CAR:
AVANTE = "HYUNDAI AVANTE"
SONATA = "HYUNDAI SONATA"
SONATA_HEV = "HYUNDAI SONATA Hybrid"
SONATA_TURBO = "HYUNDAI SONATA Turbo"
GRANDEUR = "HYUNDAI GRANDEUR"
GRANDEUR_HEV = "HYUNDAI GRANDEUR Hybrid"
GENESIS = "GENESIS"
SANTAFE = "HYUNDAI SANTAFE"
KONA = "HYUNDAI KONA"
KONA_HEV = "HYUNDAI KONA Hybrid"
KONA_EV = "HYUNDAI KONA ELECTRIC"
IONIQ_HEV = "HYUNDAI IONIQ HYBRID"
IONIQ_EV = "HYUNDAI IONIQ ELECTRIC"
K5 = "KIA K5"
K5_HEV = "KIA K5 Hybrid"
K7 = "KIA K7"
K7_HEV = "KIA K7 Hybrid"
STINGER = "KIA STINGER"
SORENTO = "KIA SORENTO"
NIRO_HEV = "KIA NIRO Hybrid"
NIRO_EV = "KIA NIRO ELECTRIC"
NEXO = "HYUNDAI NEXO"
MOHAVE = "KIA MOHAVE"
I30 = "HYUNDAI I30"
SELTOS = "KIA SELTOS"
PALISADE = "HYUNDAI PALISADE"
class Buttons:
NONE = 0
RES_ACCEL = 1
SET_DECEL = 2
GAP_DIST = 3
CANCEL = 4
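# Note (assumption): Buttons mirrors the cruise-switch state values found in
# typical Hyundai/Kia DBCs, so a raw CAN value decodes by direct comparison,
# e.g.:
#   if button == Buttons.RES_ACCEL: resume, or bump the set speed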
params = Params()
fingerprint_issued_fix = params.get("FingerprintIssuedFix", encoding='utf8') == "1"
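# The param above acts as a runtime switch for the fingerprint tables below:
# with the fix enabled, only the NIRO variants keep real message maps and every
# other car is mapped to [{}], which can never match by message fingerprinting;
# with it disabled, the NIROs are the empty entries instead.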
if fingerprint_issued_fix:
FINGERPRINTS = {
CAR.AVANTE: [{}],
CAR.SONATA: [{}],
CAR.SONATA_HEV: [{}],
CAR.SONATA_TURBO: [{}],
CAR.GRANDEUR: [{}],
CAR.GRANDEUR_HEV: [{}],
CAR.GENESIS: [{}],
CAR.SANTAFE: [{}],
CAR.KONA: [{}],
CAR.KONA_HEV: [{}],
CAR.KONA_EV: [{}],
CAR.IONIQ_HEV: [{}],
CAR.IONIQ_EV: [{}],
CAR.K5: [{}],
CAR.K5_HEV: [{}],
CAR.K7: [{}],
CAR.K7_HEV: [{}],
CAR.STINGER: [{}],
CAR.NIRO_HEV: [{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1535: 8},
{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1470: 8, 1535: 8},
{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1470: 8, 1535: 8},
{68: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1535: 8},
{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1292: 8, 1345: 8, 1363: 8, 1419: 8, 1429: 8, 1448: 8, 1456: 4},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.NIRO_EV: [{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8}],
CAR.NEXO: [{}],
CAR.MOHAVE: [{}],
CAR.I30: [{}],
CAR.SELTOS: [{}],
CAR.PALISADE: [{}],
CAR.SORENTO: [{}],
}
else:
FINGERPRINTS = {
CAR.AVANTE: [{66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 608: 8, 790: 8, 809: 8, 832: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1345: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1427: 6, 1440: 8, 1456: 4, 1472: 8, 1491: 8, 1530: 8}],
CAR.SONATA: [{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8},
{64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 625: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1371: 8, 1407: 8, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1460: 8, 1470: 8, 1472: 8, 1491: 8, 1530: 8, 1990: 8, 1998: 8, 2016: 8, 2024: 8},
{66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 608: 8, 790: 8, 809: 8, 832: 8, 899: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1460: 8, 1470: 8, 1472: 8, 1491: 8, 1530: 8}],
CAR.SONATA_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 593: 8, 688: 5, 832: 7, 881: 8, 882: 8, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1345: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8}],
CAR.SONATA_TURBO: [{64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1371: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1460: 8, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 2015: 8, 2024: 8},
{66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1460: 8, 1470: 8, 1471: 8, 1472: 8, 1491: 8, 1530: 8, 1532: 5, 2016: 8, 2024: 8},
{64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1460: 8, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1905: 8, 1913: 8, 1990: 8, 1998: 8, 2006: 8, 2014: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8}],
CAR.GRANDEUR: [{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8}],
CAR.GRANDEUR_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1185: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.GENESIS: [{67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1268: 8, 1280: 1, 1281: 3, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1437: 8, 1456: 4},
{67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1378: 4, 1379: 8, 1384: 5, 1407: 8, 1425: 2, 1427: 6, 1437: 8, 1456: 4},
{67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1437: 8, 1456: 4}],
CAR.SANTAFE: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 764: 8, 809: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1186: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 912: 7, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8, 1628: 8, 1629: 8, 1630: 8, 1631: 8, 1674: 8, 1675: 8, 1676: 8, 1677: 8, 1791: 8, 2015: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 8, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8, 1479: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1186: 2, 1191: 2, 1210: 8, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1911: 8}],
CAR.KONA: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 3, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1156: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1193: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8}],
CAR.KONA_HEV: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 3, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1990: 8, 1998: 8, 2001: 8, 2004: 8, 2009: 8, 2012: 8, 2015: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1138: 4, 1151: 6, 1155: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 8, 1379: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.KONA_EV: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 3, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1990: 8, 1998: 8, 2001: 8, 2004: 8, 2009: 8, 2012: 8, 2015: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 549: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8}],
CAR.IONIQ_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.IONIQ_EV: [{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1425: 2, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 545: 8, 546: 8, 548: 8, 549: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 546: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8}],
CAR.K5: [{64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1186: 2, 1191: 2, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 1952: 8, 1960: 8, 1988: 8, 1996: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8},
{64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1268: 8, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1491: 8, 1492: 8}],
CAR.K5_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1236: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 909: 8, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1420: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.K7: [{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 608: 8, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8}],
CAR.K7_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1096: 8, 1102: 8, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1210: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1379: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.STINGER: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 4, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1437: 8, 1456: 4, 1470: 8}],
CAR.NIRO_HEV: [{}],
CAR.NIRO_EV: [{}],
CAR.NEXO: [{127: 8, 145: 8, 146: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 512: 6, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 908: 8, 909: 8, 912: 7, 916: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1174: 8, 1180: 8, 1183: 8, 1186: 2, 1191: 2, 1192: 8, 1193: 8, 1210: 8, 1219: 8, 1220: 8, 1222: 6, 1223: 8, 1224: 8, 1227: 8, 1230: 6, 1231: 6, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1297: 8, 1298: 8, 1305: 8, 1312: 8, 1315: 8, 1316: 8, 1322: 8, 1324: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1437: 8, 1456: 4, 1460: 8, 1470: 8, 1484: 8, 1507: 8, 1520: 8, 1535: 8},
{127: 8, 145: 8, 146: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 512: 6, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 908: 8, 909: 8, 912: 7, 916: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1174: 8, 1180: 8, 1183: 8, 1186: 2, 1191: 2, 1192: 8, 1193: 8, 1210: 8, 1219: 8, 1220: 8, 1222: 6, 1223: 8, 1224: 8, 1227: 8, 1230: 6, 1231: 6, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1297: 8, 1298: 8, 1305: 8, 1312: 8, 1315: 8, 1316: 8, 1322: 8, 1324: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1437: 8, 1456: 4, 1460: 8, 1470: 8, 1484: 8, 1507: 8, 1520: 8, 1535: 8}],
CAR.MOHAVE: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8}],
CAR.I30: [{67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 608: 8, 790: 8, 809: 8, 832: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8}],
CAR.SELTOS: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 905: 8, 909: 8, 910: 5, 911: 5, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1114: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1446: 8, 1456: 4, 1470: 8, 1485: 8, 1911: 8}],
CAR.PALISADE: [{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 549: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 2000: 8, 2005: 8, 2008: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8}],
CAR.SORENTO: [{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1384: 8, 1407: 8, 1411: 8, 1419: 8, 1425: 2, 1427: 6, 1444: 8, 1456: 4, 1470: 8, 1489: 1},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 8, 1168: 7, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 548: 8, 550: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 8, 1168: 7, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 8, 1168: 7, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8},
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 608: 8, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1384: 5, 1407: 8, 1411: 8, 1419: 8, 1427: 6, 1437: 8, 1444: 8, 1456: 4, 1470: 8, 1489: 1, 1990: 8, 1998: 8}],
}
# CAN message IDs whose presence identifies the forward camera ECU
ECU_FINGERPRINT = {
Ecu.fwdCamera: [832, 1156, 1191, 1342]
}
# Checksum algorithm used when building CAN messages for these cars
CHECKSUM = {
"crc8": [CAR.SANTAFE, CAR.SONATA, CAR.PALISADE],
"6B": [CAR.SORENTO, CAR.GENESIS],
}
# Which CAN message family reports the selected gear on each car
FEATURES = {
"use_cluster_gears": [CAR.KONA, CAR.GRANDEUR, CAR.K7, CAR.MOHAVE, CAR.I30, CAR.AVANTE],
"use_tcu_gears": [CAR.K5, CAR.SONATA, CAR.SONATA_TURBO],
"use_elect_gears": [CAR.K5_HEV, CAR.SONATA_HEV, CAR.GRANDEUR_HEV, CAR.IONIQ_HEV, CAR.IONIQ_EV, CAR.NIRO_HEV, CAR.KONA_HEV, CAR.KONA_EV, CAR.NIRO_EV, CAR.NEXO],
}
# Electrified (EV/hybrid) models
EV_HYBRID = [CAR.K5_HEV, CAR.SONATA_HEV, CAR.GRANDEUR_HEV, CAR.IONIQ_HEV, CAR.IONIQ_EV, CAR.NIRO_HEV, CAR.KONA_HEV, CAR.KONA_EV, CAR.NIRO_EV, CAR.NEXO]
# DBC (CAN database) definition used for each supported car
DBC = {
CAR.AVANTE: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA_TURBO: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS: dbc_dict('hyundai_kia_generic', None),
CAR.SANTAFE: dbc_dict('hyundai_kia_generic', None),
CAR.KONA: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_EV: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_EV: dbc_dict('hyundai_kia_generic', None),
CAR.K5: dbc_dict('hyundai_kia_generic', None),
CAR.K5_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.K7: dbc_dict('hyundai_kia_generic', None),
CAR.K7_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.STINGER: dbc_dict('hyundai_kia_generic', None),
CAR.NIRO_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.NIRO_EV: dbc_dict('hyundai_kia_generic', None),
CAR.NEXO: dbc_dict('hyundai_kia_generic', None),
CAR.MOHAVE: dbc_dict('hyundai_kia_generic', None),
CAR.I30: dbc_dict('hyundai_kia_generic', None),
CAR.SELTOS: dbc_dict('hyundai_kia_generic', None),
CAR.PALISADE: dbc_dict('hyundai_kia_generic', None),
CAR.SORENTO: dbc_dict('hyundai_kia_generic', None),
}
# Driver steering torque threshold used to detect manual override
STEER_THRESHOLD = 360
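# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A minimal sketch of how a port might consume the lookup tables above. The
# `candidate` argument is assumed to be one of the CAR.* identifiers defined
# earlier in this file; the helper itself is hypothetical.
def describe_candidate(candidate):
    dbc = DBC[candidate]  # every supported car maps to the shared generic DBC
    is_ev_hybrid = candidate in EV_HYBRID  # electrified gas/brake signals
    if candidate in CHECKSUM["crc8"]:
        checksum = "crc8"
    elif candidate in CHECKSUM["6B"]:
        checksum = "6B"
    else:
        checksum = None  # cars in neither list presumably use a default scheme
    return dbc, is_ev_hybrid, checksum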
| true
| true
|
f71a3a75821354fee84241165aa869abf4a61832
| 5,614
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/documentdb/v20200901/notebook_workspace.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/documentdb/v20200901/notebook_workspace.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/documentdb/v20200901/notebook_workspace.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['NotebookWorkspace']
class NotebookWorkspace(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
notebook_workspace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A notebook workspace resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] notebook_workspace_name: The name of the notebook workspace resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__['account_name'] = account_name
__props__['notebook_workspace_name'] = notebook_workspace_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['name'] = None
__props__['notebook_server_endpoint'] = None
__props__['status'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb:NotebookWorkspace"), pulumi.Alias(type_="azure-nextgen:documentdb/latest:NotebookWorkspace"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801:NotebookWorkspace"), pulumi.Alias(type_="azure-nextgen:documentdb/v20191212:NotebookWorkspace"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200301:NotebookWorkspace"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200401:NotebookWorkspace"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:NotebookWorkspace"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:NotebookWorkspace")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(NotebookWorkspace, __self__).__init__(
'azure-nextgen:documentdb/v20200901:NotebookWorkspace',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'NotebookWorkspace':
"""
Get an existing NotebookWorkspace resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return NotebookWorkspace(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the notebook workspace resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notebookServerEndpoint")
def notebook_server_endpoint(self) -> pulumi.Output[str]:
"""
Specifies the endpoint of the notebook server.
"""
return pulumi.get(self, "notebook_server_endpoint")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
Status of the notebook workspace. Possible values are: Creating, Online, Deleting, Failed, Updating.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
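# --- Illustrative usage (editor's sketch, not part of the generated file) ---
# A minimal example of declaring this resource inside a Pulumi program. The
# account name, resource group name, and workspace name are hypothetical
# placeholders; wrapping the code in a function only keeps it from running at
# import time here.
def _example_program():
    workspace = NotebookWorkspace(
        "example-workspace",
        account_name="my-cosmos-account",           # hypothetical account
        notebook_workspace_name="default",          # assumed workspace name
        resource_group_name="my-resource-group")    # hypothetical group
    pulumi.export("notebookServerEndpoint", workspace.notebook_server_endpoint)
    # An existing workspace can also be adopted by provider ID via
    # NotebookWorkspace.get("imported", id="<azure-resource-id>").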
| 44.912
| 655
| 0.665301
|
| true
| true
|
f71a3c4c2a40dfd2974f50c147e4fa1e98133caa
| 1,214
|
py
|
Python
|
statistical_analysis/gpa_scatter.py
|
guptarohit994/ECE143_group25_project
|
e31d0425b2a6114eed6c55bdb0491c2c996b94be
|
[
"CC0-1.0"
] | null | null | null |
statistical_analysis/gpa_scatter.py
|
guptarohit994/ECE143_group25_project
|
e31d0425b2a6114eed6c55bdb0491c2c996b94be
|
[
"CC0-1.0"
] | null | null | null |
statistical_analysis/gpa_scatter.py
|
guptarohit994/ECE143_group25_project
|
e31d0425b2a6114eed6c55bdb0491c2c996b94be
|
[
"CC0-1.0"
] | null | null | null |
import helper
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def plot_gpa_scatter():
"""Plotting scatterplot of grades expected and grade received, using the general department list
"""
# obtaining data
department_df = helper.generate_depts_df(helper.general_dept_list)
comp_criteria = ["AvgGradeExpected","AvgGradeReceived"]
# generating scatterplot graph
lower_bound = 1.5
upper_bound = 4.02
ax = department_df.plot.scatter(x=comp_criteria[0], y=comp_criteria[1], c= "grey",ylim=(lower_bound,upper_bound),xlim=(lower_bound,upper_bound), figsize=(10,10), fontsize=20, alpha = 0.3)
ax.set_xlabel("Average Grade Expected", fontsize = 20)
ax.set_ylabel("Average Grade Received", fontsize = 20)
# computing least squares best fit line and adding it onto graph
y = department_df["AvgGradeReceived"]
x = department_df["AvgGradeExpected"]
A = np.vstack([x, np.ones(len(x))]).T
m, c = np.linalg.lstsq(A, y, rcond=None)[0]
print("m:{}, c:{}".format(m,c))
ax.plot(np.linspace(lower_bound,4,10),np.linspace(lower_bound,4,10),c="red")
ax.plot(np.linspace(lower_bound,4,10),(np.linspace(lower_bound,4,10)*m) + c,c="blue")
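# --- Illustrative usage (editor's sketch, not part of the original file) ---
# Running the module directly would render the plot; plt.show() is needed for
# non-interactive backends. The `helper` module is assumed importable as above.
if __name__ == "__main__":
    plot_gpa_scatter()
    plt.show()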
| 43.357143
| 191
| 0.706755
|
| true
| true
|