repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
fakedrake/overlay_parse
|
overlay_parse/matchers.py
|
MatcherMatcher.offset_overlays
|
python
|
def offset_overlays(self, text, run_matchers=None, **kw):
self._maybe_run_matchers(text, run_matchers)
for i in self._list_match.offset_overlays(text, **kw):
yield i
|
First all matchers will run and then I will try to combine
them. Use run_matchers to force running(True) or not
running(False) the matchers.
See ListMatcher for arguments.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L241-L252
|
[
"def _maybe_run_matchers(self, text, run_matchers):\n \"\"\"\n OverlayedText should be smart enough to not run twice the same\n matchers but this is an extra handle of control over that.\n \"\"\"\n\n if run_matchers is True or \\\n (run_matchers is not False and text not in self._overlayed_already):\n text.overlay(self.matchers)\n self._overlayed_already.append(text)\n"
] |
class MatcherMatcher(BaseMatcher):
"""
Match the matchers.
"""
def __init__(self, matchers, props=None, value_fn=None):
self.matchers = matchers
self.props = props
self.value_fn = value_fn
self._list_match = ListMatcher(
[OverlayMatcher(m.props) for m in matchers], props=self.props)
self._overlayed_already = []
def _maybe_run_matchers(self, text, run_matchers):
"""
OverlayedText should be smart enough to not run twice the same
matchers but this is an extra handle of control over that.
"""
if run_matchers is True or \
(run_matchers is not False and text not in self._overlayed_already):
text.overlay(self.matchers)
self._overlayed_already.append(text)
def fit_overlays(self, text, run_matchers=None, **kw):
"""
First all matchers will run and then I will try to combine
them. Use run_matchers to force running(True) or not
running(False) the matchers.
See ListMatcher for arguments.
"""
self._maybe_run_matchers(text, run_matchers)
for i in self._list_match.fit_overlay(text, **kw):
yield i
|
fakedrake/overlay_parse
|
overlay_parse/dates.py
|
date_tuple
|
python
|
def date_tuple(ovls):
day = month = year = 0
for o in ovls:
if 'day' in o.props:
day = o.value
if 'month' in o.props:
month = o.value
if 'year' in o.props:
year = o.value
if 'date' in o.props:
day, month, year = [(o or n) for o, n in zip((day, month,
year), o.value)]
return (day, month, year)
|
We should have a list of overlays from which to extract day month
year.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/dates.py#L57-L78
| null |
# -*- coding: utf-8 -*-
from datetime import date
import re
import itertools
from .overlays import OverlayedText
from .matchers import mf
from .util import w, words, starts_with, rx_int, rx_int_extra, Rng
def month_names(rxmatch):
for i, m in enumerate(MONTH_NAMES_LONG):
if starts_with(m, rxmatch.group(0)):
return i + 1
def date_mean(d1, d2):
return tuple([(i1 + i2) / 2 for i1, i2 in zip(d1, d2)])
def date_min(d1, d2):
for i1, i2 in reversed(zip(d1, d2)):
if i2 < i1:
return d2, d1
if i1 < i2:
return d1, d2
def date_pair(ovls):
d2 = ovls[2].value
d1 = ovls[0].value
if d2[2] < 0 and d1[2] > 0:
d, m, y = d1
return date_min((d, m, -y), d2)
return date_min(d1, d2)
def date_range(ovls):
return date_pair(ovls)
def date_conditional(ovls):
e, o = date_pair(ovls)
return date_mean(e, o)
def present(rxmatch):
d = date.today()
return (d.day, d.month, d.year)
def longest_overlap(ovls):
"""
From a list of overlays if any overlap keep the longest.
"""
# Ovls know how to compare to each other.
ovls = sorted(ovls)
# I know this could be better but ovls wont be more than 50 or so.
for i, s in enumerate(ovls):
passing = True
for l in ovls[i + 1:]:
if s.start in Rng(l.start, l.end, rng=(True, True)) or \
s.end in Rng(l.start, l.end, rng=(True, True)):
passing = False
break
if passing:
yield s
MONTH_NAMES_LONG = [
"January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December",
]
MONTH_NAMES_SHORT = [
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
]
matchers = [
# Regex
('day', mf(r"(0[1-9]|[12][0-9]|3[01]|[1-9])",
{'day', 'num'}, rx_int)),
('day', mf(w(r"(0[1-9]|[12][0-9]|3[01]|[1-9])"),
{'day', 'num', 'word'}, rx_int)),
('day_numeric', mf(w(r"(11th|12th|13th|[012][4-9]th|[4-9]th|[123]0th|[0-3]"
"1st|[02]2nd|023rd)"),
{'day', 'numeric'}, rx_int_extra)),
# Note that regexes are greedy. If there is '07' then '7' alone
# will be ignored
('month', mf(r"(0[1-9]|1[012]|[1-9])", {'month', 'num'}, rx_int)),
('year_4', mf(r"\d{4}", {'year', '4dig', 'num'}, rx_int)),
('year_num', mf(w(r"\d+\s*([Aa]\.?[Dd]\.?)"), {'year', 'adbc', 'num',
"ad", 'word'},
rx_int_extra)),
('year_adbc', mf(w(r"\d+\s*([Bb]\.?[Cc]\.?([Ee]\.?)?)"), {"year", "adbc",
"bc", 'word'},
lambda rxmatch: -rx_int_extra(rxmatch))),
('year_num_word', mf(w(r"\d{1,4}"), {'year', 'num', 'word'},
rx_int)),
('month_name_short', mf(
re.compile(r"(%s)" % "|".join(words(MONTH_NAMES_SHORT)), re.I),
{"month", "name", "shorte"}, month_names)),
('month_name_long', mf(
re.compile(r"(%s)" % "|".join(words(MONTH_NAMES_LONG)), re.I),
{"month", "name", "long"}, month_names)),
# Note that instead of rx or sets you can use a matcher, it will
# be a dependency
# Lists
# June 1991
("far_year", mf([{"month", "name"}, r"\s+", {"year", '4dig'}],
{"date", "year_month"},
date_tuple)),
# July the 14th
('dayn_month_date', mf([{'month', 'name'},
r",?\s*(the)?\s+",
{'day', 'numeric'}],
{"day_month", "numeric", "date"}, date_tuple)),
# July 14
('dayn_month_date', mf([{'month', 'name'}, r"\s+", {'day', 'word'}],
{"day_month", "date"}, date_tuple)),
# July the 14th 1991
('dayn_month_year_date', mf([{'day_month'}, ur"(\s+|\s*,\s*)",
{"year", "word"}],
{"day_month_year", "numeric", "date", "full"},
date_tuple)),
# 14 July 1991
('day_month_year_full', mf([{"day"}, r"\s+(of\s+)?",
{"month", "name"}, r"\s+",
{"year", "word"}],
{"day_month_year", "date"},
date_tuple)),
# 3000AD
("far_year", mf([{"year", 'word'}],
{"date", "only_year"},
date_tuple)),
# July 13, 1991
('month_day_year', mf([{'day_month'}, ur"(\s+|\s*,?\s*)", "year"],
{"month_day_year", "date"},
date_tuple)),
# Present
('present', mf(
r"([pP]resent|[Tt]oday|[Nn]ow)", {"date", "present"}, present)),
]
# Short dates
SEPARATORS = [r"/", r"\.", r"\|", r"-"]
matchers += [('ymd_dates_%s' % s,
mf([{'year', 'num', 'word'}, s, {'month', 'num'},
s, {'day', 'num'}],
{"date", 'short', 'ymd', "sep_%s" % s}, date_tuple))
for s in SEPARATORS]
matchers += [('dmy_dates_%s' % s,
mf([{'day', 'num'}, s, {'month', 'num'}, s,
{'year', 'num', 'word'}],
{"date", 'short', 'dmy', "sep_%s" % s}, date_tuple))
for s in SEPARATORS]
matchers += [("mdy_dates_%s" % s,
mf([{'month', 'num'}, s, {'day', 'num'}, s,
{'year', 'num', 'word'}],
{"date", 'short', 'mdy', "sep_%s" % s}, date_tuple))
for s in SEPARATORS]
# Non separated
matchers += [('ymd_dates',
mf([{'year', 'num'}, {'month', 'num'}, {'day', 'num'}],
{"date", 'short', 'ymd', "nosep"}, date_tuple)),
('dmy_dates',
mf([{'day', 'num'}, {'month', 'num'}, {'year', 'num'}],
{"date", 'short', 'dmy', "nosep"}, date_tuple)),
("mdy_dates",
mf([{'month', 'num'}, {'day', 'num'}, {'year', 'num'}],
{"date", 'short', 'mdy', "sep_%s"}, date_tuple)), ]
range_symbols = ur"(-|\sto\s|\suntil\s|\xe2\x80\x93|\xe2\x80\x94|\u2013|\u2014)"
matchers += [
# Date range
("range", mf([{"date"},
ur"\s*"+ range_symbols + ur"\s*",
{"date"}],
{"range"}, date_range)),
# November 20, 1876 in Shusha, Russian Empire – February 1, 1944 in Yerevan
("range_place", mf([{"date"},
ur"\s+in\s+.*" + range_symbols + ur"\s*",
{"date"}],
{"range", "with_place"}, date_range)),
]
matchers += [
# 424/423 BC
("conditional_date_slash",
mf([{"date"},
ur"\s*/\s*",
{"date"}],
{"conditional", "slash", "date"},
date_conditional)),
# 427 or 424
("conditional_date_to",
mf([{"date"},
ur"\s+or\s+",
{"date"}],
{"conditional", "or", "date"},
date_conditional)),
]
def just_props(text, *props_lst, **kw):
t = OverlayedText(text)
t.overlay([m for n, m in matchers])
ovls = itertools.chain(*[t.get_overlays(props=props) for props in
props_lst])
values = kw.get('values', True)
return [i.value if values else i
for i in sorted(longest_overlap(ovls),
key=lambda o: o.start)]
def just_dates(text):
return just_props(text, {'date'})
def just_ranges(text):
return just_props(text, {'range'})
if __name__ == "__main__":
from pprint import pprint
pprint(just_dates("Timestamp: 22071991, well\
i said i was on July 22 1992 but I lied."))
|
fakedrake/overlay_parse
|
overlay_parse/dates.py
|
longest_overlap
|
python
|
def longest_overlap(ovls):
# Ovls know how to compare to each other.
ovls = sorted(ovls)
# I know this could be better but ovls wont be more than 50 or so.
for i, s in enumerate(ovls):
passing = True
for l in ovls[i + 1:]:
if s.start in Rng(l.start, l.end, rng=(True, True)) or \
s.end in Rng(l.start, l.end, rng=(True, True)):
passing = False
break
if passing:
yield s
|
From a list of overlays if any overlap keep the longest.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/dates.py#L81-L100
| null |
# -*- coding: utf-8 -*-
from datetime import date
import re
import itertools
from .overlays import OverlayedText
from .matchers import mf
from .util import w, words, starts_with, rx_int, rx_int_extra, Rng
def month_names(rxmatch):
for i, m in enumerate(MONTH_NAMES_LONG):
if starts_with(m, rxmatch.group(0)):
return i + 1
def date_mean(d1, d2):
return tuple([(i1 + i2) / 2 for i1, i2 in zip(d1, d2)])
def date_min(d1, d2):
for i1, i2 in reversed(zip(d1, d2)):
if i2 < i1:
return d2, d1
if i1 < i2:
return d1, d2
def date_pair(ovls):
d2 = ovls[2].value
d1 = ovls[0].value
if d2[2] < 0 and d1[2] > 0:
d, m, y = d1
return date_min((d, m, -y), d2)
return date_min(d1, d2)
def date_range(ovls):
return date_pair(ovls)
def date_conditional(ovls):
e, o = date_pair(ovls)
return date_mean(e, o)
def present(rxmatch):
d = date.today()
return (d.day, d.month, d.year)
def date_tuple(ovls):
"""
We should have a list of overlays from which to extract day month
year.
"""
day = month = year = 0
for o in ovls:
if 'day' in o.props:
day = o.value
if 'month' in o.props:
month = o.value
if 'year' in o.props:
year = o.value
if 'date' in o.props:
day, month, year = [(o or n) for o, n in zip((day, month,
year), o.value)]
return (day, month, year)
MONTH_NAMES_LONG = [
"January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December",
]
MONTH_NAMES_SHORT = [
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
]
matchers = [
# Regex
('day', mf(r"(0[1-9]|[12][0-9]|3[01]|[1-9])",
{'day', 'num'}, rx_int)),
('day', mf(w(r"(0[1-9]|[12][0-9]|3[01]|[1-9])"),
{'day', 'num', 'word'}, rx_int)),
('day_numeric', mf(w(r"(11th|12th|13th|[012][4-9]th|[4-9]th|[123]0th|[0-3]"
"1st|[02]2nd|023rd)"),
{'day', 'numeric'}, rx_int_extra)),
# Note that regexes are greedy. If there is '07' then '7' alone
# will be ignored
('month', mf(r"(0[1-9]|1[012]|[1-9])", {'month', 'num'}, rx_int)),
('year_4', mf(r"\d{4}", {'year', '4dig', 'num'}, rx_int)),
('year_num', mf(w(r"\d+\s*([Aa]\.?[Dd]\.?)"), {'year', 'adbc', 'num',
"ad", 'word'},
rx_int_extra)),
('year_adbc', mf(w(r"\d+\s*([Bb]\.?[Cc]\.?([Ee]\.?)?)"), {"year", "adbc",
"bc", 'word'},
lambda rxmatch: -rx_int_extra(rxmatch))),
('year_num_word', mf(w(r"\d{1,4}"), {'year', 'num', 'word'},
rx_int)),
('month_name_short', mf(
re.compile(r"(%s)" % "|".join(words(MONTH_NAMES_SHORT)), re.I),
{"month", "name", "shorte"}, month_names)),
('month_name_long', mf(
re.compile(r"(%s)" % "|".join(words(MONTH_NAMES_LONG)), re.I),
{"month", "name", "long"}, month_names)),
# Note that instead of rx or sets you can use a matcher, it will
# be a dependency
# Lists
# June 1991
("far_year", mf([{"month", "name"}, r"\s+", {"year", '4dig'}],
{"date", "year_month"},
date_tuple)),
# July the 14th
('dayn_month_date', mf([{'month', 'name'},
r",?\s*(the)?\s+",
{'day', 'numeric'}],
{"day_month", "numeric", "date"}, date_tuple)),
# July 14
('dayn_month_date', mf([{'month', 'name'}, r"\s+", {'day', 'word'}],
{"day_month", "date"}, date_tuple)),
# July the 14th 1991
('dayn_month_year_date', mf([{'day_month'}, ur"(\s+|\s*,\s*)",
{"year", "word"}],
{"day_month_year", "numeric", "date", "full"},
date_tuple)),
# 14 July 1991
('day_month_year_full', mf([{"day"}, r"\s+(of\s+)?",
{"month", "name"}, r"\s+",
{"year", "word"}],
{"day_month_year", "date"},
date_tuple)),
# 3000AD
("far_year", mf([{"year", 'word'}],
{"date", "only_year"},
date_tuple)),
# July 13, 1991
('month_day_year', mf([{'day_month'}, ur"(\s+|\s*,?\s*)", "year"],
{"month_day_year", "date"},
date_tuple)),
# Present
('present', mf(
r"([pP]resent|[Tt]oday|[Nn]ow)", {"date", "present"}, present)),
]
# Short dates
SEPARATORS = [r"/", r"\.", r"\|", r"-"]
matchers += [('ymd_dates_%s' % s,
mf([{'year', 'num', 'word'}, s, {'month', 'num'},
s, {'day', 'num'}],
{"date", 'short', 'ymd', "sep_%s" % s}, date_tuple))
for s in SEPARATORS]
matchers += [('dmy_dates_%s' % s,
mf([{'day', 'num'}, s, {'month', 'num'}, s,
{'year', 'num', 'word'}],
{"date", 'short', 'dmy', "sep_%s" % s}, date_tuple))
for s in SEPARATORS]
matchers += [("mdy_dates_%s" % s,
mf([{'month', 'num'}, s, {'day', 'num'}, s,
{'year', 'num', 'word'}],
{"date", 'short', 'mdy', "sep_%s" % s}, date_tuple))
for s in SEPARATORS]
# Non separated
matchers += [('ymd_dates',
mf([{'year', 'num'}, {'month', 'num'}, {'day', 'num'}],
{"date", 'short', 'ymd', "nosep"}, date_tuple)),
('dmy_dates',
mf([{'day', 'num'}, {'month', 'num'}, {'year', 'num'}],
{"date", 'short', 'dmy', "nosep"}, date_tuple)),
("mdy_dates",
mf([{'month', 'num'}, {'day', 'num'}, {'year', 'num'}],
{"date", 'short', 'mdy', "sep_%s"}, date_tuple)), ]
range_symbols = ur"(-|\sto\s|\suntil\s|\xe2\x80\x93|\xe2\x80\x94|\u2013|\u2014)"
matchers += [
# Date range
("range", mf([{"date"},
ur"\s*"+ range_symbols + ur"\s*",
{"date"}],
{"range"}, date_range)),
# November 20, 1876 in Shusha, Russian Empire – February 1, 1944 in Yerevan
("range_place", mf([{"date"},
ur"\s+in\s+.*" + range_symbols + ur"\s*",
{"date"}],
{"range", "with_place"}, date_range)),
]
matchers += [
# 424/423 BC
("conditional_date_slash",
mf([{"date"},
ur"\s*/\s*",
{"date"}],
{"conditional", "slash", "date"},
date_conditional)),
# 427 or 424
("conditional_date_to",
mf([{"date"},
ur"\s+or\s+",
{"date"}],
{"conditional", "or", "date"},
date_conditional)),
]
def just_props(text, *props_lst, **kw):
t = OverlayedText(text)
t.overlay([m for n, m in matchers])
ovls = itertools.chain(*[t.get_overlays(props=props) for props in
props_lst])
values = kw.get('values', True)
return [i.value if values else i
for i in sorted(longest_overlap(ovls),
key=lambda o: o.start)]
def just_dates(text):
return just_props(text, {'date'})
def just_ranges(text):
return just_props(text, {'range'})
if __name__ == "__main__":
from pprint import pprint
pprint(just_dates("Timestamp: 22071991, well\
i said i was on July 22 1992 but I lied."))
|
fakedrake/overlay_parse
|
overlay_parse/overlays.py
|
Overlay.copy
|
python
|
def copy(self, props=None, value=None):
return Overlay(self.text,
(self.start, self.end),
props=props or self.props,
value=value or self.value)
|
Copy the Overlay possibly overriding props.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/overlays.py#L41-L49
| null |
class Overlay(object):
def __init__(self, text, rng, props=None, value=None):
"""
:param text: The text this overlay refers to.
:param start: The starting index of the overlay.
:param end: The end index of the overlay.
:param props: A list of strings that are the properties of the
overlay.
:param value: The value that this text represents.
"""
(start, end) = rng
self.text = text
self.start = start
self.end = end
self.value = value
self.priority = False
self.set_props(props)
def __lt__(self, ovl):
if self.priority < ovl.priority:
return True
return len(self) < len(ovl)
def set_props(self, props=None):
"""
Set props of this overlay or clear them.
"""
self.props = props or set()
def __str__(self):
"""
The text tha this overlay matches.
"""
return unicode(self.string())
def __len__(self):
return self.end - self.start
def before(self):
"""
The text before the overlay.
"""
return self.text[:self.start]
def after(self):
"""
The entire text after the overlay.
"""
return self.text[self.end:]
def until(self, ovl):
"""
The text separating overlays.
"""
return self.text[self.end:ovl.start]
def string(self):
return self.text[self.start:self.end]
def merge(self, ovl):
if not ovl:
return self
if self.text != ovl.text:
raise ValueError("Overlays refer to different texts.")
s = min(self.start, ovl.start)
e = max(self.end, ovl.end)
return Overlay(self.text, (s, e), self.props.union(ovl.props))
def __eq__(self, ov):
return self.start == ov.start and \
self.end == ov.end and \
unicode(self.text) == unicode(ov.text)
def __repr__(self):
return u"<Overlay object at [%d, %d), props: %s, text: '%s'>" % (
self.start, self.end, self.props, unicode(self))
def match(self, props=None, rng=None, offset=None):
"""
Provide any of the args and match or dont.
:param props: Should be a subset of my props.
:param rng: Exactly match my range.
:param offset: I start after this offset.
:returns: True if all the provided predicates match or are None
"""
if rng:
s, e = rng
else:
e = s = None
return ((e is None or self.end == e) and
(s is None or self.start == s)) and \
(props is None or props.issubset(self.props)) and \
(offset is None or self.start >= offset)
|
fakedrake/overlay_parse
|
overlay_parse/overlays.py
|
Overlay.match
|
python
|
def match(self, props=None, rng=None, offset=None):
if rng:
s, e = rng
else:
e = s = None
return ((e is None or self.end == e) and
(s is None or self.start == s)) and \
(props is None or props.issubset(self.props)) and \
(offset is None or self.start >= offset)
|
Provide any of the args and match or dont.
:param props: Should be a subset of my props.
:param rng: Exactly match my range.
:param offset: I start after this offset.
:returns: True if all the provided predicates match or are None
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/overlays.py#L106-L124
| null |
class Overlay(object):
def __init__(self, text, rng, props=None, value=None):
"""
:param text: The text this overlay refers to.
:param start: The starting index of the overlay.
:param end: The end index of the overlay.
:param props: A list of strings that are the properties of the
overlay.
:param value: The value that this text represents.
"""
(start, end) = rng
self.text = text
self.start = start
self.end = end
self.value = value
self.priority = False
self.set_props(props)
def __lt__(self, ovl):
if self.priority < ovl.priority:
return True
return len(self) < len(ovl)
def set_props(self, props=None):
"""
Set props of this overlay or clear them.
"""
self.props = props or set()
def copy(self, props=None, value=None):
"""
Copy the Overlay possibly overriding props.
"""
return Overlay(self.text,
(self.start, self.end),
props=props or self.props,
value=value or self.value)
def __str__(self):
"""
The text tha this overlay matches.
"""
return unicode(self.string())
def __len__(self):
return self.end - self.start
def before(self):
"""
The text before the overlay.
"""
return self.text[:self.start]
def after(self):
"""
The entire text after the overlay.
"""
return self.text[self.end:]
def until(self, ovl):
"""
The text separating overlays.
"""
return self.text[self.end:ovl.start]
def string(self):
return self.text[self.start:self.end]
def merge(self, ovl):
if not ovl:
return self
if self.text != ovl.text:
raise ValueError("Overlays refer to different texts.")
s = min(self.start, ovl.start)
e = max(self.end, ovl.end)
return Overlay(self.text, (s, e), self.props.union(ovl.props))
def __eq__(self, ov):
return self.start == ov.start and \
self.end == ov.end and \
unicode(self.text) == unicode(ov.text)
def __repr__(self):
return u"<Overlay object at [%d, %d), props: %s, text: '%s'>" % (
self.start, self.end, self.props, unicode(self))
|
fakedrake/overlay_parse
|
overlay_parse/overlays.py
|
OverlayedText.overlays_at
|
python
|
def overlays_at(self, key):
if isinstance(key, slice):
s, e, _ = key.indices(len(self.text))
else:
s = e = key
return [o for o in self.overlays if o.start in Rng(s, e)]
|
Key may be a slice or a point.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/overlays.py#L164-L174
| null |
class OverlayedText(object):
"""
Both the text and it's overlays.
"""
def __init__(self, text, overlays=None):
self.text = text
self.overlays = overlays or []
self._ran_matchers = []
def copy(self):
t = OverlayedText(self.text, [o.copy() for o in self.overlays])
t._ran_matchers = [i for i in self._ran_matchers]
return t
def __unicode__(self):
try:
return unicode(self.text)
except UnicodeDecodeError:
ascii_text = str(self.text).encode('string_escape')
return unicode(ascii_text)
def __str__(self):
try:
return str(self.text)
except UnicodeEncodeError:
ascii_text = unicode(self.text).encode('unicode_escape')
return ascii_text
def __repr__(self):
return unicode(self.text)
def __getitem__(self, key):
return OverlayedText(self.text.__getitem__(key),
overlays=self.overlays_at(key))
def overlay(self, matchers, force=False):
"""
Given a list of matchers create overlays based on them. Normally I
will remember what overlays were run this way and will avoid
re-running them but you can `force` me to. This is the
recommended way of running overlays.c
"""
for m in matchers:
if m in self._ran_matchers:
continue
self._ran_matchers.append(m)
self.overlays += list(m.offset_overlays(self))
self.overlays.sort(key=lambda o: o.start, reverse=True)
def get_overlays(self, **kw):
"""
See Overlay.match() for arguments.
"""
return [o for o in self.overlays if o.match(**kw)]
|
fakedrake/overlay_parse
|
overlay_parse/overlays.py
|
OverlayedText.overlay
|
python
|
def overlay(self, matchers, force=False):
for m in matchers:
if m in self._ran_matchers:
continue
self._ran_matchers.append(m)
self.overlays += list(m.offset_overlays(self))
self.overlays.sort(key=lambda o: o.start, reverse=True)
|
Given a list of matchers create overlays based on them. Normally I
will remember what overlays were run this way and will avoid
re-running them but you can `force` me to. This is the
recommended way of running overlays.c
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/overlays.py#L176-L191
| null |
class OverlayedText(object):
"""
Both the text and it's overlays.
"""
def __init__(self, text, overlays=None):
self.text = text
self.overlays = overlays or []
self._ran_matchers = []
def copy(self):
t = OverlayedText(self.text, [o.copy() for o in self.overlays])
t._ran_matchers = [i for i in self._ran_matchers]
return t
def __unicode__(self):
try:
return unicode(self.text)
except UnicodeDecodeError:
ascii_text = str(self.text).encode('string_escape')
return unicode(ascii_text)
def __str__(self):
try:
return str(self.text)
except UnicodeEncodeError:
ascii_text = unicode(self.text).encode('unicode_escape')
return ascii_text
def __repr__(self):
return unicode(self.text)
def __getitem__(self, key):
return OverlayedText(self.text.__getitem__(key),
overlays=self.overlays_at(key))
def overlays_at(self, key):
"""
Key may be a slice or a point.
"""
if isinstance(key, slice):
s, e, _ = key.indices(len(self.text))
else:
s = e = key
return [o for o in self.overlays if o.start in Rng(s, e)]
def get_overlays(self, **kw):
"""
See Overlay.match() for arguments.
"""
return [o for o in self.overlays if o.match(**kw)]
|
fakedrake/overlay_parse
|
overlay_parse/overlays.py
|
OverlayedText.get_overlays
|
python
|
def get_overlays(self, **kw):
return [o for o in self.overlays if o.match(**kw)]
|
See Overlay.match() for arguments.
|
train
|
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/overlays.py#L193-L198
| null |
class OverlayedText(object):
"""
Both the text and it's overlays.
"""
def __init__(self, text, overlays=None):
self.text = text
self.overlays = overlays or []
self._ran_matchers = []
def copy(self):
t = OverlayedText(self.text, [o.copy() for o in self.overlays])
t._ran_matchers = [i for i in self._ran_matchers]
return t
def __unicode__(self):
try:
return unicode(self.text)
except UnicodeDecodeError:
ascii_text = str(self.text).encode('string_escape')
return unicode(ascii_text)
def __str__(self):
try:
return str(self.text)
except UnicodeEncodeError:
ascii_text = unicode(self.text).encode('unicode_escape')
return ascii_text
def __repr__(self):
return unicode(self.text)
def __getitem__(self, key):
return OverlayedText(self.text.__getitem__(key),
overlays=self.overlays_at(key))
def overlays_at(self, key):
"""
Key may be a slice or a point.
"""
if isinstance(key, slice):
s, e, _ = key.indices(len(self.text))
else:
s = e = key
return [o for o in self.overlays if o.start in Rng(s, e)]
def overlay(self, matchers, force=False):
"""
Given a list of matchers create overlays based on them. Normally I
will remember what overlays were run this way and will avoid
re-running them but you can `force` me to. This is the
recommended way of running overlays.c
"""
for m in matchers:
if m in self._ran_matchers:
continue
self._ran_matchers.append(m)
self.overlays += list(m.offset_overlays(self))
self.overlays.sort(key=lambda o: o.start, reverse=True)
|
tagcubeio/tagcube-cli
|
tagcube_cli/subcommands/batch.py
|
create_scans
|
python
|
def create_scans(urls_file):
cli_logger.debug('Starting to process batch input file')
created_scans = []
for line in urls_file:
line = line.strip()
if line.startswith('#'):
continue
if not line:
continue
try:
protocol, domain, port, path = parse_url(line)
except ValueError, ve:
cli_logger.debug(str(ve))
continue
for scan in created_scans:
if scan.matches(protocol, domain, port):
scan.add_path(path)
args = (path, scan.get_root_url())
cli_logger.debug('Added %s to %s' % args)
break
else:
scan = BatchScan(protocol, domain, port, path)
created_scans.append(scan)
cli_logger.debug('Added a new scan to %s' % scan.get_root_url())
cli_logger.debug('Created a total of %s scans' % len(created_scans))
return created_scans
|
This method is rather simple, it will group the urls to be scanner together
based on (protocol, domain and port).
:param urls_file: The filename with all the URLs
:return: A list of scans to be run
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube_cli/subcommands/batch.py#L23-L61
| null |
from urlparse import urlparse
from tagcube_cli.logger import cli_logger
def do_batch_scan(client, cmd_args):
if not client.test_auth_credentials():
raise ValueError('Invalid TagCube REST API credentials.')
cli_logger.debug('Authentication credentials are valid')
for scan in create_scans(cmd_args.urls_file):
scan_resource = client.quick_scan(scan.get_root_url(),
email_notify=cmd_args.email_notify,
scan_profile=cmd_args.scan_profile,
path_list=scan.get_paths())
# pylint: disable=E1101
args = (scan_resource.id, scan.get_root_url())
cli_logger.info('Launched scan #%s to %s' % args)
# pylint: enable=E1101
def parse_url(url):
"""
Parse a URL into the parts I need for processing:
* protocol
* domain
* port
* path
:param url: A string
:return: A tuple containing the above
"""
split_url = url.split('/', 3)
if len(split_url) == 3:
# http://foo.com
path = '/'
elif len(split_url) == 4:
path = '/' + split_url[3]
else:
raise ValueError('Invalid URL: %s' % url)
try:
parse_result = urlparse(url)
except Exception:
raise ValueError('Invalid URL: %s' % url)
protocol = parse_result.scheme
protocol = protocol.lower()
if protocol not in ('http', 'https'):
raise ValueError('Invalid URL protocol "%s"' % protocol)
split_netloc = parse_result.netloc.split(':')
domain = split_netloc[0]
domain = domain.lower()
if len(split_netloc) == 2:
try:
port = int(split_netloc[1])
except:
raise ValueError('Invalid port: "%s"' % split_netloc[1])
elif protocol == 'https':
port = 443
elif protocol == 'http':
port = 80
else:
raise ValueError('Invalid scheme: "%s"' % protocol)
return protocol, domain, port, path
class BatchScan(object):
def __init__(self, protocol, domain, port, path):
self.protocol = protocol
self.domain = domain
self.port = port
self.paths = set()
self.paths.add(path)
def get_root_url(self):
return '%s://%s:%s/' % (self.protocol, self.domain, self.port)
def matches(self, protocol, domain, port):
return (self.protocol == protocol and
self.domain == domain and
self.port == port)
def add_path(self, path):
return self.paths.add(path)
def get_paths(self):
return list(self.paths)
|
tagcubeio/tagcube-cli
|
tagcube_cli/subcommands/batch.py
|
parse_url
|
python
|
def parse_url(url):
split_url = url.split('/', 3)
if len(split_url) == 3:
# http://foo.com
path = '/'
elif len(split_url) == 4:
path = '/' + split_url[3]
else:
raise ValueError('Invalid URL: %s' % url)
try:
parse_result = urlparse(url)
except Exception:
raise ValueError('Invalid URL: %s' % url)
protocol = parse_result.scheme
protocol = protocol.lower()
if protocol not in ('http', 'https'):
raise ValueError('Invalid URL protocol "%s"' % protocol)
split_netloc = parse_result.netloc.split(':')
domain = split_netloc[0]
domain = domain.lower()
if len(split_netloc) == 2:
try:
port = int(split_netloc[1])
except:
raise ValueError('Invalid port: "%s"' % split_netloc[1])
elif protocol == 'https':
port = 443
elif protocol == 'http':
port = 80
else:
raise ValueError('Invalid scheme: "%s"' % protocol)
return protocol, domain, port, path
|
Parse a URL into the parts I need for processing:
* protocol
* domain
* port
* path
:param url: A string
:return: A tuple containing the above
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube_cli/subcommands/batch.py#L64-L110
| null |
from urlparse import urlparse
from tagcube_cli.logger import cli_logger
def do_batch_scan(client, cmd_args):
if not client.test_auth_credentials():
raise ValueError('Invalid TagCube REST API credentials.')
cli_logger.debug('Authentication credentials are valid')
for scan in create_scans(cmd_args.urls_file):
scan_resource = client.quick_scan(scan.get_root_url(),
email_notify=cmd_args.email_notify,
scan_profile=cmd_args.scan_profile,
path_list=scan.get_paths())
# pylint: disable=E1101
args = (scan_resource.id, scan.get_root_url())
cli_logger.info('Launched scan #%s to %s' % args)
# pylint: enable=E1101
def create_scans(urls_file):
"""
This method is rather simple, it will group the urls to be scanner together
based on (protocol, domain and port).
:param urls_file: The filename with all the URLs
:return: A list of scans to be run
"""
cli_logger.debug('Starting to process batch input file')
created_scans = []
for line in urls_file:
line = line.strip()
if line.startswith('#'):
continue
if not line:
continue
try:
protocol, domain, port, path = parse_url(line)
except ValueError, ve:
cli_logger.debug(str(ve))
continue
for scan in created_scans:
if scan.matches(protocol, domain, port):
scan.add_path(path)
args = (path, scan.get_root_url())
cli_logger.debug('Added %s to %s' % args)
break
else:
scan = BatchScan(protocol, domain, port, path)
created_scans.append(scan)
cli_logger.debug('Added a new scan to %s' % scan.get_root_url())
cli_logger.debug('Created a total of %s scans' % len(created_scans))
return created_scans
class BatchScan(object):
def __init__(self, protocol, domain, port, path):
self.protocol = protocol
self.domain = domain
self.port = port
self.paths = set()
self.paths.add(path)
def get_root_url(self):
return '%s://%s:%s/' % (self.protocol, self.domain, self.port)
def matches(self, protocol, domain, port):
return (self.protocol == protocol and
self.domain == domain and
self.port == port)
def add_path(self, path):
return self.paths.add(path)
def get_paths(self):
return list(self.paths)
|
tagcubeio/tagcube-cli
|
tagcube_cli/utils.py
|
parse_config_file
|
python
|
def parse_config_file():
for filename in ('.tagcube', os.path.expanduser('~/.tagcube')):
filename = os.path.abspath(filename)
if not os.path.exists(filename):
msg = 'TagCube configuration file "%s" does not exist'
cli_logger.debug(msg % filename)
continue
msg = 'Parsing tagcube configuration file "%s"'
cli_logger.debug(msg % filename)
email, api_key = _parse_config_file_impl(filename)
if email is not None and api_key is not None:
msg = ('Found authentication credentials:\n'
' email: %s\n'
' api_key: %s')
tokenized_api_key = '%s...%s' % (api_key[:3], api_key[-3:])
args = (email, tokenized_api_key)
cli_logger.debug(msg % args)
return email, api_key
else:
msg = 'Configuration file does not contain credentials'
cli_logger.debug(msg)
else:
return None, None
|
Find the .tagcube config file in the current directory, or in the
user's home and parse it. The one in the current directory has precedence.
:return: A tuple with:
- email
- api_token
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube_cli/utils.py#L38-L73
|
[
"def _parse_config_file_impl(filename):\n \"\"\"\n Format for the file is:\n\n credentials:\n email: ...\n api_token: ...\n\n :param filename: The filename to parse\n :return: A tuple with:\n - email\n - api_token\n \"\"\"\n api_key = None\n email = None\n\n try:\n doc = yaml.load(file(filename).read())\n\n email = doc['credentials']['email']\n api_key = doc['credentials']['api_key']\n except (KeyError, TypeError):\n print(INVALID_FILE)\n return None, None\n\n except yaml.scanner.ScannerError, e:\n print(SYNTAX_ERROR_FILE % (e.problem, e.problem_mark.line))\n return None, None\n\n # Just in case, we don't want the auth to fail because of a space\n email = email.strip()\n api_key = api_key.strip()\n\n if not is_valid_api_key(api_key):\n cli_logger.debug(INVALID_UUID)\n api_key = None\n\n if not is_valid_email(email):\n cli_logger.debug('Invalid email address: %s' % email)\n email = None\n\n return email, api_key\n"
] |
import re
import os
import yaml
import argparse
from tagcube_cli.logger import cli_logger
INVALID_UUID = ('Invalid REST API key, the right format looks like'
' 208e57a8-1173-49c9-b5f3-e15535e70e83 (include the dashes and'
' verify length)')
INVALID_FILE = '''\
Invalid .tagcube configuration file found, the expected format is:
credentials:
email: ...
api_key: ...
Replace the dots with your username and REST API key and try again.
Remember that YAML does not support tabs, spaces must be used to indent
"email" and "api_key".'''
SYNTAX_ERROR_FILE = '''\
Invalid .tagcube configuration file format, the parser returned "%s" at line %s.
The expected .tagcube file format is:
credentials:
email: ...
api_key: ...
Replace the dots with your username and REST API key and try again.
Remember that YAML does not support tabs, spaces must be used to indent
"email" and "api_key".'''
def parse_config_file():
"""
Find the .tagcube config file in the current directory, or in the
user's home and parse it. The one in the current directory has precedence.
:return: A tuple with:
- email
- api_token
"""
for filename in ('.tagcube', os.path.expanduser('~/.tagcube')):
filename = os.path.abspath(filename)
if not os.path.exists(filename):
msg = 'TagCube configuration file "%s" does not exist'
cli_logger.debug(msg % filename)
continue
msg = 'Parsing tagcube configuration file "%s"'
cli_logger.debug(msg % filename)
email, api_key = _parse_config_file_impl(filename)
if email is not None and api_key is not None:
msg = ('Found authentication credentials:\n'
' email: %s\n'
' api_key: %s')
tokenized_api_key = '%s...%s' % (api_key[:3], api_key[-3:])
args = (email, tokenized_api_key)
cli_logger.debug(msg % args)
return email, api_key
else:
msg = 'Configuration file does not contain credentials'
cli_logger.debug(msg)
else:
return None, None
def _parse_config_file_impl(filename):
"""
Format for the file is:
credentials:
email: ...
api_token: ...
:param filename: The filename to parse
:return: A tuple with:
- email
- api_token
"""
api_key = None
email = None
try:
doc = yaml.load(file(filename).read())
email = doc['credentials']['email']
api_key = doc['credentials']['api_key']
except (KeyError, TypeError):
print(INVALID_FILE)
return None, None
except yaml.scanner.ScannerError, e:
print(SYNTAX_ERROR_FILE % (e.problem, e.problem_mark.line))
return None, None
# Just in case, we don't want the auth to fail because of a space
email = email.strip()
api_key = api_key.strip()
if not is_valid_api_key(api_key):
cli_logger.debug(INVALID_UUID)
api_key = None
if not is_valid_email(email):
cli_logger.debug('Invalid email address: %s' % email)
email = None
return email, api_key
def get_config_from_env():
return (os.environ.get('TAGCUBE_EMAIL', None),
os.environ.get('TAGCUBE_API_KEY', None))
def is_valid_email(email):
"""
Very trivial check to verify that the user provided parameter is an email
"""
return '@' in email and '.' in email
def is_valid_api_key(api_key):
"""
API keys are UUID4(), so we just check that the length and format is the
expected one.
:param api_key:
:return:
"""
uuid_re = '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
return bool(re.match(uuid_re, api_key))
def is_valid_path(path):
"""
:return: True if the path is valid, else raise a ValueError with the
specific error
"""
if not path.startswith('/'):
msg = 'Invalid path "%s". Paths need to start with "/".'
raise ValueError(msg % path[:40])
for c in ' \t':
if c in path:
msg = ('Invalid character "%s" found in path. Paths need to be'
' URL-encoded.')
raise ValueError(msg % c)
return True
def argparse_email_type(email):
if not is_valid_email(email):
msg = '%s is not a valid email address.'
raise argparse.ArgumentTypeError(msg % email)
return email
def argparse_uuid_type(api_key):
if not is_valid_api_key(api_key):
raise argparse.ArgumentTypeError(INVALID_UUID)
return api_key
def argparse_url_type(url):
if url.startswith('http://'):
return url
if url.startswith('https://'):
return url
msg = '%s is not a valid URL.'
raise argparse.ArgumentTypeError(msg % url)
def argparse_path_list_type(path_file):
if not os.path.exists(path_file):
msg = 'The provided --path-file does not exist'
raise argparse.ArgumentTypeError(msg)
try:
file(path_file)
except:
msg = 'The provided --path-file can not be read'
raise argparse.ArgumentTypeError(msg)
try:
return path_file_to_list(path_file)
except ValueError, ve:
raise argparse.ArgumentTypeError(str(ve))
def path_file_to_list(path_file):
"""
:return: A list with the paths which are stored in a text file in a line-by-
line format. Validate each path using is_valid_path
"""
paths = []
path_file_fd = file(path_file)
for line_no, line in enumerate(path_file_fd.readlines(), start=1):
line = line.strip()
if not line:
# Blank line support
continue
if line.startswith('#'):
# Comment support
continue
try:
is_valid_path(line)
paths.append(line)
except ValueError, ve:
args = (ve, path_file, line_no)
raise ValueError('%s error found in %s:%s.' % args)
return paths
|
tagcubeio/tagcube-cli
|
tagcube_cli/utils.py
|
_parse_config_file_impl
|
python
|
def _parse_config_file_impl(filename):
api_key = None
email = None
try:
doc = yaml.load(file(filename).read())
email = doc['credentials']['email']
api_key = doc['credentials']['api_key']
except (KeyError, TypeError):
print(INVALID_FILE)
return None, None
except yaml.scanner.ScannerError, e:
print(SYNTAX_ERROR_FILE % (e.problem, e.problem_mark.line))
return None, None
# Just in case, we don't want the auth to fail because of a space
email = email.strip()
api_key = api_key.strip()
if not is_valid_api_key(api_key):
cli_logger.debug(INVALID_UUID)
api_key = None
if not is_valid_email(email):
cli_logger.debug('Invalid email address: %s' % email)
email = None
return email, api_key
|
Format for the file is:
credentials:
email: ...
api_token: ...
:param filename: The filename to parse
:return: A tuple with:
- email
- api_token
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube_cli/utils.py#L76-L117
|
[
"def is_valid_api_key(api_key):\n \"\"\"\n API keys are UUID4(), so we just check that the length and format is the\n expected one.\n\n :param api_key:\n :return:\n \"\"\"\n uuid_re = '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'\n return bool(re.match(uuid_re, api_key))\n",
"def is_valid_email(email):\n \"\"\"\n Very trivial check to verify that the user provided parameter is an email\n \"\"\"\n return '@' in email and '.' in email\n"
] |
import re
import os
import yaml
import argparse
from tagcube_cli.logger import cli_logger
INVALID_UUID = ('Invalid REST API key, the right format looks like'
' 208e57a8-1173-49c9-b5f3-e15535e70e83 (include the dashes and'
' verify length)')
INVALID_FILE = '''\
Invalid .tagcube configuration file found, the expected format is:
credentials:
email: ...
api_key: ...
Replace the dots with your username and REST API key and try again.
Remember that YAML does not support tabs, spaces must be used to indent
"email" and "api_key".'''
SYNTAX_ERROR_FILE = '''\
Invalid .tagcube configuration file format, the parser returned "%s" at line %s.
The expected .tagcube file format is:
credentials:
email: ...
api_key: ...
Replace the dots with your username and REST API key and try again.
Remember that YAML does not support tabs, spaces must be used to indent
"email" and "api_key".'''
def parse_config_file():
"""
Find the .tagcube config file in the current directory, or in the
user's home and parse it. The one in the current directory has precedence.
:return: A tuple with:
- email
- api_token
"""
for filename in ('.tagcube', os.path.expanduser('~/.tagcube')):
filename = os.path.abspath(filename)
if not os.path.exists(filename):
msg = 'TagCube configuration file "%s" does not exist'
cli_logger.debug(msg % filename)
continue
msg = 'Parsing tagcube configuration file "%s"'
cli_logger.debug(msg % filename)
email, api_key = _parse_config_file_impl(filename)
if email is not None and api_key is not None:
msg = ('Found authentication credentials:\n'
' email: %s\n'
' api_key: %s')
tokenized_api_key = '%s...%s' % (api_key[:3], api_key[-3:])
args = (email, tokenized_api_key)
cli_logger.debug(msg % args)
return email, api_key
else:
msg = 'Configuration file does not contain credentials'
cli_logger.debug(msg)
else:
return None, None
def _parse_config_file_impl(filename):
"""
Format for the file is:
credentials:
email: ...
api_token: ...
:param filename: The filename to parse
:return: A tuple with:
- email
- api_token
"""
api_key = None
email = None
try:
doc = yaml.load(file(filename).read())
email = doc['credentials']['email']
api_key = doc['credentials']['api_key']
except (KeyError, TypeError):
print(INVALID_FILE)
return None, None
except yaml.scanner.ScannerError, e:
print(SYNTAX_ERROR_FILE % (e.problem, e.problem_mark.line))
return None, None
# Just in case, we don't want the auth to fail because of a space
email = email.strip()
api_key = api_key.strip()
if not is_valid_api_key(api_key):
cli_logger.debug(INVALID_UUID)
api_key = None
if not is_valid_email(email):
cli_logger.debug('Invalid email address: %s' % email)
email = None
return email, api_key
def get_config_from_env():
return (os.environ.get('TAGCUBE_EMAIL', None),
os.environ.get('TAGCUBE_API_KEY', None))
def is_valid_email(email):
"""
Very trivial check to verify that the user provided parameter is an email
"""
return '@' in email and '.' in email
def is_valid_api_key(api_key):
"""
API keys are UUID4(), so we just check that the length and format is the
expected one.
:param api_key:
:return:
"""
uuid_re = '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
return bool(re.match(uuid_re, api_key))
def is_valid_path(path):
"""
:return: True if the path is valid, else raise a ValueError with the
specific error
"""
if not path.startswith('/'):
msg = 'Invalid path "%s". Paths need to start with "/".'
raise ValueError(msg % path[:40])
for c in ' \t':
if c in path:
msg = ('Invalid character "%s" found in path. Paths need to be'
' URL-encoded.')
raise ValueError(msg % c)
return True
def argparse_email_type(email):
if not is_valid_email(email):
msg = '%s is not a valid email address.'
raise argparse.ArgumentTypeError(msg % email)
return email
def argparse_uuid_type(api_key):
if not is_valid_api_key(api_key):
raise argparse.ArgumentTypeError(INVALID_UUID)
return api_key
def argparse_url_type(url):
if url.startswith('http://'):
return url
if url.startswith('https://'):
return url
msg = '%s is not a valid URL.'
raise argparse.ArgumentTypeError(msg % url)
def argparse_path_list_type(path_file):
if not os.path.exists(path_file):
msg = 'The provided --path-file does not exist'
raise argparse.ArgumentTypeError(msg)
try:
file(path_file)
except:
msg = 'The provided --path-file can not be read'
raise argparse.ArgumentTypeError(msg)
try:
return path_file_to_list(path_file)
except ValueError, ve:
raise argparse.ArgumentTypeError(str(ve))
def path_file_to_list(path_file):
"""
:return: A list with the paths which are stored in a text file in a line-by-
line format. Validate each path using is_valid_path
"""
paths = []
path_file_fd = file(path_file)
for line_no, line in enumerate(path_file_fd.readlines(), start=1):
line = line.strip()
if not line:
# Blank line support
continue
if line.startswith('#'):
# Comment support
continue
try:
is_valid_path(line)
paths.append(line)
except ValueError, ve:
args = (ve, path_file, line_no)
raise ValueError('%s error found in %s:%s.' % args)
return paths
|
tagcubeio/tagcube-cli
|
tagcube_cli/utils.py
|
is_valid_path
|
python
|
def is_valid_path(path):
if not path.startswith('/'):
msg = 'Invalid path "%s". Paths need to start with "/".'
raise ValueError(msg % path[:40])
for c in ' \t':
if c in path:
msg = ('Invalid character "%s" found in path. Paths need to be'
' URL-encoded.')
raise ValueError(msg % c)
return True
|
:return: True if the path is valid, else raise a ValueError with the
specific error
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube_cli/utils.py#L144-L159
| null |
import re
import os
import yaml
import argparse
from tagcube_cli.logger import cli_logger
INVALID_UUID = ('Invalid REST API key, the right format looks like'
' 208e57a8-1173-49c9-b5f3-e15535e70e83 (include the dashes and'
' verify length)')
INVALID_FILE = '''\
Invalid .tagcube configuration file found, the expected format is:
credentials:
email: ...
api_key: ...
Replace the dots with your username and REST API key and try again.
Remember that YAML does not support tabs, spaces must be used to indent
"email" and "api_key".'''
SYNTAX_ERROR_FILE = '''\
Invalid .tagcube configuration file format, the parser returned "%s" at line %s.
The expected .tagcube file format is:
credentials:
email: ...
api_key: ...
Replace the dots with your username and REST API key and try again.
Remember that YAML does not support tabs, spaces must be used to indent
"email" and "api_key".'''
def parse_config_file():
"""
Find the .tagcube config file in the current directory, or in the
user's home and parse it. The one in the current directory has precedence.
:return: A tuple with:
- email
- api_token
"""
for filename in ('.tagcube', os.path.expanduser('~/.tagcube')):
filename = os.path.abspath(filename)
if not os.path.exists(filename):
msg = 'TagCube configuration file "%s" does not exist'
cli_logger.debug(msg % filename)
continue
msg = 'Parsing tagcube configuration file "%s"'
cli_logger.debug(msg % filename)
email, api_key = _parse_config_file_impl(filename)
if email is not None and api_key is not None:
msg = ('Found authentication credentials:\n'
' email: %s\n'
' api_key: %s')
tokenized_api_key = '%s...%s' % (api_key[:3], api_key[-3:])
args = (email, tokenized_api_key)
cli_logger.debug(msg % args)
return email, api_key
else:
msg = 'Configuration file does not contain credentials'
cli_logger.debug(msg)
else:
return None, None
def _parse_config_file_impl(filename):
"""
Format for the file is:
credentials:
email: ...
api_token: ...
:param filename: The filename to parse
:return: A tuple with:
- email
- api_token
"""
api_key = None
email = None
try:
doc = yaml.load(file(filename).read())
email = doc['credentials']['email']
api_key = doc['credentials']['api_key']
except (KeyError, TypeError):
print(INVALID_FILE)
return None, None
except yaml.scanner.ScannerError, e:
print(SYNTAX_ERROR_FILE % (e.problem, e.problem_mark.line))
return None, None
# Just in case, we don't want the auth to fail because of a space
email = email.strip()
api_key = api_key.strip()
if not is_valid_api_key(api_key):
cli_logger.debug(INVALID_UUID)
api_key = None
if not is_valid_email(email):
cli_logger.debug('Invalid email address: %s' % email)
email = None
return email, api_key
def get_config_from_env():
return (os.environ.get('TAGCUBE_EMAIL', None),
os.environ.get('TAGCUBE_API_KEY', None))
def is_valid_email(email):
"""
Very trivial check to verify that the user provided parameter is an email
"""
return '@' in email and '.' in email
def is_valid_api_key(api_key):
"""
API keys are UUID4(), so we just check that the length and format is the
expected one.
:param api_key:
:return:
"""
uuid_re = '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
return bool(re.match(uuid_re, api_key))
def argparse_email_type(email):
if not is_valid_email(email):
msg = '%s is not a valid email address.'
raise argparse.ArgumentTypeError(msg % email)
return email
def argparse_uuid_type(api_key):
if not is_valid_api_key(api_key):
raise argparse.ArgumentTypeError(INVALID_UUID)
return api_key
def argparse_url_type(url):
if url.startswith('http://'):
return url
if url.startswith('https://'):
return url
msg = '%s is not a valid URL.'
raise argparse.ArgumentTypeError(msg % url)
def argparse_path_list_type(path_file):
if not os.path.exists(path_file):
msg = 'The provided --path-file does not exist'
raise argparse.ArgumentTypeError(msg)
try:
file(path_file)
except:
msg = 'The provided --path-file can not be read'
raise argparse.ArgumentTypeError(msg)
try:
return path_file_to_list(path_file)
except ValueError, ve:
raise argparse.ArgumentTypeError(str(ve))
def path_file_to_list(path_file):
"""
:return: A list with the paths which are stored in a text file in a line-by-
line format. Validate each path using is_valid_path
"""
paths = []
path_file_fd = file(path_file)
for line_no, line in enumerate(path_file_fd.readlines(), start=1):
line = line.strip()
if not line:
# Blank line support
continue
if line.startswith('#'):
# Comment support
continue
try:
is_valid_path(line)
paths.append(line)
except ValueError, ve:
args = (ve, path_file, line_no)
raise ValueError('%s error found in %s:%s.' % args)
return paths
|
tagcubeio/tagcube-cli
|
tagcube_cli/utils.py
|
path_file_to_list
|
python
|
def path_file_to_list(path_file):
paths = []
path_file_fd = file(path_file)
for line_no, line in enumerate(path_file_fd.readlines(), start=1):
line = line.strip()
if not line:
# Blank line support
continue
if line.startswith('#'):
# Comment support
continue
try:
is_valid_path(line)
paths.append(line)
except ValueError, ve:
args = (ve, path_file, line_no)
raise ValueError('%s error found in %s:%s.' % args)
return paths
|
:return: A list with the paths which are stored in a text file in a line-by-
line format. Validate each path using is_valid_path
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube_cli/utils.py#L205-L231
|
[
"def is_valid_path(path):\n \"\"\"\n :return: True if the path is valid, else raise a ValueError with the\n specific error\n \"\"\"\n if not path.startswith('/'):\n msg = 'Invalid path \"%s\". Paths need to start with \"/\".'\n raise ValueError(msg % path[:40])\n\n for c in ' \\t':\n if c in path:\n msg = ('Invalid character \"%s\" found in path. Paths need to be'\n ' URL-encoded.')\n raise ValueError(msg % c)\n\n return True\n"
] |
import re
import os
import yaml
import argparse
from tagcube_cli.logger import cli_logger
INVALID_UUID = ('Invalid REST API key, the right format looks like'
' 208e57a8-1173-49c9-b5f3-e15535e70e83 (include the dashes and'
' verify length)')
INVALID_FILE = '''\
Invalid .tagcube configuration file found, the expected format is:
credentials:
email: ...
api_key: ...
Replace the dots with your username and REST API key and try again.
Remember that YAML does not support tabs, spaces must be used to indent
"email" and "api_key".'''
SYNTAX_ERROR_FILE = '''\
Invalid .tagcube configuration file format, the parser returned "%s" at line %s.
The expected .tagcube file format is:
credentials:
email: ...
api_key: ...
Replace the dots with your username and REST API key and try again.
Remember that YAML does not support tabs, spaces must be used to indent
"email" and "api_key".'''
def parse_config_file():
"""
Find the .tagcube config file in the current directory, or in the
user's home and parse it. The one in the current directory has precedence.
:return: A tuple with:
- email
- api_token
"""
for filename in ('.tagcube', os.path.expanduser('~/.tagcube')):
filename = os.path.abspath(filename)
if not os.path.exists(filename):
msg = 'TagCube configuration file "%s" does not exist'
cli_logger.debug(msg % filename)
continue
msg = 'Parsing tagcube configuration file "%s"'
cli_logger.debug(msg % filename)
email, api_key = _parse_config_file_impl(filename)
if email is not None and api_key is not None:
msg = ('Found authentication credentials:\n'
' email: %s\n'
' api_key: %s')
tokenized_api_key = '%s...%s' % (api_key[:3], api_key[-3:])
args = (email, tokenized_api_key)
cli_logger.debug(msg % args)
return email, api_key
else:
msg = 'Configuration file does not contain credentials'
cli_logger.debug(msg)
else:
return None, None
def _parse_config_file_impl(filename):
"""
Format for the file is:
credentials:
email: ...
api_token: ...
:param filename: The filename to parse
:return: A tuple with:
- email
- api_token
"""
api_key = None
email = None
try:
doc = yaml.load(file(filename).read())
email = doc['credentials']['email']
api_key = doc['credentials']['api_key']
except (KeyError, TypeError):
print(INVALID_FILE)
return None, None
except yaml.scanner.ScannerError, e:
print(SYNTAX_ERROR_FILE % (e.problem, e.problem_mark.line))
return None, None
# Just in case, we don't want the auth to fail because of a space
email = email.strip()
api_key = api_key.strip()
if not is_valid_api_key(api_key):
cli_logger.debug(INVALID_UUID)
api_key = None
if not is_valid_email(email):
cli_logger.debug('Invalid email address: %s' % email)
email = None
return email, api_key
def get_config_from_env():
return (os.environ.get('TAGCUBE_EMAIL', None),
os.environ.get('TAGCUBE_API_KEY', None))
def is_valid_email(email):
"""
Very trivial check to verify that the user provided parameter is an email
"""
return '@' in email and '.' in email
def is_valid_api_key(api_key):
"""
API keys are UUID4(), so we just check that the length and format is the
expected one.
:param api_key:
:return:
"""
uuid_re = '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
return bool(re.match(uuid_re, api_key))
def is_valid_path(path):
"""
:return: True if the path is valid, else raise a ValueError with the
specific error
"""
if not path.startswith('/'):
msg = 'Invalid path "%s". Paths need to start with "/".'
raise ValueError(msg % path[:40])
for c in ' \t':
if c in path:
msg = ('Invalid character "%s" found in path. Paths need to be'
' URL-encoded.')
raise ValueError(msg % c)
return True
def argparse_email_type(email):
if not is_valid_email(email):
msg = '%s is not a valid email address.'
raise argparse.ArgumentTypeError(msg % email)
return email
def argparse_uuid_type(api_key):
if not is_valid_api_key(api_key):
raise argparse.ArgumentTypeError(INVALID_UUID)
return api_key
def argparse_url_type(url):
if url.startswith('http://'):
return url
if url.startswith('https://'):
return url
msg = '%s is not a valid URL.'
raise argparse.ArgumentTypeError(msg % url)
def argparse_path_list_type(path_file):
if not os.path.exists(path_file):
msg = 'The provided --path-file does not exist'
raise argparse.ArgumentTypeError(msg)
try:
file(path_file)
except:
msg = 'The provided --path-file can not be read'
raise argparse.ArgumentTypeError(msg)
try:
return path_file_to_list(path_file)
except ValueError, ve:
raise argparse.ArgumentTypeError(str(ve))
|
tagcubeio/tagcube-cli
|
tagcube/client/api.py
|
TagCubeClient.quick_scan
|
python
|
def quick_scan(self, target_url, email_notify=None,
scan_profile='full_audit', path_list=('/',)):
#
# Scan profile handling
#
scan_profile_resource = self.get_scan_profile(scan_profile)
if scan_profile_resource is None:
msg = 'The specified scan profile "%s" does not exist'
raise ValueError(msg % scan_profile)
#
# Domain verification handling
#
domain = get_domain_from_url(target_url)
port = get_port_from_url(target_url)
is_ssl = use_ssl(target_url)
# First, is there a domain resource to verify?
domain_resource = self.get_domain(domain)
if domain_resource is None:
domain_resource = self.domain_add(domain)
verification_resource = self.get_latest_verification(domain_resource.domain,
port, is_ssl)
if verification_resource is None:
# This seems to be the first scan to this domain, we'll have to
# verify the client's ownership.
#
# Depending on the user's configuration, license, etc. this can
# succeed or fail
verification_resource = self.verification_add(domain_resource.id,
port, is_ssl)
if not self.can_scan(verification_resource):
msg = verification_resource.get('verification_message', '')
raise ValueError(CAN_NOT_SCAN_DOMAIN_ERROR % msg)
#
# Email notification handling
#
notif_email = self.email if email_notify is None else email_notify
email_notification_resource = self.get_email_notification(notif_email)
if email_notification_resource is None:
email_notification_resource = self.email_notification_add(notif_email)
#
# Scan!
#
return self.low_level_scan(verification_resource, scan_profile_resource,
path_list, [email_notification_resource])
|
:param target_url: The target url e.g. https://www.tagcube.io/
:param email_notify: The notification email e.g. user@example.com
:param scan_profile: The name of the scan profile
:param path_list: The list of paths to use in the crawling bootstrap
The basic idea around this method is to provide users with a quick way
to start a new scan. We perform these steps:
* If the domain in the target_url is not created, we create a new
domain resource for it.
* We verify that the user's license can scan the target domain (new
or already created manually by the user)
* We'll notify about this scan via email, if no email_notify is
specified we'll use the TagCube's user email for notification. If
there is no email notification for this email, we'll create one.
* The scan will be started using the scan_profile and path_list
provided as parameter.
Lots of bad things can trigger errors. All errors trigger exceptions.
Some of the situations where you'll get errors are:
* The user's license can't scan the provided domain
* We failed to connect to the REST API
* The specified scan_profile does not exist
:return: The newly generated scan id
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube/client/api.py#L97-L180
|
[
"def get_domain_from_url(url):\n return urlparse.urlparse(url).netloc.split(':')[0]\n",
"def use_ssl(url):\n return url.lower().startswith('https://')",
"def get_port_from_url(url):\n if url.lower().startswith('http://'):\n default = '80'\n elif url.lower().startswith('https://'):\n default = '443'\n else:\n default = '80'\n\n try:\n port = urlparse.urlparse(url).netloc.split(':')[1]\n except IndexError:\n return default\n else:\n return port\n",
"def low_level_scan(self, verification_resource, scan_profile_resource,\n path_list, notification_resource_list):\n \"\"\"\n Low level implementation of the scan launch which allows you to start\n a new scan when you already know the ids for the required resources.\n\n :param verification_resource: The verification associated with the\n domain resource to scan\n :param scan_profile_resource: The scan profile resource\n :param path_list: A list with the paths\n :param notification_resource_list: The notifications to use\n\n All the *_resource* parameters are obtained by calling the respective\n getters such as:\n - get_email_notification\n - get_scan_profile\n\n And are expected to be of Resource type\n\n This method's last step is to send a POST request to /1.0/scans/ using\n a post-data similar to:\n\n {\"verification_href\": \"/1.0/verifications/6\",\n \"profile_href\": \"/1.0/profiles/2\",\n \"start_time\": \"now\",\n \"email_notifications_href\": [],\n \"path_list\": [\"/\"]}'\n\n :return: The newly generated scan id\n \"\"\"\n data = {\"verification_href\": verification_resource.href,\n \"profile_href\": scan_profile_resource.href,\n \"start_time\": \"now\",\n \"email_notifications_href\": [n.href for n in notification_resource_list],\n \"path_list\": path_list}\n url = self.build_full_url('/scans/')\n return self.create_resource(url, data)\n",
"def get_scan_profile(self, scan_profile):\n \"\"\"\n :return: The scan profile resource (as Resource), or None\n \"\"\"\n return self.filter_resource('profiles', 'name', scan_profile)\n",
"def verification_add(self, domain_resource_id, port, is_ssl):\n \"\"\"\n Sends a POST to /1.0/verifications/ using this post-data:\n\n {\"domain_href\": \"/1.0/domains/2\",\n \"port\":80,\n \"ssl\":false}\n\n :param domain_resource_id: The domain id to verify\n :param port: The TCP port\n :param is_ssl: Boolean indicating if we should use ssl\n\n :return: The newly created resource\n \"\"\"\n data = {\"domain_href\": self.build_api_path('domains',\n domain_resource_id),\n \"port\": port,\n \"ssl\": 'true' if is_ssl else 'false'}\n url = self.build_full_url(self.VERIFICATIONS)\n return self.create_resource(url, data)\n",
"def get_latest_verification(self, domain_name, port, is_ssl):\n \"\"\"\n :return: A verification resource (as Resource), or None. If there is\n more than one verification resource available it will return\n the latest one (the one with the higher id attribute).\n \"\"\"\n filter_dict = {'port': port,\n 'ssl': 'true' if is_ssl else 'false',\n 'domain': domain_name,\n 'success': True}\n return self.multi_filter_resource('verifications', filter_dict,\n result_handler=LATEST_RESULT)\n",
"def get_email_notification(self, notif_email):\n \"\"\"\n :return: The email notification resource for notif_email, or None\n \"\"\"\n return self.filter_resource('notifications/email', 'email', notif_email)\n",
"def email_notification_add(self, notif_email, first_name='None',\n last_name='None', description=DESCRIPTION):\n \"\"\"\n Sends a POST to /1.0/notifications/email/ using this post-data:\n\n {\"email\": \"andres.riancho@gmail.com\",\n \"first_name\": \"Andres\",\n \"last_name\": \"Riancho\",\n \"description\": \"Notification email\"}\n\n :return: The id of the newly created email notification resource\n \"\"\"\n data = {\"email\": notif_email,\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"description\": description}\n url = self.build_full_url('/notifications/email/')\n return self.create_resource(url, data)\n",
"def can_scan(self, verification_resource):\n \"\"\"\n Failed verifications look like this:\n {\n \"domain\": \"/1.0/domains/5\",\n \"href\": \"/1.0/verifications/2\",\n \"id\": 2,\n \"port\": 80,\n \"ssl\": false,\n \"success\": false,\n \"verification_message\": \"The HTTP response body does NOT\n contain the verification code.\"\n }\n\n Successful verifications look like this:\n {\n \"domain\": \"/1.0/domains/2\",\n \"href\": \"/1.0/verifications/3\",\n \"id\": 3,\n \"port\": 80,\n \"ssl\": false,\n \"success\": true,\n \"verification_message\": \"Verification success\"\n }\n\n :return: True if the current user can scan the specified domain\n associated with the verification\n \"\"\"\n return verification_resource.success\n",
"def get_domain(self, domain):\n \"\"\"\n :param domain: The domain to query\n :return: The domain resource (as json), or None\n \"\"\"\n return self.filter_resource('domains', 'domain', domain)\n",
"def domain_add(self, domain, description=DESCRIPTION):\n \"\"\"\n Sends a POST to /1.0/domains/ using this post-data:\n\n {\"domain\": \"www.fogfu.com\",\n \"description\":\"Added by tagcube-api\"}\n\n :param domain: The domain name to add as a new resource\n :return: The newly created resource\n \"\"\"\n data = {\"domain\": domain,\n \"description\": description}\n url = self.build_full_url(self.DOMAINS)\n return self.create_resource(url, data)\n"
] |
class TagCubeClient(object):
DEFAULT_ROOT_URL = 'https://api.tagcube.io/'
API_VERSION = '1.0'
SELF_URL = '/users/~'
DOMAINS = '/domains/'
SCANS = '/scans/'
VERIFICATIONS = '/verifications/'
SCAN_PROFILES = '/profiles/'
DESCRIPTION = 'Created by TagCube REST API client'
def __init__(self, email, api_key, verbose=False):
self.email = email
self.api_key = api_key
self.session = None
self.root_url = os.environ.get('ROOT_URL', self.DEFAULT_ROOT_URL)
self.verify = self.root_url == self.DEFAULT_ROOT_URL
if not self.verify:
# Remove warnings when running tests
#
# InsecureRequestWarning: Unverified HTTPS request is being made
requests.packages.urllib3.disable_warnings()
self.set_verbose(verbose)
self.configure_requests()
def test_auth_credentials(self):
"""
:return: True when the credentials are properly configured.
"""
try:
code, _ = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return False
else:
return code == 200
def get_current_user(self):
try:
code, data = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return None
else:
return data
def low_level_scan(self, verification_resource, scan_profile_resource,
path_list, notification_resource_list):
"""
Low level implementation of the scan launch which allows you to start
a new scan when you already know the ids for the required resources.
:param verification_resource: The verification associated with the
domain resource to scan
:param scan_profile_resource: The scan profile resource
:param path_list: A list with the paths
:param notification_resource_list: The notifications to use
All the *_resource* parameters are obtained by calling the respective
getters such as:
- get_email_notification
- get_scan_profile
And are expected to be of Resource type
This method's last step is to send a POST request to /1.0/scans/ using
a post-data similar to:
{"verification_href": "/1.0/verifications/6",
"profile_href": "/1.0/profiles/2",
"start_time": "now",
"email_notifications_href": [],
"path_list": ["/"]}'
:return: The newly generated scan id
"""
data = {"verification_href": verification_resource.href,
"profile_href": scan_profile_resource.href,
"start_time": "now",
"email_notifications_href": [n.href for n in notification_resource_list],
"path_list": path_list}
url = self.build_full_url('/scans/')
return self.create_resource(url, data)
def get_scan_profile(self, scan_profile):
"""
:return: The scan profile resource (as Resource), or None
"""
return self.filter_resource('profiles', 'name', scan_profile)
def verification_add(self, domain_resource_id, port, is_ssl):
"""
Sends a POST to /1.0/verifications/ using this post-data:
{"domain_href": "/1.0/domains/2",
"port":80,
"ssl":false}
:param domain_resource_id: The domain id to verify
:param port: The TCP port
:param is_ssl: Boolean indicating if we should use ssl
:return: The newly created resource
"""
data = {"domain_href": self.build_api_path('domains',
domain_resource_id),
"port": port,
"ssl": 'true' if is_ssl else 'false'}
url = self.build_full_url(self.VERIFICATIONS)
return self.create_resource(url, data)
def get_latest_verification(self, domain_name, port, is_ssl):
"""
:return: A verification resource (as Resource), or None. If there is
more than one verification resource available it will return
the latest one (the one with the higher id attribute).
"""
filter_dict = {'port': port,
'ssl': 'true' if is_ssl else 'false',
'domain': domain_name,
'success': True}
return self.multi_filter_resource('verifications', filter_dict,
result_handler=LATEST_RESULT)
def multi_filter_resource(self, resource_name, filter_dict,
result_handler=ONE_RESULT):
url = self.build_full_url('/%s/?%s' % (resource_name,
urllib.urlencode(filter_dict)))
code, _json = self.send_request(url)
if isinstance(_json, dict) and 'error' in _json:
# Catch errors like this one:
#
# {"error": "Invalid resource lookup data provided
# (mismatched type)."}
raise TagCubeAPIException(_json['error'])
return RESULT_HANDLERS[result_handler](resource_name,
filter_dict, _json)
def filter_resource(self, resource_name, field_name, field_value,
result_handler=ONE_RESULT):
"""
:return: The resource (as json), or None
"""
return self.multi_filter_resource(resource_name,
{field_name: field_value},
result_handler=result_handler)
def get_email_notification(self, notif_email):
"""
:return: The email notification resource for notif_email, or None
"""
return self.filter_resource('notifications/email', 'email', notif_email)
def email_notification_add(self, notif_email, first_name='None',
last_name='None', description=DESCRIPTION):
"""
Sends a POST to /1.0/notifications/email/ using this post-data:
{"email": "andres.riancho@gmail.com",
"first_name": "Andres",
"last_name": "Riancho",
"description": "Notification email"}
:return: The id of the newly created email notification resource
"""
data = {"email": notif_email,
"first_name": first_name,
"last_name": last_name,
"description": description}
url = self.build_full_url('/notifications/email/')
return self.create_resource(url, data)
def can_scan(self, verification_resource):
"""
Failed verifications look like this:
{
"domain": "/1.0/domains/5",
"href": "/1.0/verifications/2",
"id": 2,
"port": 80,
"ssl": false,
"success": false,
"verification_message": "The HTTP response body does NOT
contain the verification code."
}
Successful verifications look like this:
{
"domain": "/1.0/domains/2",
"href": "/1.0/verifications/3",
"id": 3,
"port": 80,
"ssl": false,
"success": true,
"verification_message": "Verification success"
}
:return: True if the current user can scan the specified domain
associated with the verification
"""
return verification_resource.success
def get_domain(self, domain):
"""
:param domain: The domain to query
:return: The domain resource (as json), or None
"""
return self.filter_resource('domains', 'domain', domain)
def domain_add(self, domain, description=DESCRIPTION):
"""
Sends a POST to /1.0/domains/ using this post-data:
{"domain": "www.fogfu.com",
"description":"Added by tagcube-api"}
:param domain: The domain name to add as a new resource
:return: The newly created resource
"""
data = {"domain": domain,
"description": description}
url = self.build_full_url(self.DOMAINS)
return self.create_resource(url, data)
def get_scan(self, scan_id):
"""
:param scan_id: The scan ID as a string
:return: A resource containing the scan information
"""
url = self.build_full_url('%s%s' % (self.SCANS, scan_id))
_, json_data = self.send_request(url)
return Resource(json_data)
def create_resource(self, url, data):
"""
Shortcut for creating a new resource
:return: The newly created resource as a Resource object
"""
status_code, json_data = self.send_request(url, data, method='POST')
if status_code != 201:
msg = 'Expected 201 status code, got %s. Failed to create resource.'
raise TagCubeAPIException(msg % status_code)
try:
return Resource(json_data)
except KeyError:
# Parse the error and raise an exception, errors look like:
# {u'error': [u'The domain foo.com already exists.']}
error_string = u' '.join(json_data['error'])
raise TagCubeAPIException(error_string)
def set_verbose(self, verbose):
# Get level based on verbose boolean
level = logging.DEBUG if verbose else logging.CRITICAL
# Configure my own logger
api_logger.setLevel(level=level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
api_logger.addHandler(ch)
# Configure the loggers for urllib3, requests and httplib
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(level)
requests_log.propagate = True
requests_log = logging.getLogger("requests")
requests_log.setLevel(level)
http_client.HTTPConnection.debuglevel = 1 if verbose else 0
def configure_requests(self):
self.session = requests.Session()
self.session.auth = (self.email, self.api_key)
headers = {'Content-Type': 'application/json',
'User-Agent': 'TagCubeClient %s' % __VERSION__}
self.session.headers.update(headers)
def handle_api_errors(self, status_code, json_data):
"""
This method parses all the HTTP responses sent by the REST API and
raises exceptions if required. Basically tries to find responses with
this format:
{
'error': ['The domain foo.com already exists.']
}
Or this other:
{
"scans": {
"__all__": [
"Not a verified domain. You need to verify..."
]
}
}
And raise TagCubeAPIException with the correct message.
:param status_code: The HTTP response code
:param json_data: The HTTP response body decoded as JSON
"""
error_list = []
if 'error' in json_data and len(json_data) == 1 \
and isinstance(json_data, dict) and isinstance(json_data['error'], list):
error_list = json_data['error']
elif status_code == 400:
for main_error_key in json_data:
for sub_error_key in json_data[main_error_key]:
error_list.extend(json_data[main_error_key][sub_error_key])
# Only raise an exception if we had any errors
if error_list:
error_string = u' '.join(error_list)
raise TagCubeAPIException(error_string)
def send_request(self, url, json_data=None, method='GET'):
if method == 'GET':
response = self.session.get(url, verify=self.verify)
elif method == 'POST':
data = json.dumps(json_data)
response = self.session.post(url, data=data, verify=self.verify)
else:
raise ValueError('Invalid HTTP method: "%s"' % method)
if response.status_code == 401:
raise IncorrectAPICredentials('Invalid TagCube API credentials')
try:
json_data = response.json()
except ValueError:
msg = ('TagCube REST API did not return JSON, if this issue'
' persists please contact support@tagcube.io')
raise TagCubeAPIException(msg)
pretty_json = json.dumps(json_data, indent=4)
msg = 'Received %s HTTP response from the wire:\n%s'
api_logger.debug(msg % (response.status_code, pretty_json))
# Error handling
self.handle_api_errors(response.status_code, json_data)
return response.status_code, json_data
def build_full_url(self, last_part):
return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)
def build_api_path(self, resource_name, last_part=''):
return '/%s/%s/%s' % (self.API_VERSION, resource_name, last_part)
|
tagcubeio/tagcube-cli
|
tagcube/client/api.py
|
TagCubeClient.low_level_scan
|
python
|
def low_level_scan(self, verification_resource, scan_profile_resource,
path_list, notification_resource_list):
data = {"verification_href": verification_resource.href,
"profile_href": scan_profile_resource.href,
"start_time": "now",
"email_notifications_href": [n.href for n in notification_resource_list],
"path_list": path_list}
url = self.build_full_url('/scans/')
return self.create_resource(url, data)
|
Low level implementation of the scan launch which allows you to start
a new scan when you already know the ids for the required resources.
:param verification_resource: The verification associated with the
domain resource to scan
:param scan_profile_resource: The scan profile resource
:param path_list: A list with the paths
:param notification_resource_list: The notifications to use
All the *_resource* parameters are obtained by calling the respective
getters such as:
- get_email_notification
- get_scan_profile
And are expected to be of Resource type
This method's last step is to send a POST request to /1.0/scans/ using
a post-data similar to:
{"verification_href": "/1.0/verifications/6",
"profile_href": "/1.0/profiles/2",
"start_time": "now",
"email_notifications_href": [],
"path_list": ["/"]}'
:return: The newly generated scan id
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube/client/api.py#L182-L218
|
[
"def create_resource(self, url, data):\n \"\"\"\n Shortcut for creating a new resource\n :return: The newly created resource as a Resource object\n \"\"\"\n status_code, json_data = self.send_request(url, data, method='POST')\n\n if status_code != 201:\n msg = 'Expected 201 status code, got %s. Failed to create resource.'\n raise TagCubeAPIException(msg % status_code)\n\n try:\n return Resource(json_data)\n except KeyError:\n # Parse the error and raise an exception, errors look like:\n # {u'error': [u'The domain foo.com already exists.']}\n error_string = u' '.join(json_data['error'])\n raise TagCubeAPIException(error_string)\n",
"def build_full_url(self, last_part):\n return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)\n"
] |
class TagCubeClient(object):
DEFAULT_ROOT_URL = 'https://api.tagcube.io/'
API_VERSION = '1.0'
SELF_URL = '/users/~'
DOMAINS = '/domains/'
SCANS = '/scans/'
VERIFICATIONS = '/verifications/'
SCAN_PROFILES = '/profiles/'
DESCRIPTION = 'Created by TagCube REST API client'
def __init__(self, email, api_key, verbose=False):
self.email = email
self.api_key = api_key
self.session = None
self.root_url = os.environ.get('ROOT_URL', self.DEFAULT_ROOT_URL)
self.verify = self.root_url == self.DEFAULT_ROOT_URL
if not self.verify:
# Remove warnings when running tests
#
# InsecureRequestWarning: Unverified HTTPS request is being made
requests.packages.urllib3.disable_warnings()
self.set_verbose(verbose)
self.configure_requests()
def test_auth_credentials(self):
"""
:return: True when the credentials are properly configured.
"""
try:
code, _ = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return False
else:
return code == 200
def get_current_user(self):
try:
code, data = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return None
else:
return data
def quick_scan(self, target_url, email_notify=None,
scan_profile='full_audit', path_list=('/',)):
"""
:param target_url: The target url e.g. https://www.tagcube.io/
:param email_notify: The notification email e.g. user@example.com
:param scan_profile: The name of the scan profile
:param path_list: The list of paths to use in the crawling bootstrap
The basic idea around this method is to provide users with a quick way
to start a new scan. We perform these steps:
* If the domain in the target_url is not created, we create a new
domain resource for it.
* We verify that the user's license can scan the target domain (new
or already created manually by the user)
* We'll notify about this scan via email, if no email_notify is
specified we'll use the TagCube's user email for notification. If
there is no email notification for this email, we'll create one.
* The scan will be started using the scan_profile and path_list
provided as parameter.
Lots of bad things can trigger errors. All errors trigger exceptions.
Some of the situations where you'll get errors are:
* The user's license can't scan the provided domain
* We failed to connect to the REST API
* The specified scan_profile does not exist
:return: The newly generated scan id
"""
#
# Scan profile handling
#
scan_profile_resource = self.get_scan_profile(scan_profile)
if scan_profile_resource is None:
msg = 'The specified scan profile "%s" does not exist'
raise ValueError(msg % scan_profile)
#
# Domain verification handling
#
domain = get_domain_from_url(target_url)
port = get_port_from_url(target_url)
is_ssl = use_ssl(target_url)
# First, is there a domain resource to verify?
domain_resource = self.get_domain(domain)
if domain_resource is None:
domain_resource = self.domain_add(domain)
verification_resource = self.get_latest_verification(domain_resource.domain,
port, is_ssl)
if verification_resource is None:
# This seems to be the first scan to this domain, we'll have to
# verify the client's ownership.
#
# Depending on the user's configuration, license, etc. this can
# succeed or fail
verification_resource = self.verification_add(domain_resource.id,
port, is_ssl)
if not self.can_scan(verification_resource):
msg = verification_resource.get('verification_message', '')
raise ValueError(CAN_NOT_SCAN_DOMAIN_ERROR % msg)
#
# Email notification handling
#
notif_email = self.email if email_notify is None else email_notify
email_notification_resource = self.get_email_notification(notif_email)
if email_notification_resource is None:
email_notification_resource = self.email_notification_add(notif_email)
#
# Scan!
#
return self.low_level_scan(verification_resource, scan_profile_resource,
path_list, [email_notification_resource])
def get_scan_profile(self, scan_profile):
"""
:return: The scan profile resource (as Resource), or None
"""
return self.filter_resource('profiles', 'name', scan_profile)
def verification_add(self, domain_resource_id, port, is_ssl):
"""
Sends a POST to /1.0/verifications/ using this post-data:
{"domain_href": "/1.0/domains/2",
"port":80,
"ssl":false}
:param domain_resource_id: The domain id to verify
:param port: The TCP port
:param is_ssl: Boolean indicating if we should use ssl
:return: The newly created resource
"""
data = {"domain_href": self.build_api_path('domains',
domain_resource_id),
"port": port,
"ssl": 'true' if is_ssl else 'false'}
url = self.build_full_url(self.VERIFICATIONS)
return self.create_resource(url, data)
def get_latest_verification(self, domain_name, port, is_ssl):
"""
:return: A verification resource (as Resource), or None. If there is
more than one verification resource available it will return
the latest one (the one with the higher id attribute).
"""
filter_dict = {'port': port,
'ssl': 'true' if is_ssl else 'false',
'domain': domain_name,
'success': True}
return self.multi_filter_resource('verifications', filter_dict,
result_handler=LATEST_RESULT)
def multi_filter_resource(self, resource_name, filter_dict,
result_handler=ONE_RESULT):
url = self.build_full_url('/%s/?%s' % (resource_name,
urllib.urlencode(filter_dict)))
code, _json = self.send_request(url)
if isinstance(_json, dict) and 'error' in _json:
# Catch errors like this one:
#
# {"error": "Invalid resource lookup data provided
# (mismatched type)."}
raise TagCubeAPIException(_json['error'])
return RESULT_HANDLERS[result_handler](resource_name,
filter_dict, _json)
def filter_resource(self, resource_name, field_name, field_value,
result_handler=ONE_RESULT):
"""
:return: The resource (as json), or None
"""
return self.multi_filter_resource(resource_name,
{field_name: field_value},
result_handler=result_handler)
def get_email_notification(self, notif_email):
"""
:return: The email notification resource for notif_email, or None
"""
return self.filter_resource('notifications/email', 'email', notif_email)
def email_notification_add(self, notif_email, first_name='None',
last_name='None', description=DESCRIPTION):
"""
Sends a POST to /1.0/notifications/email/ using this post-data:
{"email": "andres.riancho@gmail.com",
"first_name": "Andres",
"last_name": "Riancho",
"description": "Notification email"}
:return: The id of the newly created email notification resource
"""
data = {"email": notif_email,
"first_name": first_name,
"last_name": last_name,
"description": description}
url = self.build_full_url('/notifications/email/')
return self.create_resource(url, data)
def can_scan(self, verification_resource):
"""
Failed verifications look like this:
{
"domain": "/1.0/domains/5",
"href": "/1.0/verifications/2",
"id": 2,
"port": 80,
"ssl": false,
"success": false,
"verification_message": "The HTTP response body does NOT
contain the verification code."
}
Successful verifications look like this:
{
"domain": "/1.0/domains/2",
"href": "/1.0/verifications/3",
"id": 3,
"port": 80,
"ssl": false,
"success": true,
"verification_message": "Verification success"
}
:return: True if the current user can scan the specified domain
associated with the verification
"""
return verification_resource.success
def get_domain(self, domain):
"""
:param domain: The domain to query
:return: The domain resource (as json), or None
"""
return self.filter_resource('domains', 'domain', domain)
def domain_add(self, domain, description=DESCRIPTION):
"""
Sends a POST to /1.0/domains/ using this post-data:
{"domain": "www.fogfu.com",
"description":"Added by tagcube-api"}
:param domain: The domain name to add as a new resource
:return: The newly created resource
"""
data = {"domain": domain,
"description": description}
url = self.build_full_url(self.DOMAINS)
return self.create_resource(url, data)
def get_scan(self, scan_id):
"""
:param scan_id: The scan ID as a string
:return: A resource containing the scan information
"""
url = self.build_full_url('%s%s' % (self.SCANS, scan_id))
_, json_data = self.send_request(url)
return Resource(json_data)
def create_resource(self, url, data):
"""
Shortcut for creating a new resource
:return: The newly created resource as a Resource object
"""
status_code, json_data = self.send_request(url, data, method='POST')
if status_code != 201:
msg = 'Expected 201 status code, got %s. Failed to create resource.'
raise TagCubeAPIException(msg % status_code)
try:
return Resource(json_data)
except KeyError:
# Parse the error and raise an exception, errors look like:
# {u'error': [u'The domain foo.com already exists.']}
error_string = u' '.join(json_data['error'])
raise TagCubeAPIException(error_string)
def set_verbose(self, verbose):
# Get level based on verbose boolean
level = logging.DEBUG if verbose else logging.CRITICAL
# Configure my own logger
api_logger.setLevel(level=level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
api_logger.addHandler(ch)
# Configure the loggers for urllib3, requests and httplib
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(level)
requests_log.propagate = True
requests_log = logging.getLogger("requests")
requests_log.setLevel(level)
http_client.HTTPConnection.debuglevel = 1 if verbose else 0
def configure_requests(self):
self.session = requests.Session()
self.session.auth = (self.email, self.api_key)
headers = {'Content-Type': 'application/json',
'User-Agent': 'TagCubeClient %s' % __VERSION__}
self.session.headers.update(headers)
def handle_api_errors(self, status_code, json_data):
"""
This method parses all the HTTP responses sent by the REST API and
raises exceptions if required. Basically tries to find responses with
this format:
{
'error': ['The domain foo.com already exists.']
}
Or this other:
{
"scans": {
"__all__": [
"Not a verified domain. You need to verify..."
]
}
}
And raise TagCubeAPIException with the correct message.
:param status_code: The HTTP response code
:param json_data: The HTTP response body decoded as JSON
"""
error_list = []
if 'error' in json_data and len(json_data) == 1 \
and isinstance(json_data, dict) and isinstance(json_data['error'], list):
error_list = json_data['error']
elif status_code == 400:
for main_error_key in json_data:
for sub_error_key in json_data[main_error_key]:
error_list.extend(json_data[main_error_key][sub_error_key])
# Only raise an exception if we had any errors
if error_list:
error_string = u' '.join(error_list)
raise TagCubeAPIException(error_string)
def send_request(self, url, json_data=None, method='GET'):
if method == 'GET':
response = self.session.get(url, verify=self.verify)
elif method == 'POST':
data = json.dumps(json_data)
response = self.session.post(url, data=data, verify=self.verify)
else:
raise ValueError('Invalid HTTP method: "%s"' % method)
if response.status_code == 401:
raise IncorrectAPICredentials('Invalid TagCube API credentials')
try:
json_data = response.json()
except ValueError:
msg = ('TagCube REST API did not return JSON, if this issue'
' persists please contact support@tagcube.io')
raise TagCubeAPIException(msg)
pretty_json = json.dumps(json_data, indent=4)
msg = 'Received %s HTTP response from the wire:\n%s'
api_logger.debug(msg % (response.status_code, pretty_json))
# Error handling
self.handle_api_errors(response.status_code, json_data)
return response.status_code, json_data
def build_full_url(self, last_part):
return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)
def build_api_path(self, resource_name, last_part=''):
return '/%s/%s/%s' % (self.API_VERSION, resource_name, last_part)
|
tagcubeio/tagcube-cli
|
tagcube/client/api.py
|
TagCubeClient.verification_add
|
python
|
def verification_add(self, domain_resource_id, port, is_ssl):
data = {"domain_href": self.build_api_path('domains',
domain_resource_id),
"port": port,
"ssl": 'true' if is_ssl else 'false'}
url = self.build_full_url(self.VERIFICATIONS)
return self.create_resource(url, data)
|
Sends a POST to /1.0/verifications/ using this post-data:
{"domain_href": "/1.0/domains/2",
"port":80,
"ssl":false}
:param domain_resource_id: The domain id to verify
:param port: The TCP port
:param is_ssl: Boolean indicating if we should use ssl
:return: The newly created resource
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube/client/api.py#L226-L245
|
[
"def create_resource(self, url, data):\n \"\"\"\n Shortcut for creating a new resource\n :return: The newly created resource as a Resource object\n \"\"\"\n status_code, json_data = self.send_request(url, data, method='POST')\n\n if status_code != 201:\n msg = 'Expected 201 status code, got %s. Failed to create resource.'\n raise TagCubeAPIException(msg % status_code)\n\n try:\n return Resource(json_data)\n except KeyError:\n # Parse the error and raise an exception, errors look like:\n # {u'error': [u'The domain foo.com already exists.']}\n error_string = u' '.join(json_data['error'])\n raise TagCubeAPIException(error_string)\n",
"def build_full_url(self, last_part):\n return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)\n",
"def build_api_path(self, resource_name, last_part=''):\n return '/%s/%s/%s' % (self.API_VERSION, resource_name, last_part)\n"
] |
class TagCubeClient(object):
DEFAULT_ROOT_URL = 'https://api.tagcube.io/'
API_VERSION = '1.0'
SELF_URL = '/users/~'
DOMAINS = '/domains/'
SCANS = '/scans/'
VERIFICATIONS = '/verifications/'
SCAN_PROFILES = '/profiles/'
DESCRIPTION = 'Created by TagCube REST API client'
def __init__(self, email, api_key, verbose=False):
self.email = email
self.api_key = api_key
self.session = None
self.root_url = os.environ.get('ROOT_URL', self.DEFAULT_ROOT_URL)
self.verify = self.root_url == self.DEFAULT_ROOT_URL
if not self.verify:
# Remove warnings when running tests
#
# InsecureRequestWarning: Unverified HTTPS request is being made
requests.packages.urllib3.disable_warnings()
self.set_verbose(verbose)
self.configure_requests()
def test_auth_credentials(self):
"""
:return: True when the credentials are properly configured.
"""
try:
code, _ = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return False
else:
return code == 200
def get_current_user(self):
try:
code, data = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return None
else:
return data
def quick_scan(self, target_url, email_notify=None,
scan_profile='full_audit', path_list=('/',)):
"""
:param target_url: The target url e.g. https://www.tagcube.io/
:param email_notify: The notification email e.g. user@example.com
:param scan_profile: The name of the scan profile
:param path_list: The list of paths to use in the crawling bootstrap
The basic idea around this method is to provide users with a quick way
to start a new scan. We perform these steps:
* If the domain in the target_url is not created, we create a new
domain resource for it.
* We verify that the user's license can scan the target domain (new
or already created manually by the user)
* We'll notify about this scan via email, if no email_notify is
specified we'll use the TagCube's user email for notification. If
there is no email notification for this email, we'll create one.
* The scan will be started using the scan_profile and path_list
provided as parameter.
Lots of bad things can trigger errors. All errors trigger exceptions.
Some of the situations where you'll get errors are:
* The user's license can't scan the provided domain
* We failed to connect to the REST API
* The specified scan_profile does not exist
:return: The newly generated scan id
"""
#
# Scan profile handling
#
scan_profile_resource = self.get_scan_profile(scan_profile)
if scan_profile_resource is None:
msg = 'The specified scan profile "%s" does not exist'
raise ValueError(msg % scan_profile)
#
# Domain verification handling
#
domain = get_domain_from_url(target_url)
port = get_port_from_url(target_url)
is_ssl = use_ssl(target_url)
# First, is there a domain resource to verify?
domain_resource = self.get_domain(domain)
if domain_resource is None:
domain_resource = self.domain_add(domain)
verification_resource = self.get_latest_verification(domain_resource.domain,
port, is_ssl)
if verification_resource is None:
# This seems to be the first scan to this domain, we'll have to
# verify the client's ownership.
#
# Depending on the user's configuration, license, etc. this can
# succeed or fail
verification_resource = self.verification_add(domain_resource.id,
port, is_ssl)
if not self.can_scan(verification_resource):
msg = verification_resource.get('verification_message', '')
raise ValueError(CAN_NOT_SCAN_DOMAIN_ERROR % msg)
#
# Email notification handling
#
notif_email = self.email if email_notify is None else email_notify
email_notification_resource = self.get_email_notification(notif_email)
if email_notification_resource is None:
email_notification_resource = self.email_notification_add(notif_email)
#
# Scan!
#
return self.low_level_scan(verification_resource, scan_profile_resource,
path_list, [email_notification_resource])
def low_level_scan(self, verification_resource, scan_profile_resource,
path_list, notification_resource_list):
"""
Low level implementation of the scan launch which allows you to start
a new scan when you already know the ids for the required resources.
:param verification_resource: The verification associated with the
domain resource to scan
:param scan_profile_resource: The scan profile resource
:param path_list: A list with the paths
:param notification_resource_list: The notifications to use
All the *_resource* parameters are obtained by calling the respective
getters such as:
- get_email_notification
- get_scan_profile
And are expected to be of Resource type
This method's last step is to send a POST request to /1.0/scans/ using
a post-data similar to:
{"verification_href": "/1.0/verifications/6",
"profile_href": "/1.0/profiles/2",
"start_time": "now",
"email_notifications_href": [],
"path_list": ["/"]}'
:return: The newly generated scan id
"""
data = {"verification_href": verification_resource.href,
"profile_href": scan_profile_resource.href,
"start_time": "now",
"email_notifications_href": [n.href for n in notification_resource_list],
"path_list": path_list}
url = self.build_full_url('/scans/')
return self.create_resource(url, data)
def get_scan_profile(self, scan_profile):
"""
:return: The scan profile resource (as Resource), or None
"""
return self.filter_resource('profiles', 'name', scan_profile)
def get_latest_verification(self, domain_name, port, is_ssl):
"""
:return: A verification resource (as Resource), or None. If there is
more than one verification resource available it will return
the latest one (the one with the higher id attribute).
"""
filter_dict = {'port': port,
'ssl': 'true' if is_ssl else 'false',
'domain': domain_name,
'success': True}
return self.multi_filter_resource('verifications', filter_dict,
result_handler=LATEST_RESULT)
def multi_filter_resource(self, resource_name, filter_dict,
result_handler=ONE_RESULT):
url = self.build_full_url('/%s/?%s' % (resource_name,
urllib.urlencode(filter_dict)))
code, _json = self.send_request(url)
if isinstance(_json, dict) and 'error' in _json:
# Catch errors like this one:
#
# {"error": "Invalid resource lookup data provided
# (mismatched type)."}
raise TagCubeAPIException(_json['error'])
return RESULT_HANDLERS[result_handler](resource_name,
filter_dict, _json)
def filter_resource(self, resource_name, field_name, field_value,
result_handler=ONE_RESULT):
"""
:return: The resource (as json), or None
"""
return self.multi_filter_resource(resource_name,
{field_name: field_value},
result_handler=result_handler)
def get_email_notification(self, notif_email):
"""
:return: The email notification resource for notif_email, or None
"""
return self.filter_resource('notifications/email', 'email', notif_email)
def email_notification_add(self, notif_email, first_name='None',
last_name='None', description=DESCRIPTION):
"""
Sends a POST to /1.0/notifications/email/ using this post-data:
{"email": "andres.riancho@gmail.com",
"first_name": "Andres",
"last_name": "Riancho",
"description": "Notification email"}
:return: The id of the newly created email notification resource
"""
data = {"email": notif_email,
"first_name": first_name,
"last_name": last_name,
"description": description}
url = self.build_full_url('/notifications/email/')
return self.create_resource(url, data)
def can_scan(self, verification_resource):
"""
Failed verifications look like this:
{
"domain": "/1.0/domains/5",
"href": "/1.0/verifications/2",
"id": 2,
"port": 80,
"ssl": false,
"success": false,
"verification_message": "The HTTP response body does NOT
contain the verification code."
}
Successful verifications look like this:
{
"domain": "/1.0/domains/2",
"href": "/1.0/verifications/3",
"id": 3,
"port": 80,
"ssl": false,
"success": true,
"verification_message": "Verification success"
}
:return: True if the current user can scan the specified domain
associated with the verification
"""
return verification_resource.success
def get_domain(self, domain):
"""
:param domain: The domain to query
:return: The domain resource (as json), or None
"""
return self.filter_resource('domains', 'domain', domain)
def domain_add(self, domain, description=DESCRIPTION):
"""
Sends a POST to /1.0/domains/ using this post-data:
{"domain": "www.fogfu.com",
"description":"Added by tagcube-api"}
:param domain: The domain name to add as a new resource
:return: The newly created resource
"""
data = {"domain": domain,
"description": description}
url = self.build_full_url(self.DOMAINS)
return self.create_resource(url, data)
def get_scan(self, scan_id):
"""
:param scan_id: The scan ID as a string
:return: A resource containing the scan information
"""
url = self.build_full_url('%s%s' % (self.SCANS, scan_id))
_, json_data = self.send_request(url)
return Resource(json_data)
def create_resource(self, url, data):
"""
Shortcut for creating a new resource
:return: The newly created resource as a Resource object
"""
status_code, json_data = self.send_request(url, data, method='POST')
if status_code != 201:
msg = 'Expected 201 status code, got %s. Failed to create resource.'
raise TagCubeAPIException(msg % status_code)
try:
return Resource(json_data)
except KeyError:
# Parse the error and raise an exception, errors look like:
# {u'error': [u'The domain foo.com already exists.']}
error_string = u' '.join(json_data['error'])
raise TagCubeAPIException(error_string)
def set_verbose(self, verbose):
# Get level based on verbose boolean
level = logging.DEBUG if verbose else logging.CRITICAL
# Configure my own logger
api_logger.setLevel(level=level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
api_logger.addHandler(ch)
# Configure the loggers for urllib3, requests and httplib
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(level)
requests_log.propagate = True
requests_log = logging.getLogger("requests")
requests_log.setLevel(level)
http_client.HTTPConnection.debuglevel = 1 if verbose else 0
def configure_requests(self):
self.session = requests.Session()
self.session.auth = (self.email, self.api_key)
headers = {'Content-Type': 'application/json',
'User-Agent': 'TagCubeClient %s' % __VERSION__}
self.session.headers.update(headers)
def handle_api_errors(self, status_code, json_data):
"""
This method parses all the HTTP responses sent by the REST API and
raises exceptions if required. Basically tries to find responses with
this format:
{
'error': ['The domain foo.com already exists.']
}
Or this other:
{
"scans": {
"__all__": [
"Not a verified domain. You need to verify..."
]
}
}
And raise TagCubeAPIException with the correct message.
:param status_code: The HTTP response code
:param json_data: The HTTP response body decoded as JSON
"""
error_list = []
if 'error' in json_data and len(json_data) == 1 \
and isinstance(json_data, dict) and isinstance(json_data['error'], list):
error_list = json_data['error']
elif status_code == 400:
for main_error_key in json_data:
for sub_error_key in json_data[main_error_key]:
error_list.extend(json_data[main_error_key][sub_error_key])
# Only raise an exception if we had any errors
if error_list:
error_string = u' '.join(error_list)
raise TagCubeAPIException(error_string)
def send_request(self, url, json_data=None, method='GET'):
if method == 'GET':
response = self.session.get(url, verify=self.verify)
elif method == 'POST':
data = json.dumps(json_data)
response = self.session.post(url, data=data, verify=self.verify)
else:
raise ValueError('Invalid HTTP method: "%s"' % method)
if response.status_code == 401:
raise IncorrectAPICredentials('Invalid TagCube API credentials')
try:
json_data = response.json()
except ValueError:
msg = ('TagCube REST API did not return JSON, if this issue'
' persists please contact support@tagcube.io')
raise TagCubeAPIException(msg)
pretty_json = json.dumps(json_data, indent=4)
msg = 'Received %s HTTP response from the wire:\n%s'
api_logger.debug(msg % (response.status_code, pretty_json))
# Error handling
self.handle_api_errors(response.status_code, json_data)
return response.status_code, json_data
def build_full_url(self, last_part):
return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)
def build_api_path(self, resource_name, last_part=''):
return '/%s/%s/%s' % (self.API_VERSION, resource_name, last_part)
|
tagcubeio/tagcube-cli
|
tagcube/client/api.py
|
TagCubeClient.filter_resource
|
python
|
def filter_resource(self, resource_name, field_name, field_value,
result_handler=ONE_RESULT):
return self.multi_filter_resource(resource_name,
{field_name: field_value},
result_handler=result_handler)
|
:return: The resource (as json), or None
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube/client/api.py#L276-L283
|
[
"def multi_filter_resource(self, resource_name, filter_dict,\n result_handler=ONE_RESULT):\n url = self.build_full_url('/%s/?%s' % (resource_name,\n urllib.urlencode(filter_dict)))\n code, _json = self.send_request(url)\n\n if isinstance(_json, dict) and 'error' in _json:\n # Catch errors like this one:\n #\n # {\"error\": \"Invalid resource lookup data provided\n # (mismatched type).\"}\n raise TagCubeAPIException(_json['error'])\n\n return RESULT_HANDLERS[result_handler](resource_name,\n filter_dict, _json)\n"
] |
class TagCubeClient(object):
DEFAULT_ROOT_URL = 'https://api.tagcube.io/'
API_VERSION = '1.0'
SELF_URL = '/users/~'
DOMAINS = '/domains/'
SCANS = '/scans/'
VERIFICATIONS = '/verifications/'
SCAN_PROFILES = '/profiles/'
DESCRIPTION = 'Created by TagCube REST API client'
def __init__(self, email, api_key, verbose=False):
self.email = email
self.api_key = api_key
self.session = None
self.root_url = os.environ.get('ROOT_URL', self.DEFAULT_ROOT_URL)
self.verify = self.root_url == self.DEFAULT_ROOT_URL
if not self.verify:
# Remove warnings when running tests
#
# InsecureRequestWarning: Unverified HTTPS request is being made
requests.packages.urllib3.disable_warnings()
self.set_verbose(verbose)
self.configure_requests()
def test_auth_credentials(self):
"""
:return: True when the credentials are properly configured.
"""
try:
code, _ = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return False
else:
return code == 200
def get_current_user(self):
try:
code, data = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return None
else:
return data
def quick_scan(self, target_url, email_notify=None,
scan_profile='full_audit', path_list=('/',)):
"""
:param target_url: The target url e.g. https://www.tagcube.io/
:param email_notify: The notification email e.g. user@example.com
:param scan_profile: The name of the scan profile
:param path_list: The list of paths to use in the crawling bootstrap
The basic idea around this method is to provide users with a quick way
to start a new scan. We perform these steps:
* If the domain in the target_url is not created, we create a new
domain resource for it.
* We verify that the user's license can scan the target domain (new
or already created manually by the user)
* We'll notify about this scan via email, if no email_notify is
specified we'll use the TagCube's user email for notification. If
there is no email notification for this email, we'll create one.
* The scan will be started using the scan_profile and path_list
provided as parameter.
Lots of bad things can trigger errors. All errors trigger exceptions.
Some of the situations where you'll get errors are:
* The user's license can't scan the provided domain
* We failed to connect to the REST API
* The specified scan_profile does not exist
:return: The newly generated scan id
"""
#
# Scan profile handling
#
scan_profile_resource = self.get_scan_profile(scan_profile)
if scan_profile_resource is None:
msg = 'The specified scan profile "%s" does not exist'
raise ValueError(msg % scan_profile)
#
# Domain verification handling
#
domain = get_domain_from_url(target_url)
port = get_port_from_url(target_url)
is_ssl = use_ssl(target_url)
# First, is there a domain resource to verify?
domain_resource = self.get_domain(domain)
if domain_resource is None:
domain_resource = self.domain_add(domain)
verification_resource = self.get_latest_verification(domain_resource.domain,
port, is_ssl)
if verification_resource is None:
# This seems to be the first scan to this domain, we'll have to
# verify the client's ownership.
#
# Depending on the user's configuration, license, etc. this can
# succeed or fail
verification_resource = self.verification_add(domain_resource.id,
port, is_ssl)
if not self.can_scan(verification_resource):
msg = verification_resource.get('verification_message', '')
raise ValueError(CAN_NOT_SCAN_DOMAIN_ERROR % msg)
#
# Email notification handling
#
notif_email = self.email if email_notify is None else email_notify
email_notification_resource = self.get_email_notification(notif_email)
if email_notification_resource is None:
email_notification_resource = self.email_notification_add(notif_email)
#
# Scan!
#
return self.low_level_scan(verification_resource, scan_profile_resource,
path_list, [email_notification_resource])
def low_level_scan(self, verification_resource, scan_profile_resource,
path_list, notification_resource_list):
"""
Low level implementation of the scan launch which allows you to start
a new scan when you already know the ids for the required resources.
:param verification_resource: The verification associated with the
domain resource to scan
:param scan_profile_resource: The scan profile resource
:param path_list: A list with the paths
:param notification_resource_list: The notifications to use
All the *_resource* parameters are obtained by calling the respective
getters such as:
- get_email_notification
- get_scan_profile
And are expected to be of Resource type
This method's last step is to send a POST request to /1.0/scans/ using
a post-data similar to:
{"verification_href": "/1.0/verifications/6",
"profile_href": "/1.0/profiles/2",
"start_time": "now",
"email_notifications_href": [],
"path_list": ["/"]}'
:return: The newly generated scan id
"""
data = {"verification_href": verification_resource.href,
"profile_href": scan_profile_resource.href,
"start_time": "now",
"email_notifications_href": [n.href for n in notification_resource_list],
"path_list": path_list}
url = self.build_full_url('/scans/')
return self.create_resource(url, data)
def get_scan_profile(self, scan_profile):
"""
:return: The scan profile resource (as Resource), or None
"""
return self.filter_resource('profiles', 'name', scan_profile)
def verification_add(self, domain_resource_id, port, is_ssl):
"""
Sends a POST to /1.0/verifications/ using this post-data:
{"domain_href": "/1.0/domains/2",
"port":80,
"ssl":false}
:param domain_resource_id: The domain id to verify
:param port: The TCP port
:param is_ssl: Boolean indicating if we should use ssl
:return: The newly created resource
"""
data = {"domain_href": self.build_api_path('domains',
domain_resource_id),
"port": port,
"ssl": 'true' if is_ssl else 'false'}
url = self.build_full_url(self.VERIFICATIONS)
return self.create_resource(url, data)
def get_latest_verification(self, domain_name, port, is_ssl):
"""
:return: A verification resource (as Resource), or None. If there is
more than one verification resource available it will return
the latest one (the one with the higher id attribute).
"""
filter_dict = {'port': port,
'ssl': 'true' if is_ssl else 'false',
'domain': domain_name,
'success': True}
return self.multi_filter_resource('verifications', filter_dict,
result_handler=LATEST_RESULT)
def multi_filter_resource(self, resource_name, filter_dict,
result_handler=ONE_RESULT):
url = self.build_full_url('/%s/?%s' % (resource_name,
urllib.urlencode(filter_dict)))
code, _json = self.send_request(url)
if isinstance(_json, dict) and 'error' in _json:
# Catch errors like this one:
#
# {"error": "Invalid resource lookup data provided
# (mismatched type)."}
raise TagCubeAPIException(_json['error'])
return RESULT_HANDLERS[result_handler](resource_name,
filter_dict, _json)
def get_email_notification(self, notif_email):
"""
:return: The email notification resource for notif_email, or None
"""
return self.filter_resource('notifications/email', 'email', notif_email)
def email_notification_add(self, notif_email, first_name='None',
last_name='None', description=DESCRIPTION):
"""
Sends a POST to /1.0/notifications/email/ using this post-data:
{"email": "andres.riancho@gmail.com",
"first_name": "Andres",
"last_name": "Riancho",
"description": "Notification email"}
:return: The id of the newly created email notification resource
"""
data = {"email": notif_email,
"first_name": first_name,
"last_name": last_name,
"description": description}
url = self.build_full_url('/notifications/email/')
return self.create_resource(url, data)
def can_scan(self, verification_resource):
"""
Failed verifications look like this:
{
"domain": "/1.0/domains/5",
"href": "/1.0/verifications/2",
"id": 2,
"port": 80,
"ssl": false,
"success": false,
"verification_message": "The HTTP response body does NOT
contain the verification code."
}
Successful verifications look like this:
{
"domain": "/1.0/domains/2",
"href": "/1.0/verifications/3",
"id": 3,
"port": 80,
"ssl": false,
"success": true,
"verification_message": "Verification success"
}
:return: True if the current user can scan the specified domain
associated with the verification
"""
return verification_resource.success
def get_domain(self, domain):
"""
:param domain: The domain to query
:return: The domain resource (as json), or None
"""
return self.filter_resource('domains', 'domain', domain)
def domain_add(self, domain, description=DESCRIPTION):
"""
Sends a POST to /1.0/domains/ using this post-data:
{"domain": "www.fogfu.com",
"description":"Added by tagcube-api"}
:param domain: The domain name to add as a new resource
:return: The newly created resource
"""
data = {"domain": domain,
"description": description}
url = self.build_full_url(self.DOMAINS)
return self.create_resource(url, data)
def get_scan(self, scan_id):
"""
:param scan_id: The scan ID as a string
:return: A resource containing the scan information
"""
url = self.build_full_url('%s%s' % (self.SCANS, scan_id))
_, json_data = self.send_request(url)
return Resource(json_data)
def create_resource(self, url, data):
"""
Shortcut for creating a new resource
:return: The newly created resource as a Resource object
"""
status_code, json_data = self.send_request(url, data, method='POST')
if status_code != 201:
msg = 'Expected 201 status code, got %s. Failed to create resource.'
raise TagCubeAPIException(msg % status_code)
try:
return Resource(json_data)
except KeyError:
# Parse the error and raise an exception, errors look like:
# {u'error': [u'The domain foo.com already exists.']}
error_string = u' '.join(json_data['error'])
raise TagCubeAPIException(error_string)
def set_verbose(self, verbose):
# Get level based on verbose boolean
level = logging.DEBUG if verbose else logging.CRITICAL
# Configure my own logger
api_logger.setLevel(level=level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
api_logger.addHandler(ch)
# Configure the loggers for urllib3, requests and httplib
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(level)
requests_log.propagate = True
requests_log = logging.getLogger("requests")
requests_log.setLevel(level)
http_client.HTTPConnection.debuglevel = 1 if verbose else 0
def configure_requests(self):
self.session = requests.Session()
self.session.auth = (self.email, self.api_key)
headers = {'Content-Type': 'application/json',
'User-Agent': 'TagCubeClient %s' % __VERSION__}
self.session.headers.update(headers)
def handle_api_errors(self, status_code, json_data):
"""
This method parses all the HTTP responses sent by the REST API and
raises exceptions if required. Basically tries to find responses with
this format:
{
'error': ['The domain foo.com already exists.']
}
Or this other:
{
"scans": {
"__all__": [
"Not a verified domain. You need to verify..."
]
}
}
And raise TagCubeAPIException with the correct message.
:param status_code: The HTTP response code
:param json_data: The HTTP response body decoded as JSON
"""
error_list = []
if 'error' in json_data and len(json_data) == 1 \
and isinstance(json_data, dict) and isinstance(json_data['error'], list):
error_list = json_data['error']
elif status_code == 400:
for main_error_key in json_data:
for sub_error_key in json_data[main_error_key]:
error_list.extend(json_data[main_error_key][sub_error_key])
# Only raise an exception if we had any errors
if error_list:
error_string = u' '.join(error_list)
raise TagCubeAPIException(error_string)
def send_request(self, url, json_data=None, method='GET'):
if method == 'GET':
response = self.session.get(url, verify=self.verify)
elif method == 'POST':
data = json.dumps(json_data)
response = self.session.post(url, data=data, verify=self.verify)
else:
raise ValueError('Invalid HTTP method: "%s"' % method)
if response.status_code == 401:
raise IncorrectAPICredentials('Invalid TagCube API credentials')
try:
json_data = response.json()
except ValueError:
msg = ('TagCube REST API did not return JSON, if this issue'
' persists please contact support@tagcube.io')
raise TagCubeAPIException(msg)
pretty_json = json.dumps(json_data, indent=4)
msg = 'Received %s HTTP response from the wire:\n%s'
api_logger.debug(msg % (response.status_code, pretty_json))
# Error handling
self.handle_api_errors(response.status_code, json_data)
return response.status_code, json_data
def build_full_url(self, last_part):
return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)
def build_api_path(self, resource_name, last_part=''):
return '/%s/%s/%s' % (self.API_VERSION, resource_name, last_part)
|
tagcubeio/tagcube-cli
|
tagcube/client/api.py
|
TagCubeClient.email_notification_add
|
python
|
def email_notification_add(self, notif_email, first_name='None',
last_name='None', description=DESCRIPTION):
data = {"email": notif_email,
"first_name": first_name,
"last_name": last_name,
"description": description}
url = self.build_full_url('/notifications/email/')
return self.create_resource(url, data)
|
Sends a POST to /1.0/notifications/email/ using this post-data:
{"email": "andres.riancho@gmail.com",
"first_name": "Andres",
"last_name": "Riancho",
"description": "Notification email"}
:return: The id of the newly created email notification resource
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube/client/api.py#L291-L308
|
[
"def create_resource(self, url, data):\n \"\"\"\n Shortcut for creating a new resource\n :return: The newly created resource as a Resource object\n \"\"\"\n status_code, json_data = self.send_request(url, data, method='POST')\n\n if status_code != 201:\n msg = 'Expected 201 status code, got %s. Failed to create resource.'\n raise TagCubeAPIException(msg % status_code)\n\n try:\n return Resource(json_data)\n except KeyError:\n # Parse the error and raise an exception, errors look like:\n # {u'error': [u'The domain foo.com already exists.']}\n error_string = u' '.join(json_data['error'])\n raise TagCubeAPIException(error_string)\n",
"def build_full_url(self, last_part):\n return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)\n"
] |
class TagCubeClient(object):
DEFAULT_ROOT_URL = 'https://api.tagcube.io/'
API_VERSION = '1.0'
SELF_URL = '/users/~'
DOMAINS = '/domains/'
SCANS = '/scans/'
VERIFICATIONS = '/verifications/'
SCAN_PROFILES = '/profiles/'
DESCRIPTION = 'Created by TagCube REST API client'
def __init__(self, email, api_key, verbose=False):
self.email = email
self.api_key = api_key
self.session = None
self.root_url = os.environ.get('ROOT_URL', self.DEFAULT_ROOT_URL)
self.verify = self.root_url == self.DEFAULT_ROOT_URL
if not self.verify:
# Remove warnings when running tests
#
# InsecureRequestWarning: Unverified HTTPS request is being made
requests.packages.urllib3.disable_warnings()
self.set_verbose(verbose)
self.configure_requests()
def test_auth_credentials(self):
"""
:return: True when the credentials are properly configured.
"""
try:
code, _ = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return False
else:
return code == 200
def get_current_user(self):
try:
code, data = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return None
else:
return data
def quick_scan(self, target_url, email_notify=None,
scan_profile='full_audit', path_list=('/',)):
"""
:param target_url: The target url e.g. https://www.tagcube.io/
:param email_notify: The notification email e.g. user@example.com
:param scan_profile: The name of the scan profile
:param path_list: The list of paths to use in the crawling bootstrap
The basic idea around this method is to provide users with a quick way
to start a new scan. We perform these steps:
* If the domain in the target_url is not created, we create a new
domain resource for it.
* We verify that the user's license can scan the target domain (new
or already created manually by the user)
* We'll notify about this scan via email, if no email_notify is
specified we'll use the TagCube's user email for notification. If
there is no email notification for this email, we'll create one.
* The scan will be started using the scan_profile and path_list
provided as parameter.
Lots of bad things can trigger errors. All errors trigger exceptions.
Some of the situations where you'll get errors are:
* The user's license can't scan the provided domain
* We failed to connect to the REST API
* The specified scan_profile does not exist
:return: The newly generated scan id
"""
#
# Scan profile handling
#
scan_profile_resource = self.get_scan_profile(scan_profile)
if scan_profile_resource is None:
msg = 'The specified scan profile "%s" does not exist'
raise ValueError(msg % scan_profile)
#
# Domain verification handling
#
domain = get_domain_from_url(target_url)
port = get_port_from_url(target_url)
is_ssl = use_ssl(target_url)
# First, is there a domain resource to verify?
domain_resource = self.get_domain(domain)
if domain_resource is None:
domain_resource = self.domain_add(domain)
verification_resource = self.get_latest_verification(domain_resource.domain,
port, is_ssl)
if verification_resource is None:
# This seems to be the first scan to this domain, we'll have to
# verify the client's ownership.
#
# Depending on the user's configuration, license, etc. this can
# succeed or fail
verification_resource = self.verification_add(domain_resource.id,
port, is_ssl)
if not self.can_scan(verification_resource):
msg = verification_resource.get('verification_message', '')
raise ValueError(CAN_NOT_SCAN_DOMAIN_ERROR % msg)
#
# Email notification handling
#
notif_email = self.email if email_notify is None else email_notify
email_notification_resource = self.get_email_notification(notif_email)
if email_notification_resource is None:
email_notification_resource = self.email_notification_add(notif_email)
#
# Scan!
#
return self.low_level_scan(verification_resource, scan_profile_resource,
path_list, [email_notification_resource])
def low_level_scan(self, verification_resource, scan_profile_resource,
path_list, notification_resource_list):
"""
Low level implementation of the scan launch which allows you to start
a new scan when you already know the ids for the required resources.
:param verification_resource: The verification associated with the
domain resource to scan
:param scan_profile_resource: The scan profile resource
:param path_list: A list with the paths
:param notification_resource_list: The notifications to use
All the *_resource* parameters are obtained by calling the respective
getters such as:
- get_email_notification
- get_scan_profile
And are expected to be of Resource type
This method's last step is to send a POST request to /1.0/scans/ using
a post-data similar to:
{"verification_href": "/1.0/verifications/6",
"profile_href": "/1.0/profiles/2",
"start_time": "now",
"email_notifications_href": [],
"path_list": ["/"]}'
:return: The newly generated scan id
"""
data = {"verification_href": verification_resource.href,
"profile_href": scan_profile_resource.href,
"start_time": "now",
"email_notifications_href": [n.href for n in notification_resource_list],
"path_list": path_list}
url = self.build_full_url('/scans/')
return self.create_resource(url, data)
def get_scan_profile(self, scan_profile):
"""
:return: The scan profile resource (as Resource), or None
"""
return self.filter_resource('profiles', 'name', scan_profile)
def verification_add(self, domain_resource_id, port, is_ssl):
"""
Sends a POST to /1.0/verifications/ using this post-data:
{"domain_href": "/1.0/domains/2",
"port":80,
"ssl":false}
:param domain_resource_id: The domain id to verify
:param port: The TCP port
:param is_ssl: Boolean indicating if we should use ssl
:return: The newly created resource
"""
data = {"domain_href": self.build_api_path('domains',
domain_resource_id),
"port": port,
"ssl": 'true' if is_ssl else 'false'}
url = self.build_full_url(self.VERIFICATIONS)
return self.create_resource(url, data)
def get_latest_verification(self, domain_name, port, is_ssl):
"""
:return: A verification resource (as Resource), or None. If there is
more than one verification resource available it will return
the latest one (the one with the higher id attribute).
"""
filter_dict = {'port': port,
'ssl': 'true' if is_ssl else 'false',
'domain': domain_name,
'success': True}
return self.multi_filter_resource('verifications', filter_dict,
result_handler=LATEST_RESULT)
def multi_filter_resource(self, resource_name, filter_dict,
result_handler=ONE_RESULT):
url = self.build_full_url('/%s/?%s' % (resource_name,
urllib.urlencode(filter_dict)))
code, _json = self.send_request(url)
if isinstance(_json, dict) and 'error' in _json:
# Catch errors like this one:
#
# {"error": "Invalid resource lookup data provided
# (mismatched type)."}
raise TagCubeAPIException(_json['error'])
return RESULT_HANDLERS[result_handler](resource_name,
filter_dict, _json)
def filter_resource(self, resource_name, field_name, field_value,
result_handler=ONE_RESULT):
"""
:return: The resource (as json), or None
"""
return self.multi_filter_resource(resource_name,
{field_name: field_value},
result_handler=result_handler)
def get_email_notification(self, notif_email):
"""
:return: The email notification resource for notif_email, or None
"""
return self.filter_resource('notifications/email', 'email', notif_email)
def can_scan(self, verification_resource):
"""
Failed verifications look like this:
{
"domain": "/1.0/domains/5",
"href": "/1.0/verifications/2",
"id": 2,
"port": 80,
"ssl": false,
"success": false,
"verification_message": "The HTTP response body does NOT
contain the verification code."
}
Successful verifications look like this:
{
"domain": "/1.0/domains/2",
"href": "/1.0/verifications/3",
"id": 3,
"port": 80,
"ssl": false,
"success": true,
"verification_message": "Verification success"
}
:return: True if the current user can scan the specified domain
associated with the verification
"""
return verification_resource.success
def get_domain(self, domain):
"""
:param domain: The domain to query
:return: The domain resource (as json), or None
"""
return self.filter_resource('domains', 'domain', domain)
def domain_add(self, domain, description=DESCRIPTION):
"""
Sends a POST to /1.0/domains/ using this post-data:
{"domain": "www.fogfu.com",
"description":"Added by tagcube-api"}
:param domain: The domain name to add as a new resource
:return: The newly created resource
"""
data = {"domain": domain,
"description": description}
url = self.build_full_url(self.DOMAINS)
return self.create_resource(url, data)
def get_scan(self, scan_id):
"""
:param scan_id: The scan ID as a string
:return: A resource containing the scan information
"""
url = self.build_full_url('%s%s' % (self.SCANS, scan_id))
_, json_data = self.send_request(url)
return Resource(json_data)
def create_resource(self, url, data):
"""
Shortcut for creating a new resource
:return: The newly created resource as a Resource object
"""
status_code, json_data = self.send_request(url, data, method='POST')
if status_code != 201:
msg = 'Expected 201 status code, got %s. Failed to create resource.'
raise TagCubeAPIException(msg % status_code)
try:
return Resource(json_data)
except KeyError:
# Parse the error and raise an exception, errors look like:
# {u'error': [u'The domain foo.com already exists.']}
error_string = u' '.join(json_data['error'])
raise TagCubeAPIException(error_string)
def set_verbose(self, verbose):
# Get level based on verbose boolean
level = logging.DEBUG if verbose else logging.CRITICAL
# Configure my own logger
api_logger.setLevel(level=level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
api_logger.addHandler(ch)
# Configure the loggers for urllib3, requests and httplib
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(level)
requests_log.propagate = True
requests_log = logging.getLogger("requests")
requests_log.setLevel(level)
http_client.HTTPConnection.debuglevel = 1 if verbose else 0
def configure_requests(self):
self.session = requests.Session()
self.session.auth = (self.email, self.api_key)
headers = {'Content-Type': 'application/json',
'User-Agent': 'TagCubeClient %s' % __VERSION__}
self.session.headers.update(headers)
def handle_api_errors(self, status_code, json_data):
"""
This method parses all the HTTP responses sent by the REST API and
raises exceptions if required. Basically tries to find responses with
this format:
{
'error': ['The domain foo.com already exists.']
}
Or this other:
{
"scans": {
"__all__": [
"Not a verified domain. You need to verify..."
]
}
}
And raise TagCubeAPIException with the correct message.
:param status_code: The HTTP response code
:param json_data: The HTTP response body decoded as JSON
"""
error_list = []
if 'error' in json_data and len(json_data) == 1 \
and isinstance(json_data, dict) and isinstance(json_data['error'], list):
error_list = json_data['error']
elif status_code == 400:
for main_error_key in json_data:
for sub_error_key in json_data[main_error_key]:
error_list.extend(json_data[main_error_key][sub_error_key])
# Only raise an exception if we had any errors
if error_list:
error_string = u' '.join(error_list)
raise TagCubeAPIException(error_string)
def send_request(self, url, json_data=None, method='GET'):
if method == 'GET':
response = self.session.get(url, verify=self.verify)
elif method == 'POST':
data = json.dumps(json_data)
response = self.session.post(url, data=data, verify=self.verify)
else:
raise ValueError('Invalid HTTP method: "%s"' % method)
if response.status_code == 401:
raise IncorrectAPICredentials('Invalid TagCube API credentials')
try:
json_data = response.json()
except ValueError:
msg = ('TagCube REST API did not return JSON, if this issue'
' persists please contact support@tagcube.io')
raise TagCubeAPIException(msg)
pretty_json = json.dumps(json_data, indent=4)
msg = 'Received %s HTTP response from the wire:\n%s'
api_logger.debug(msg % (response.status_code, pretty_json))
# Error handling
self.handle_api_errors(response.status_code, json_data)
return response.status_code, json_data
def build_full_url(self, last_part):
return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)
def build_api_path(self, resource_name, last_part=''):
return '/%s/%s/%s' % (self.API_VERSION, resource_name, last_part)
|
tagcubeio/tagcube-cli
|
tagcube/client/api.py
|
TagCubeClient.domain_add
|
python
|
def domain_add(self, domain, description=DESCRIPTION):
data = {"domain": domain,
"description": description}
url = self.build_full_url(self.DOMAINS)
return self.create_resource(url, data)
|
Sends a POST to /1.0/domains/ using this post-data:
{"domain": "www.fogfu.com",
"description":"Added by tagcube-api"}
:param domain: The domain name to add as a new resource
:return: The newly created resource
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube/client/api.py#L347-L360
|
[
"def create_resource(self, url, data):\n \"\"\"\n Shortcut for creating a new resource\n :return: The newly created resource as a Resource object\n \"\"\"\n status_code, json_data = self.send_request(url, data, method='POST')\n\n if status_code != 201:\n msg = 'Expected 201 status code, got %s. Failed to create resource.'\n raise TagCubeAPIException(msg % status_code)\n\n try:\n return Resource(json_data)\n except KeyError:\n # Parse the error and raise an exception, errors look like:\n # {u'error': [u'The domain foo.com already exists.']}\n error_string = u' '.join(json_data['error'])\n raise TagCubeAPIException(error_string)\n",
"def build_full_url(self, last_part):\n return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)\n"
] |
class TagCubeClient(object):
DEFAULT_ROOT_URL = 'https://api.tagcube.io/'
API_VERSION = '1.0'
SELF_URL = '/users/~'
DOMAINS = '/domains/'
SCANS = '/scans/'
VERIFICATIONS = '/verifications/'
SCAN_PROFILES = '/profiles/'
DESCRIPTION = 'Created by TagCube REST API client'
def __init__(self, email, api_key, verbose=False):
self.email = email
self.api_key = api_key
self.session = None
self.root_url = os.environ.get('ROOT_URL', self.DEFAULT_ROOT_URL)
self.verify = self.root_url == self.DEFAULT_ROOT_URL
if not self.verify:
# Remove warnings when running tests
#
# InsecureRequestWarning: Unverified HTTPS request is being made
requests.packages.urllib3.disable_warnings()
self.set_verbose(verbose)
self.configure_requests()
def test_auth_credentials(self):
"""
:return: True when the credentials are properly configured.
"""
try:
code, _ = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return False
else:
return code == 200
def get_current_user(self):
try:
code, data = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return None
else:
return data
def quick_scan(self, target_url, email_notify=None,
scan_profile='full_audit', path_list=('/',)):
"""
:param target_url: The target url e.g. https://www.tagcube.io/
:param email_notify: The notification email e.g. user@example.com
:param scan_profile: The name of the scan profile
:param path_list: The list of paths to use in the crawling bootstrap
The basic idea around this method is to provide users with a quick way
to start a new scan. We perform these steps:
* If the domain in the target_url is not created, we create a new
domain resource for it.
* We verify that the user's license can scan the target domain (new
or already created manually by the user)
* We'll notify about this scan via email, if no email_notify is
specified we'll use the TagCube's user email for notification. If
there is no email notification for this email, we'll create one.
* The scan will be started using the scan_profile and path_list
provided as parameter.
Lots of bad things can trigger errors. All errors trigger exceptions.
Some of the situations where you'll get errors are:
* The user's license can't scan the provided domain
* We failed to connect to the REST API
* The specified scan_profile does not exist
:return: The newly generated scan id
"""
#
# Scan profile handling
#
scan_profile_resource = self.get_scan_profile(scan_profile)
if scan_profile_resource is None:
msg = 'The specified scan profile "%s" does not exist'
raise ValueError(msg % scan_profile)
#
# Domain verification handling
#
domain = get_domain_from_url(target_url)
port = get_port_from_url(target_url)
is_ssl = use_ssl(target_url)
# First, is there a domain resource to verify?
domain_resource = self.get_domain(domain)
if domain_resource is None:
domain_resource = self.domain_add(domain)
verification_resource = self.get_latest_verification(domain_resource.domain,
port, is_ssl)
if verification_resource is None:
# This seems to be the first scan to this domain, we'll have to
# verify the client's ownership.
#
# Depending on the user's configuration, license, etc. this can
# succeed or fail
verification_resource = self.verification_add(domain_resource.id,
port, is_ssl)
if not self.can_scan(verification_resource):
msg = verification_resource.get('verification_message', '')
raise ValueError(CAN_NOT_SCAN_DOMAIN_ERROR % msg)
#
# Email notification handling
#
notif_email = self.email if email_notify is None else email_notify
email_notification_resource = self.get_email_notification(notif_email)
if email_notification_resource is None:
email_notification_resource = self.email_notification_add(notif_email)
#
# Scan!
#
return self.low_level_scan(verification_resource, scan_profile_resource,
path_list, [email_notification_resource])
def low_level_scan(self, verification_resource, scan_profile_resource,
path_list, notification_resource_list):
"""
Low level implementation of the scan launch which allows you to start
a new scan when you already know the ids for the required resources.
:param verification_resource: The verification associated with the
domain resource to scan
:param scan_profile_resource: The scan profile resource
:param path_list: A list with the paths
:param notification_resource_list: The notifications to use
All the *_resource* parameters are obtained by calling the respective
getters such as:
- get_email_notification
- get_scan_profile
And are expected to be of Resource type
This method's last step is to send a POST request to /1.0/scans/ using
a post-data similar to:
{"verification_href": "/1.0/verifications/6",
"profile_href": "/1.0/profiles/2",
"start_time": "now",
"email_notifications_href": [],
"path_list": ["/"]}'
:return: The newly generated scan id
"""
data = {"verification_href": verification_resource.href,
"profile_href": scan_profile_resource.href,
"start_time": "now",
"email_notifications_href": [n.href for n in notification_resource_list],
"path_list": path_list}
url = self.build_full_url('/scans/')
return self.create_resource(url, data)
def get_scan_profile(self, scan_profile):
"""
:return: The scan profile resource (as Resource), or None
"""
return self.filter_resource('profiles', 'name', scan_profile)
def verification_add(self, domain_resource_id, port, is_ssl):
"""
Sends a POST to /1.0/verifications/ using this post-data:
{"domain_href": "/1.0/domains/2",
"port":80,
"ssl":false}
:param domain_resource_id: The domain id to verify
:param port: The TCP port
:param is_ssl: Boolean indicating if we should use ssl
:return: The newly created resource
"""
data = {"domain_href": self.build_api_path('domains',
domain_resource_id),
"port": port,
"ssl": 'true' if is_ssl else 'false'}
url = self.build_full_url(self.VERIFICATIONS)
return self.create_resource(url, data)
def get_latest_verification(self, domain_name, port, is_ssl):
"""
:return: A verification resource (as Resource), or None. If there is
more than one verification resource available it will return
the latest one (the one with the higher id attribute).
"""
filter_dict = {'port': port,
'ssl': 'true' if is_ssl else 'false',
'domain': domain_name,
'success': True}
return self.multi_filter_resource('verifications', filter_dict,
result_handler=LATEST_RESULT)
def multi_filter_resource(self, resource_name, filter_dict,
result_handler=ONE_RESULT):
url = self.build_full_url('/%s/?%s' % (resource_name,
urllib.urlencode(filter_dict)))
code, _json = self.send_request(url)
if isinstance(_json, dict) and 'error' in _json:
# Catch errors like this one:
#
# {"error": "Invalid resource lookup data provided
# (mismatched type)."}
raise TagCubeAPIException(_json['error'])
return RESULT_HANDLERS[result_handler](resource_name,
filter_dict, _json)
def filter_resource(self, resource_name, field_name, field_value,
result_handler=ONE_RESULT):
"""
:return: The resource (as json), or None
"""
return self.multi_filter_resource(resource_name,
{field_name: field_value},
result_handler=result_handler)
def get_email_notification(self, notif_email):
"""
:return: The email notification resource for notif_email, or None
"""
return self.filter_resource('notifications/email', 'email', notif_email)
def email_notification_add(self, notif_email, first_name='None',
last_name='None', description=DESCRIPTION):
"""
Sends a POST to /1.0/notifications/email/ using this post-data:
{"email": "andres.riancho@gmail.com",
"first_name": "Andres",
"last_name": "Riancho",
"description": "Notification email"}
:return: The id of the newly created email notification resource
"""
data = {"email": notif_email,
"first_name": first_name,
"last_name": last_name,
"description": description}
url = self.build_full_url('/notifications/email/')
return self.create_resource(url, data)
def can_scan(self, verification_resource):
"""
Failed verifications look like this:
{
"domain": "/1.0/domains/5",
"href": "/1.0/verifications/2",
"id": 2,
"port": 80,
"ssl": false,
"success": false,
"verification_message": "The HTTP response body does NOT
contain the verification code."
}
Successful verifications look like this:
{
"domain": "/1.0/domains/2",
"href": "/1.0/verifications/3",
"id": 3,
"port": 80,
"ssl": false,
"success": true,
"verification_message": "Verification success"
}
:return: True if the current user can scan the specified domain
associated with the verification
"""
return verification_resource.success
def get_domain(self, domain):
"""
:param domain: The domain to query
:return: The domain resource (as json), or None
"""
return self.filter_resource('domains', 'domain', domain)
def get_scan(self, scan_id):
"""
:param scan_id: The scan ID as a string
:return: A resource containing the scan information
"""
url = self.build_full_url('%s%s' % (self.SCANS, scan_id))
_, json_data = self.send_request(url)
return Resource(json_data)
def create_resource(self, url, data):
"""
Shortcut for creating a new resource
:return: The newly created resource as a Resource object
"""
status_code, json_data = self.send_request(url, data, method='POST')
if status_code != 201:
msg = 'Expected 201 status code, got %s. Failed to create resource.'
raise TagCubeAPIException(msg % status_code)
try:
return Resource(json_data)
except KeyError:
# Parse the error and raise an exception, errors look like:
# {u'error': [u'The domain foo.com already exists.']}
error_string = u' '.join(json_data['error'])
raise TagCubeAPIException(error_string)
def set_verbose(self, verbose):
# Get level based on verbose boolean
level = logging.DEBUG if verbose else logging.CRITICAL
# Configure my own logger
api_logger.setLevel(level=level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
api_logger.addHandler(ch)
# Configure the loggers for urllib3, requests and httplib
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(level)
requests_log.propagate = True
requests_log = logging.getLogger("requests")
requests_log.setLevel(level)
http_client.HTTPConnection.debuglevel = 1 if verbose else 0
def configure_requests(self):
self.session = requests.Session()
self.session.auth = (self.email, self.api_key)
headers = {'Content-Type': 'application/json',
'User-Agent': 'TagCubeClient %s' % __VERSION__}
self.session.headers.update(headers)
def handle_api_errors(self, status_code, json_data):
"""
This method parses all the HTTP responses sent by the REST API and
raises exceptions if required. Basically tries to find responses with
this format:
{
'error': ['The domain foo.com already exists.']
}
Or this other:
{
"scans": {
"__all__": [
"Not a verified domain. You need to verify..."
]
}
}
And raise TagCubeAPIException with the correct message.
:param status_code: The HTTP response code
:param json_data: The HTTP response body decoded as JSON
"""
error_list = []
if 'error' in json_data and len(json_data) == 1 \
and isinstance(json_data, dict) and isinstance(json_data['error'], list):
error_list = json_data['error']
elif status_code == 400:
for main_error_key in json_data:
for sub_error_key in json_data[main_error_key]:
error_list.extend(json_data[main_error_key][sub_error_key])
# Only raise an exception if we had any errors
if error_list:
error_string = u' '.join(error_list)
raise TagCubeAPIException(error_string)
def send_request(self, url, json_data=None, method='GET'):
if method == 'GET':
response = self.session.get(url, verify=self.verify)
elif method == 'POST':
data = json.dumps(json_data)
response = self.session.post(url, data=data, verify=self.verify)
else:
raise ValueError('Invalid HTTP method: "%s"' % method)
if response.status_code == 401:
raise IncorrectAPICredentials('Invalid TagCube API credentials')
try:
json_data = response.json()
except ValueError:
msg = ('TagCube REST API did not return JSON, if this issue'
' persists please contact support@tagcube.io')
raise TagCubeAPIException(msg)
pretty_json = json.dumps(json_data, indent=4)
msg = 'Received %s HTTP response from the wire:\n%s'
api_logger.debug(msg % (response.status_code, pretty_json))
# Error handling
self.handle_api_errors(response.status_code, json_data)
return response.status_code, json_data
def build_full_url(self, last_part):
return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)
def build_api_path(self, resource_name, last_part=''):
return '/%s/%s/%s' % (self.API_VERSION, resource_name, last_part)
|
tagcubeio/tagcube-cli
|
tagcube/client/api.py
|
TagCubeClient.get_scan
|
python
|
def get_scan(self, scan_id):
url = self.build_full_url('%s%s' % (self.SCANS, scan_id))
_, json_data = self.send_request(url)
return Resource(json_data)
|
:param scan_id: The scan ID as a string
:return: A resource containing the scan information
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube/client/api.py#L362-L369
|
[
"def send_request(self, url, json_data=None, method='GET'):\n if method == 'GET':\n response = self.session.get(url, verify=self.verify)\n\n elif method == 'POST':\n data = json.dumps(json_data)\n response = self.session.post(url, data=data, verify=self.verify)\n\n else:\n raise ValueError('Invalid HTTP method: \"%s\"' % method)\n\n if response.status_code == 401:\n raise IncorrectAPICredentials('Invalid TagCube API credentials')\n\n try:\n json_data = response.json()\n except ValueError:\n msg = ('TagCube REST API did not return JSON, if this issue'\n ' persists please contact support@tagcube.io')\n raise TagCubeAPIException(msg)\n\n pretty_json = json.dumps(json_data, indent=4)\n msg = 'Received %s HTTP response from the wire:\\n%s'\n api_logger.debug(msg % (response.status_code, pretty_json))\n\n # Error handling\n self.handle_api_errors(response.status_code, json_data)\n\n return response.status_code, json_data\n",
"def build_full_url(self, last_part):\n return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)\n"
] |
class TagCubeClient(object):
DEFAULT_ROOT_URL = 'https://api.tagcube.io/'
API_VERSION = '1.0'
SELF_URL = '/users/~'
DOMAINS = '/domains/'
SCANS = '/scans/'
VERIFICATIONS = '/verifications/'
SCAN_PROFILES = '/profiles/'
DESCRIPTION = 'Created by TagCube REST API client'
def __init__(self, email, api_key, verbose=False):
self.email = email
self.api_key = api_key
self.session = None
self.root_url = os.environ.get('ROOT_URL', self.DEFAULT_ROOT_URL)
self.verify = self.root_url == self.DEFAULT_ROOT_URL
if not self.verify:
# Remove warnings when running tests
#
# InsecureRequestWarning: Unverified HTTPS request is being made
requests.packages.urllib3.disable_warnings()
self.set_verbose(verbose)
self.configure_requests()
def test_auth_credentials(self):
"""
:return: True when the credentials are properly configured.
"""
try:
code, _ = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return False
else:
return code == 200
def get_current_user(self):
try:
code, data = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return None
else:
return data
def quick_scan(self, target_url, email_notify=None,
scan_profile='full_audit', path_list=('/',)):
"""
:param target_url: The target url e.g. https://www.tagcube.io/
:param email_notify: The notification email e.g. user@example.com
:param scan_profile: The name of the scan profile
:param path_list: The list of paths to use in the crawling bootstrap
The basic idea around this method is to provide users with a quick way
to start a new scan. We perform these steps:
* If the domain in the target_url is not created, we create a new
domain resource for it.
* We verify that the user's license can scan the target domain (new
or already created manually by the user)
* We'll notify about this scan via email, if no email_notify is
specified we'll use the TagCube's user email for notification. If
there is no email notification for this email, we'll create one.
* The scan will be started using the scan_profile and path_list
provided as parameter.
Lots of bad things can trigger errors. All errors trigger exceptions.
Some of the situations where you'll get errors are:
* The user's license can't scan the provided domain
* We failed to connect to the REST API
* The specified scan_profile does not exist
:return: The newly generated scan id
"""
#
# Scan profile handling
#
scan_profile_resource = self.get_scan_profile(scan_profile)
if scan_profile_resource is None:
msg = 'The specified scan profile "%s" does not exist'
raise ValueError(msg % scan_profile)
#
# Domain verification handling
#
domain = get_domain_from_url(target_url)
port = get_port_from_url(target_url)
is_ssl = use_ssl(target_url)
# First, is there a domain resource to verify?
domain_resource = self.get_domain(domain)
if domain_resource is None:
domain_resource = self.domain_add(domain)
verification_resource = self.get_latest_verification(domain_resource.domain,
port, is_ssl)
if verification_resource is None:
# This seems to be the first scan to this domain, we'll have to
# verify the client's ownership.
#
# Depending on the user's configuration, license, etc. this can
# succeed or fail
verification_resource = self.verification_add(domain_resource.id,
port, is_ssl)
if not self.can_scan(verification_resource):
msg = verification_resource.get('verification_message', '')
raise ValueError(CAN_NOT_SCAN_DOMAIN_ERROR % msg)
#
# Email notification handling
#
notif_email = self.email if email_notify is None else email_notify
email_notification_resource = self.get_email_notification(notif_email)
if email_notification_resource is None:
email_notification_resource = self.email_notification_add(notif_email)
#
# Scan!
#
return self.low_level_scan(verification_resource, scan_profile_resource,
path_list, [email_notification_resource])
def low_level_scan(self, verification_resource, scan_profile_resource,
path_list, notification_resource_list):
"""
Low level implementation of the scan launch which allows you to start
a new scan when you already know the ids for the required resources.
:param verification_resource: The verification associated with the
domain resource to scan
:param scan_profile_resource: The scan profile resource
:param path_list: A list with the paths
:param notification_resource_list: The notifications to use
All the *_resource* parameters are obtained by calling the respective
getters such as:
- get_email_notification
- get_scan_profile
And are expected to be of Resource type
This method's last step is to send a POST request to /1.0/scans/ using
a post-data similar to:
{"verification_href": "/1.0/verifications/6",
"profile_href": "/1.0/profiles/2",
"start_time": "now",
"email_notifications_href": [],
"path_list": ["/"]}'
:return: The newly generated scan id
"""
data = {"verification_href": verification_resource.href,
"profile_href": scan_profile_resource.href,
"start_time": "now",
"email_notifications_href": [n.href for n in notification_resource_list],
"path_list": path_list}
url = self.build_full_url('/scans/')
return self.create_resource(url, data)
def get_scan_profile(self, scan_profile):
"""
:return: The scan profile resource (as Resource), or None
"""
return self.filter_resource('profiles', 'name', scan_profile)
def verification_add(self, domain_resource_id, port, is_ssl):
"""
Sends a POST to /1.0/verifications/ using this post-data:
{"domain_href": "/1.0/domains/2",
"port":80,
"ssl":false}
:param domain_resource_id: The domain id to verify
:param port: The TCP port
:param is_ssl: Boolean indicating if we should use ssl
:return: The newly created resource
"""
data = {"domain_href": self.build_api_path('domains',
domain_resource_id),
"port": port,
"ssl": 'true' if is_ssl else 'false'}
url = self.build_full_url(self.VERIFICATIONS)
return self.create_resource(url, data)
def get_latest_verification(self, domain_name, port, is_ssl):
"""
:return: A verification resource (as Resource), or None. If there is
more than one verification resource available it will return
the latest one (the one with the higher id attribute).
"""
filter_dict = {'port': port,
'ssl': 'true' if is_ssl else 'false',
'domain': domain_name,
'success': True}
return self.multi_filter_resource('verifications', filter_dict,
result_handler=LATEST_RESULT)
def multi_filter_resource(self, resource_name, filter_dict,
result_handler=ONE_RESULT):
url = self.build_full_url('/%s/?%s' % (resource_name,
urllib.urlencode(filter_dict)))
code, _json = self.send_request(url)
if isinstance(_json, dict) and 'error' in _json:
# Catch errors like this one:
#
# {"error": "Invalid resource lookup data provided
# (mismatched type)."}
raise TagCubeAPIException(_json['error'])
return RESULT_HANDLERS[result_handler](resource_name,
filter_dict, _json)
def filter_resource(self, resource_name, field_name, field_value,
result_handler=ONE_RESULT):
"""
:return: The resource (as json), or None
"""
return self.multi_filter_resource(resource_name,
{field_name: field_value},
result_handler=result_handler)
def get_email_notification(self, notif_email):
"""
:return: The email notification resource for notif_email, or None
"""
return self.filter_resource('notifications/email', 'email', notif_email)
def email_notification_add(self, notif_email, first_name='None',
last_name='None', description=DESCRIPTION):
"""
Sends a POST to /1.0/notifications/email/ using this post-data:
{"email": "andres.riancho@gmail.com",
"first_name": "Andres",
"last_name": "Riancho",
"description": "Notification email"}
:return: The id of the newly created email notification resource
"""
data = {"email": notif_email,
"first_name": first_name,
"last_name": last_name,
"description": description}
url = self.build_full_url('/notifications/email/')
return self.create_resource(url, data)
def can_scan(self, verification_resource):
"""
Failed verifications look like this:
{
"domain": "/1.0/domains/5",
"href": "/1.0/verifications/2",
"id": 2,
"port": 80,
"ssl": false,
"success": false,
"verification_message": "The HTTP response body does NOT
contain the verification code."
}
Successful verifications look like this:
{
"domain": "/1.0/domains/2",
"href": "/1.0/verifications/3",
"id": 3,
"port": 80,
"ssl": false,
"success": true,
"verification_message": "Verification success"
}
:return: True if the current user can scan the specified domain
associated with the verification
"""
return verification_resource.success
def get_domain(self, domain):
"""
:param domain: The domain to query
:return: The domain resource (as json), or None
"""
return self.filter_resource('domains', 'domain', domain)
def domain_add(self, domain, description=DESCRIPTION):
"""
Sends a POST to /1.0/domains/ using this post-data:
{"domain": "www.fogfu.com",
"description":"Added by tagcube-api"}
:param domain: The domain name to add as a new resource
:return: The newly created resource
"""
data = {"domain": domain,
"description": description}
url = self.build_full_url(self.DOMAINS)
return self.create_resource(url, data)
def create_resource(self, url, data):
"""
Shortcut for creating a new resource
:return: The newly created resource as a Resource object
"""
status_code, json_data = self.send_request(url, data, method='POST')
if status_code != 201:
msg = 'Expected 201 status code, got %s. Failed to create resource.'
raise TagCubeAPIException(msg % status_code)
try:
return Resource(json_data)
except KeyError:
# Parse the error and raise an exception, errors look like:
# {u'error': [u'The domain foo.com already exists.']}
error_string = u' '.join(json_data['error'])
raise TagCubeAPIException(error_string)
def set_verbose(self, verbose):
# Get level based on verbose boolean
level = logging.DEBUG if verbose else logging.CRITICAL
# Configure my own logger
api_logger.setLevel(level=level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
api_logger.addHandler(ch)
# Configure the loggers for urllib3, requests and httplib
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(level)
requests_log.propagate = True
requests_log = logging.getLogger("requests")
requests_log.setLevel(level)
http_client.HTTPConnection.debuglevel = 1 if verbose else 0
def configure_requests(self):
self.session = requests.Session()
self.session.auth = (self.email, self.api_key)
headers = {'Content-Type': 'application/json',
'User-Agent': 'TagCubeClient %s' % __VERSION__}
self.session.headers.update(headers)
def handle_api_errors(self, status_code, json_data):
"""
This method parses all the HTTP responses sent by the REST API and
raises exceptions if required. Basically tries to find responses with
this format:
{
'error': ['The domain foo.com already exists.']
}
Or this other:
{
"scans": {
"__all__": [
"Not a verified domain. You need to verify..."
]
}
}
And raise TagCubeAPIException with the correct message.
:param status_code: The HTTP response code
:param json_data: The HTTP response body decoded as JSON
"""
error_list = []
if 'error' in json_data and len(json_data) == 1 \
and isinstance(json_data, dict) and isinstance(json_data['error'], list):
error_list = json_data['error']
elif status_code == 400:
for main_error_key in json_data:
for sub_error_key in json_data[main_error_key]:
error_list.extend(json_data[main_error_key][sub_error_key])
# Only raise an exception if we had any errors
if error_list:
error_string = u' '.join(error_list)
raise TagCubeAPIException(error_string)
def send_request(self, url, json_data=None, method='GET'):
if method == 'GET':
response = self.session.get(url, verify=self.verify)
elif method == 'POST':
data = json.dumps(json_data)
response = self.session.post(url, data=data, verify=self.verify)
else:
raise ValueError('Invalid HTTP method: "%s"' % method)
if response.status_code == 401:
raise IncorrectAPICredentials('Invalid TagCube API credentials')
try:
json_data = response.json()
except ValueError:
msg = ('TagCube REST API did not return JSON, if this issue'
' persists please contact support@tagcube.io')
raise TagCubeAPIException(msg)
pretty_json = json.dumps(json_data, indent=4)
msg = 'Received %s HTTP response from the wire:\n%s'
api_logger.debug(msg % (response.status_code, pretty_json))
# Error handling
self.handle_api_errors(response.status_code, json_data)
return response.status_code, json_data
def build_full_url(self, last_part):
return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)
def build_api_path(self, resource_name, last_part=''):
return '/%s/%s/%s' % (self.API_VERSION, resource_name, last_part)
|
tagcubeio/tagcube-cli
|
tagcube/client/api.py
|
TagCubeClient.create_resource
|
python
|
def create_resource(self, url, data):
status_code, json_data = self.send_request(url, data, method='POST')
if status_code != 201:
msg = 'Expected 201 status code, got %s. Failed to create resource.'
raise TagCubeAPIException(msg % status_code)
try:
return Resource(json_data)
except KeyError:
# Parse the error and raise an exception, errors look like:
# {u'error': [u'The domain foo.com already exists.']}
error_string = u' '.join(json_data['error'])
raise TagCubeAPIException(error_string)
|
Shortcut for creating a new resource
:return: The newly created resource as a Resource object
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube/client/api.py#L371-L388
|
[
"def send_request(self, url, json_data=None, method='GET'):\n if method == 'GET':\n response = self.session.get(url, verify=self.verify)\n\n elif method == 'POST':\n data = json.dumps(json_data)\n response = self.session.post(url, data=data, verify=self.verify)\n\n else:\n raise ValueError('Invalid HTTP method: \"%s\"' % method)\n\n if response.status_code == 401:\n raise IncorrectAPICredentials('Invalid TagCube API credentials')\n\n try:\n json_data = response.json()\n except ValueError:\n msg = ('TagCube REST API did not return JSON, if this issue'\n ' persists please contact support@tagcube.io')\n raise TagCubeAPIException(msg)\n\n pretty_json = json.dumps(json_data, indent=4)\n msg = 'Received %s HTTP response from the wire:\\n%s'\n api_logger.debug(msg % (response.status_code, pretty_json))\n\n # Error handling\n self.handle_api_errors(response.status_code, json_data)\n\n return response.status_code, json_data\n"
] |
class TagCubeClient(object):
DEFAULT_ROOT_URL = 'https://api.tagcube.io/'
API_VERSION = '1.0'
SELF_URL = '/users/~'
DOMAINS = '/domains/'
SCANS = '/scans/'
VERIFICATIONS = '/verifications/'
SCAN_PROFILES = '/profiles/'
DESCRIPTION = 'Created by TagCube REST API client'
def __init__(self, email, api_key, verbose=False):
self.email = email
self.api_key = api_key
self.session = None
self.root_url = os.environ.get('ROOT_URL', self.DEFAULT_ROOT_URL)
self.verify = self.root_url == self.DEFAULT_ROOT_URL
if not self.verify:
# Remove warnings when running tests
#
# InsecureRequestWarning: Unverified HTTPS request is being made
requests.packages.urllib3.disable_warnings()
self.set_verbose(verbose)
self.configure_requests()
def test_auth_credentials(self):
"""
:return: True when the credentials are properly configured.
"""
try:
code, _ = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return False
else:
return code == 200
def get_current_user(self):
try:
code, data = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return None
else:
return data
def quick_scan(self, target_url, email_notify=None,
scan_profile='full_audit', path_list=('/',)):
"""
:param target_url: The target url e.g. https://www.tagcube.io/
:param email_notify: The notification email e.g. user@example.com
:param scan_profile: The name of the scan profile
:param path_list: The list of paths to use in the crawling bootstrap
The basic idea around this method is to provide users with a quick way
to start a new scan. We perform these steps:
* If the domain in the target_url is not created, we create a new
domain resource for it.
* We verify that the user's license can scan the target domain (new
or already created manually by the user)
* We'll notify about this scan via email, if no email_notify is
specified we'll use the TagCube's user email for notification. If
there is no email notification for this email, we'll create one.
* The scan will be started using the scan_profile and path_list
provided as parameter.
Lots of bad things can trigger errors. All errors trigger exceptions.
Some of the situations where you'll get errors are:
* The user's license can't scan the provided domain
* We failed to connect to the REST API
* The specified scan_profile does not exist
:return: The newly generated scan id
"""
#
# Scan profile handling
#
scan_profile_resource = self.get_scan_profile(scan_profile)
if scan_profile_resource is None:
msg = 'The specified scan profile "%s" does not exist'
raise ValueError(msg % scan_profile)
#
# Domain verification handling
#
domain = get_domain_from_url(target_url)
port = get_port_from_url(target_url)
is_ssl = use_ssl(target_url)
# First, is there a domain resource to verify?
domain_resource = self.get_domain(domain)
if domain_resource is None:
domain_resource = self.domain_add(domain)
verification_resource = self.get_latest_verification(domain_resource.domain,
port, is_ssl)
if verification_resource is None:
# This seems to be the first scan to this domain, we'll have to
# verify the client's ownership.
#
# Depending on the user's configuration, license, etc. this can
# succeed or fail
verification_resource = self.verification_add(domain_resource.id,
port, is_ssl)
if not self.can_scan(verification_resource):
msg = verification_resource.get('verification_message', '')
raise ValueError(CAN_NOT_SCAN_DOMAIN_ERROR % msg)
#
# Email notification handling
#
notif_email = self.email if email_notify is None else email_notify
email_notification_resource = self.get_email_notification(notif_email)
if email_notification_resource is None:
email_notification_resource = self.email_notification_add(notif_email)
#
# Scan!
#
return self.low_level_scan(verification_resource, scan_profile_resource,
path_list, [email_notification_resource])
def low_level_scan(self, verification_resource, scan_profile_resource,
path_list, notification_resource_list):
"""
Low level implementation of the scan launch which allows you to start
a new scan when you already know the ids for the required resources.
:param verification_resource: The verification associated with the
domain resource to scan
:param scan_profile_resource: The scan profile resource
:param path_list: A list with the paths
:param notification_resource_list: The notifications to use
All the *_resource* parameters are obtained by calling the respective
getters such as:
- get_email_notification
- get_scan_profile
And are expected to be of Resource type
This method's last step is to send a POST request to /1.0/scans/ using
a post-data similar to:
{"verification_href": "/1.0/verifications/6",
"profile_href": "/1.0/profiles/2",
"start_time": "now",
"email_notifications_href": [],
"path_list": ["/"]}'
:return: The newly generated scan id
"""
data = {"verification_href": verification_resource.href,
"profile_href": scan_profile_resource.href,
"start_time": "now",
"email_notifications_href": [n.href for n in notification_resource_list],
"path_list": path_list}
url = self.build_full_url('/scans/')
return self.create_resource(url, data)
def get_scan_profile(self, scan_profile):
"""
:return: The scan profile resource (as Resource), or None
"""
return self.filter_resource('profiles', 'name', scan_profile)
def verification_add(self, domain_resource_id, port, is_ssl):
"""
Sends a POST to /1.0/verifications/ using this post-data:
{"domain_href": "/1.0/domains/2",
"port":80,
"ssl":false}
:param domain_resource_id: The domain id to verify
:param port: The TCP port
:param is_ssl: Boolean indicating if we should use ssl
:return: The newly created resource
"""
data = {"domain_href": self.build_api_path('domains',
domain_resource_id),
"port": port,
"ssl": 'true' if is_ssl else 'false'}
url = self.build_full_url(self.VERIFICATIONS)
return self.create_resource(url, data)
def get_latest_verification(self, domain_name, port, is_ssl):
"""
:return: A verification resource (as Resource), or None. If there is
more than one verification resource available it will return
the latest one (the one with the higher id attribute).
"""
filter_dict = {'port': port,
'ssl': 'true' if is_ssl else 'false',
'domain': domain_name,
'success': True}
return self.multi_filter_resource('verifications', filter_dict,
result_handler=LATEST_RESULT)
def multi_filter_resource(self, resource_name, filter_dict,
result_handler=ONE_RESULT):
url = self.build_full_url('/%s/?%s' % (resource_name,
urllib.urlencode(filter_dict)))
code, _json = self.send_request(url)
if isinstance(_json, dict) and 'error' in _json:
# Catch errors like this one:
#
# {"error": "Invalid resource lookup data provided
# (mismatched type)."}
raise TagCubeAPIException(_json['error'])
return RESULT_HANDLERS[result_handler](resource_name,
filter_dict, _json)
def filter_resource(self, resource_name, field_name, field_value,
result_handler=ONE_RESULT):
"""
:return: The resource (as json), or None
"""
return self.multi_filter_resource(resource_name,
{field_name: field_value},
result_handler=result_handler)
def get_email_notification(self, notif_email):
"""
:return: The email notification resource for notif_email, or None
"""
return self.filter_resource('notifications/email', 'email', notif_email)
def email_notification_add(self, notif_email, first_name='None',
last_name='None', description=DESCRIPTION):
"""
Sends a POST to /1.0/notifications/email/ using this post-data:
{"email": "andres.riancho@gmail.com",
"first_name": "Andres",
"last_name": "Riancho",
"description": "Notification email"}
:return: The id of the newly created email notification resource
"""
data = {"email": notif_email,
"first_name": first_name,
"last_name": last_name,
"description": description}
url = self.build_full_url('/notifications/email/')
return self.create_resource(url, data)
def can_scan(self, verification_resource):
"""
Failed verifications look like this:
{
"domain": "/1.0/domains/5",
"href": "/1.0/verifications/2",
"id": 2,
"port": 80,
"ssl": false,
"success": false,
"verification_message": "The HTTP response body does NOT
contain the verification code."
}
Successful verifications look like this:
{
"domain": "/1.0/domains/2",
"href": "/1.0/verifications/3",
"id": 3,
"port": 80,
"ssl": false,
"success": true,
"verification_message": "Verification success"
}
:return: True if the current user can scan the specified domain
associated with the verification
"""
return verification_resource.success
def get_domain(self, domain):
"""
:param domain: The domain to query
:return: The domain resource (as json), or None
"""
return self.filter_resource('domains', 'domain', domain)
def domain_add(self, domain, description=DESCRIPTION):
"""
Sends a POST to /1.0/domains/ using this post-data:
{"domain": "www.fogfu.com",
"description":"Added by tagcube-api"}
:param domain: The domain name to add as a new resource
:return: The newly created resource
"""
data = {"domain": domain,
"description": description}
url = self.build_full_url(self.DOMAINS)
return self.create_resource(url, data)
def get_scan(self, scan_id):
"""
:param scan_id: The scan ID as a string
:return: A resource containing the scan information
"""
url = self.build_full_url('%s%s' % (self.SCANS, scan_id))
_, json_data = self.send_request(url)
return Resource(json_data)
def set_verbose(self, verbose):
# Get level based on verbose boolean
level = logging.DEBUG if verbose else logging.CRITICAL
# Configure my own logger
api_logger.setLevel(level=level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
api_logger.addHandler(ch)
# Configure the loggers for urllib3, requests and httplib
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(level)
requests_log.propagate = True
requests_log = logging.getLogger("requests")
requests_log.setLevel(level)
http_client.HTTPConnection.debuglevel = 1 if verbose else 0
def configure_requests(self):
self.session = requests.Session()
self.session.auth = (self.email, self.api_key)
headers = {'Content-Type': 'application/json',
'User-Agent': 'TagCubeClient %s' % __VERSION__}
self.session.headers.update(headers)
def handle_api_errors(self, status_code, json_data):
"""
This method parses all the HTTP responses sent by the REST API and
raises exceptions if required. Basically tries to find responses with
this format:
{
'error': ['The domain foo.com already exists.']
}
Or this other:
{
"scans": {
"__all__": [
"Not a verified domain. You need to verify..."
]
}
}
And raise TagCubeAPIException with the correct message.
:param status_code: The HTTP response code
:param json_data: The HTTP response body decoded as JSON
"""
error_list = []
if 'error' in json_data and len(json_data) == 1 \
and isinstance(json_data, dict) and isinstance(json_data['error'], list):
error_list = json_data['error']
elif status_code == 400:
for main_error_key in json_data:
for sub_error_key in json_data[main_error_key]:
error_list.extend(json_data[main_error_key][sub_error_key])
# Only raise an exception if we had any errors
if error_list:
error_string = u' '.join(error_list)
raise TagCubeAPIException(error_string)
def send_request(self, url, json_data=None, method='GET'):
if method == 'GET':
response = self.session.get(url, verify=self.verify)
elif method == 'POST':
data = json.dumps(json_data)
response = self.session.post(url, data=data, verify=self.verify)
else:
raise ValueError('Invalid HTTP method: "%s"' % method)
if response.status_code == 401:
raise IncorrectAPICredentials('Invalid TagCube API credentials')
try:
json_data = response.json()
except ValueError:
msg = ('TagCube REST API did not return JSON, if this issue'
' persists please contact support@tagcube.io')
raise TagCubeAPIException(msg)
pretty_json = json.dumps(json_data, indent=4)
msg = 'Received %s HTTP response from the wire:\n%s'
api_logger.debug(msg % (response.status_code, pretty_json))
# Error handling
self.handle_api_errors(response.status_code, json_data)
return response.status_code, json_data
def build_full_url(self, last_part):
return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)
def build_api_path(self, resource_name, last_part=''):
return '/%s/%s/%s' % (self.API_VERSION, resource_name, last_part)
|
tagcubeio/tagcube-cli
|
tagcube/client/api.py
|
TagCubeClient.handle_api_errors
|
python
|
def handle_api_errors(self, status_code, json_data):
error_list = []
if 'error' in json_data and len(json_data) == 1 \
and isinstance(json_data, dict) and isinstance(json_data['error'], list):
error_list = json_data['error']
elif status_code == 400:
for main_error_key in json_data:
for sub_error_key in json_data[main_error_key]:
error_list.extend(json_data[main_error_key][sub_error_key])
# Only raise an exception if we had any errors
if error_list:
error_string = u' '.join(error_list)
raise TagCubeAPIException(error_string)
|
This method parses all the HTTP responses sent by the REST API and
raises exceptions if required. Basically tries to find responses with
this format:
{
'error': ['The domain foo.com already exists.']
}
Or this other:
{
"scans": {
"__all__": [
"Not a verified domain. You need to verify..."
]
}
}
And raise TagCubeAPIException with the correct message.
:param status_code: The HTTP response code
:param json_data: The HTTP response body decoded as JSON
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube/client/api.py#L422-L460
| null |
class TagCubeClient(object):
DEFAULT_ROOT_URL = 'https://api.tagcube.io/'
API_VERSION = '1.0'
SELF_URL = '/users/~'
DOMAINS = '/domains/'
SCANS = '/scans/'
VERIFICATIONS = '/verifications/'
SCAN_PROFILES = '/profiles/'
DESCRIPTION = 'Created by TagCube REST API client'
def __init__(self, email, api_key, verbose=False):
self.email = email
self.api_key = api_key
self.session = None
self.root_url = os.environ.get('ROOT_URL', self.DEFAULT_ROOT_URL)
self.verify = self.root_url == self.DEFAULT_ROOT_URL
if not self.verify:
# Remove warnings when running tests
#
# InsecureRequestWarning: Unverified HTTPS request is being made
requests.packages.urllib3.disable_warnings()
self.set_verbose(verbose)
self.configure_requests()
def test_auth_credentials(self):
"""
:return: True when the credentials are properly configured.
"""
try:
code, _ = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return False
else:
return code == 200
def get_current_user(self):
try:
code, data = self.send_request(self.build_full_url(self.SELF_URL))
except IncorrectAPICredentials:
return None
else:
return data
def quick_scan(self, target_url, email_notify=None,
scan_profile='full_audit', path_list=('/',)):
"""
:param target_url: The target url e.g. https://www.tagcube.io/
:param email_notify: The notification email e.g. user@example.com
:param scan_profile: The name of the scan profile
:param path_list: The list of paths to use in the crawling bootstrap
The basic idea around this method is to provide users with a quick way
to start a new scan. We perform these steps:
* If the domain in the target_url is not created, we create a new
domain resource for it.
* We verify that the user's license can scan the target domain (new
or already created manually by the user)
* We'll notify about this scan via email, if no email_notify is
specified we'll use the TagCube's user email for notification. If
there is no email notification for this email, we'll create one.
* The scan will be started using the scan_profile and path_list
provided as parameter.
Lots of bad things can trigger errors. All errors trigger exceptions.
Some of the situations where you'll get errors are:
* The user's license can't scan the provided domain
* We failed to connect to the REST API
* The specified scan_profile does not exist
:return: The newly generated scan id
"""
#
# Scan profile handling
#
scan_profile_resource = self.get_scan_profile(scan_profile)
if scan_profile_resource is None:
msg = 'The specified scan profile "%s" does not exist'
raise ValueError(msg % scan_profile)
#
# Domain verification handling
#
domain = get_domain_from_url(target_url)
port = get_port_from_url(target_url)
is_ssl = use_ssl(target_url)
# First, is there a domain resource to verify?
domain_resource = self.get_domain(domain)
if domain_resource is None:
domain_resource = self.domain_add(domain)
verification_resource = self.get_latest_verification(domain_resource.domain,
port, is_ssl)
if verification_resource is None:
# This seems to be the first scan to this domain, we'll have to
# verify the client's ownership.
#
# Depending on the user's configuration, license, etc. this can
# succeed or fail
verification_resource = self.verification_add(domain_resource.id,
port, is_ssl)
if not self.can_scan(verification_resource):
msg = verification_resource.get('verification_message', '')
raise ValueError(CAN_NOT_SCAN_DOMAIN_ERROR % msg)
#
# Email notification handling
#
notif_email = self.email if email_notify is None else email_notify
email_notification_resource = self.get_email_notification(notif_email)
if email_notification_resource is None:
email_notification_resource = self.email_notification_add(notif_email)
#
# Scan!
#
return self.low_level_scan(verification_resource, scan_profile_resource,
path_list, [email_notification_resource])
def low_level_scan(self, verification_resource, scan_profile_resource,
path_list, notification_resource_list):
"""
Low level implementation of the scan launch which allows you to start
a new scan when you already know the ids for the required resources.
:param verification_resource: The verification associated with the
domain resource to scan
:param scan_profile_resource: The scan profile resource
:param path_list: A list with the paths
:param notification_resource_list: The notifications to use
All the *_resource* parameters are obtained by calling the respective
getters such as:
- get_email_notification
- get_scan_profile
And are expected to be of Resource type
This method's last step is to send a POST request to /1.0/scans/ using
a post-data similar to:
{"verification_href": "/1.0/verifications/6",
"profile_href": "/1.0/profiles/2",
"start_time": "now",
"email_notifications_href": [],
"path_list": ["/"]}'
:return: The newly generated scan id
"""
data = {"verification_href": verification_resource.href,
"profile_href": scan_profile_resource.href,
"start_time": "now",
"email_notifications_href": [n.href for n in notification_resource_list],
"path_list": path_list}
url = self.build_full_url('/scans/')
return self.create_resource(url, data)
def get_scan_profile(self, scan_profile):
"""
:return: The scan profile resource (as Resource), or None
"""
return self.filter_resource('profiles', 'name', scan_profile)
def verification_add(self, domain_resource_id, port, is_ssl):
"""
Sends a POST to /1.0/verifications/ using this post-data:
{"domain_href": "/1.0/domains/2",
"port":80,
"ssl":false}
:param domain_resource_id: The domain id to verify
:param port: The TCP port
:param is_ssl: Boolean indicating if we should use ssl
:return: The newly created resource
"""
data = {"domain_href": self.build_api_path('domains',
domain_resource_id),
"port": port,
"ssl": 'true' if is_ssl else 'false'}
url = self.build_full_url(self.VERIFICATIONS)
return self.create_resource(url, data)
def get_latest_verification(self, domain_name, port, is_ssl):
"""
:return: A verification resource (as Resource), or None. If there is
more than one verification resource available it will return
the latest one (the one with the higher id attribute).
"""
filter_dict = {'port': port,
'ssl': 'true' if is_ssl else 'false',
'domain': domain_name,
'success': True}
return self.multi_filter_resource('verifications', filter_dict,
result_handler=LATEST_RESULT)
def multi_filter_resource(self, resource_name, filter_dict,
result_handler=ONE_RESULT):
url = self.build_full_url('/%s/?%s' % (resource_name,
urllib.urlencode(filter_dict)))
code, _json = self.send_request(url)
if isinstance(_json, dict) and 'error' in _json:
# Catch errors like this one:
#
# {"error": "Invalid resource lookup data provided
# (mismatched type)."}
raise TagCubeAPIException(_json['error'])
return RESULT_HANDLERS[result_handler](resource_name,
filter_dict, _json)
def filter_resource(self, resource_name, field_name, field_value,
result_handler=ONE_RESULT):
"""
:return: The resource (as json), or None
"""
return self.multi_filter_resource(resource_name,
{field_name: field_value},
result_handler=result_handler)
def get_email_notification(self, notif_email):
"""
:return: The email notification resource for notif_email, or None
"""
return self.filter_resource('notifications/email', 'email', notif_email)
def email_notification_add(self, notif_email, first_name='None',
last_name='None', description=DESCRIPTION):
"""
Sends a POST to /1.0/notifications/email/ using this post-data:
{"email": "andres.riancho@gmail.com",
"first_name": "Andres",
"last_name": "Riancho",
"description": "Notification email"}
:return: The id of the newly created email notification resource
"""
data = {"email": notif_email,
"first_name": first_name,
"last_name": last_name,
"description": description}
url = self.build_full_url('/notifications/email/')
return self.create_resource(url, data)
def can_scan(self, verification_resource):
"""
Failed verifications look like this:
{
"domain": "/1.0/domains/5",
"href": "/1.0/verifications/2",
"id": 2,
"port": 80,
"ssl": false,
"success": false,
"verification_message": "The HTTP response body does NOT
contain the verification code."
}
Successful verifications look like this:
{
"domain": "/1.0/domains/2",
"href": "/1.0/verifications/3",
"id": 3,
"port": 80,
"ssl": false,
"success": true,
"verification_message": "Verification success"
}
:return: True if the current user can scan the specified domain
associated with the verification
"""
return verification_resource.success
def get_domain(self, domain):
"""
:param domain: The domain to query
:return: The domain resource (as json), or None
"""
return self.filter_resource('domains', 'domain', domain)
def domain_add(self, domain, description=DESCRIPTION):
"""
Sends a POST to /1.0/domains/ using this post-data:
{"domain": "www.fogfu.com",
"description":"Added by tagcube-api"}
:param domain: The domain name to add as a new resource
:return: The newly created resource
"""
data = {"domain": domain,
"description": description}
url = self.build_full_url(self.DOMAINS)
return self.create_resource(url, data)
def get_scan(self, scan_id):
"""
:param scan_id: The scan ID as a string
:return: A resource containing the scan information
"""
url = self.build_full_url('%s%s' % (self.SCANS, scan_id))
_, json_data = self.send_request(url)
return Resource(json_data)
def create_resource(self, url, data):
"""
Shortcut for creating a new resource
:return: The newly created resource as a Resource object
"""
status_code, json_data = self.send_request(url, data, method='POST')
if status_code != 201:
msg = 'Expected 201 status code, got %s. Failed to create resource.'
raise TagCubeAPIException(msg % status_code)
try:
return Resource(json_data)
except KeyError:
# Parse the error and raise an exception, errors look like:
# {u'error': [u'The domain foo.com already exists.']}
error_string = u' '.join(json_data['error'])
raise TagCubeAPIException(error_string)
def set_verbose(self, verbose):
# Get level based on verbose boolean
level = logging.DEBUG if verbose else logging.CRITICAL
# Configure my own logger
api_logger.setLevel(level=level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
api_logger.addHandler(ch)
# Configure the loggers for urllib3, requests and httplib
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(level)
requests_log.propagate = True
requests_log = logging.getLogger("requests")
requests_log.setLevel(level)
http_client.HTTPConnection.debuglevel = 1 if verbose else 0
def configure_requests(self):
self.session = requests.Session()
self.session.auth = (self.email, self.api_key)
headers = {'Content-Type': 'application/json',
'User-Agent': 'TagCubeClient %s' % __VERSION__}
self.session.headers.update(headers)
def send_request(self, url, json_data=None, method='GET'):
if method == 'GET':
response = self.session.get(url, verify=self.verify)
elif method == 'POST':
data = json.dumps(json_data)
response = self.session.post(url, data=data, verify=self.verify)
else:
raise ValueError('Invalid HTTP method: "%s"' % method)
if response.status_code == 401:
raise IncorrectAPICredentials('Invalid TagCube API credentials')
try:
json_data = response.json()
except ValueError:
msg = ('TagCube REST API did not return JSON, if this issue'
' persists please contact support@tagcube.io')
raise TagCubeAPIException(msg)
pretty_json = json.dumps(json_data, indent=4)
msg = 'Received %s HTTP response from the wire:\n%s'
api_logger.debug(msg % (response.status_code, pretty_json))
# Error handling
self.handle_api_errors(response.status_code, json_data)
return response.status_code, json_data
def build_full_url(self, last_part):
return '%s%s%s' % (self.root_url, self.API_VERSION, last_part)
def build_api_path(self, resource_name, last_part=''):
return '/%s/%s/%s' % (self.API_VERSION, resource_name, last_part)
|
tagcubeio/tagcube-cli
|
tagcube_cli/cli.py
|
TagCubeCLI.run
|
python
|
def run(self):
client = None
if self.cmd_args.subcommand in self.API_SUBCOMMAND:
email, api_key = TagCubeCLI.get_credentials(self.cmd_args)
client = TagCubeClient(email, api_key, verbose=self.cmd_args.verbose)
subcommands = {'auth': do_auth_test,
'scan': do_scan_start,
'batch': do_batch_scan,
'version': do_version}
try:
subcommands.get(self.cmd_args.subcommand)(client, self.cmd_args)
except ConnectionError, ce:
msg = 'Failed to connect to TagCube REST API: "%s"'
cli_logger.error(msg % ce)
return 3
except TagCubeAPIException, tae:
cli_logger.error('%s' % tae)
return 4
return 0
|
This method handles the user's command line arguments, for example, if
the user specified a path file we'll open it and read the contents.
Finally it will run the scan using TagCubeClient.scan(...)
:return: The exit code for our process
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube_cli/cli.py#L50-L81
|
[
"def get_credentials(cmd_args):\n \"\"\"\n :return: The email and api_key to use to connect to TagCube. This\n function will try to get the credentials from:\n * Command line arguments\n * Environment variables\n * Configuration file\n\n It will return the first match, in the order specified above.\n \"\"\"\n # Check the cmd args, return if we have something here\n cmd_credentials = cmd_args.email, cmd_args.key\n if cmd_credentials != (None, None):\n cli_logger.debug('Using command line configured credentials')\n return cmd_credentials\n\n env_email, env_api_key = get_config_from_env()\n if env_email is not None:\n if env_api_key is not None:\n cli_logger.debug('Using environment configured credentials')\n return env_email, env_api_key\n\n cfg_email, cfg_api_key = parse_config_file()\n if cfg_email is not None:\n if cfg_api_key is not None:\n cli_logger.debug('Using .tagcube file configured credentials')\n return cfg_email, cfg_api_key\n\n raise ValueError(NO_CREDENTIALS_ERROR)\n"
] |
class TagCubeCLI(object):
"""
The main class for the CLI:
* Receives parsed command line arguments
* Creates and configures a TagCubeClient instance
* Launches a scan
"""
API_SUBCOMMAND = {'auth', 'scan', 'batch'}
def __init__(self, cmd_args):
self.cmd_args = cmd_args
@classmethod
def from_cmd_args(cls, cmd_args):
return cls(cmd_args)
@staticmethod
def parse_args(args=None):
"""
:return: The result of applying argparse to sys.argv
"""
#
# The main parser
#
parser = argparse.ArgumentParser(prog='tagcube',
description=DESCRIPTION,
epilog=EPILOG)
#
# Parser for the common arguments
#
common_parser = argparse.ArgumentParser(add_help=False)
common_parser.add_argument('--email',
required=False,
dest='email',
type=argparse_email_type,
help='The email address (user) to use when'
' sending requests to TagCube\'s REST'
' API')
common_parser.add_argument('--key',
required=False,
dest='key',
type=argparse_uuid_type,
help='The API key to authenticate with'
' TagCube\'s REST API')
common_parser.add_argument('-v',
required=False,
dest='verbose',
action='store_true',
help='Enables verbose output')
#
# Parser for common scan arguments
#
scan_common = argparse.ArgumentParser(add_help=False)
scan_common.add_argument('--scan-profile',
required=False,
dest='scan_profile',
default='full_audit',
help='The web application scan profile to use.'
' A complete list of scan profiles can be'
' retrieved from the API or Web UI.')
scan_common.add_argument('--email-notify',
required=False,
dest='email_notify',
type=argparse_email_type,
help='Email address to notify when application'
' scan finishes')
#
# Handle subcommands
#
subparsers = parser.add_subparsers(help='TagCube sub-commands',
dest='subcommand')
#
# Auth test subcommand
#
_help = ('Test configured authentication credentials and exit. No'
' target URL nor email notifications need to be configured'
' to verify the credentials.')
auth_parser = subparsers.add_parser('auth',
help=_help,
parents=[common_parser])
#
# Scan
#
scan_parser = subparsers.add_parser('scan',
help='Web application security'
' scan using TagCube',
parents=[common_parser,
scan_common])
scan_parser.add_argument('--root-url',
required=True,
dest='root_url',
type=argparse_url_type,
help='Root URL for web application security'
' scan (e.g. https://www.target.com/)')
scan_parser.add_argument('--path-file',
required=False,
dest='path_file',
default=['/'],
type=argparse_path_list_type,
help='A file specifying the URL paths (without'
' the domain name) which TagCube will use'
' to bootstrap the web crawler. The "/"'
' path is used when no'
' --path-file parameter is specified.')
#
# Batch scan subcommand
#
_help = ('Scan multiple domains and URLs in one command, one scan will'
' be started for each unique protocol-domain-port tuple, the'
' URLs paths are processed and sent in the scan configuration')
batch_parser = subparsers.add_parser('batch',
help=_help,
parents=[common_parser,
scan_common])
batch_parser.add_argument('--urls-file',
required=True,
dest='urls_file',
type=argparse.FileType('r'),
help='Text file containing one URL per line')
#
# Version subcommand
#
_help = 'Print the tagcube-cli version'
version_parser = subparsers.add_parser('version', help=_help)
cmd_args = parser.parse_args(args)
handlers = {'scan': TagCubeCLI.handle_scan_args,
'auth': TagCubeCLI.handle_auth_args,
'batch': TagCubeCLI.handle_batch_args,
'version': TagCubeCLI.handle_version_args,}
handler = handlers.get(cmd_args.subcommand)
return handler(parser, cmd_args)
@staticmethod
def handle_global_args(parser, cmd_args):
#
# Global/Parent extra argument parsing steps
#
together = (cmd_args.key, cmd_args.email)
if len([x for x in together if x is not None]) == 1:
parser.error('--key and --email must be used together')
# Enable debugging if required by the user
level = logging.DEBUG if cmd_args.verbose else logging.INFO
cli_logger.setLevel(level=level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
cli_logger.addHandler(ch)
return cmd_args
@staticmethod
def handle_auth_args(parser, cmd_args):
TagCubeCLI.handle_global_args(parser, cmd_args)
return cmd_args
@staticmethod
def handle_version_args(parser, cmd_args):
return cmd_args
@staticmethod
def handle_batch_args(parser, cmd_args):
TagCubeCLI.handle_global_args(parser, cmd_args)
return cmd_args
@staticmethod
def handle_scan_args(parser, cmd_args):
TagCubeCLI.handle_global_args(parser, cmd_args)
return cmd_args
@staticmethod
def get_credentials(cmd_args):
"""
:return: The email and api_key to use to connect to TagCube. This
function will try to get the credentials from:
* Command line arguments
* Environment variables
* Configuration file
It will return the first match, in the order specified above.
"""
# Check the cmd args, return if we have something here
cmd_credentials = cmd_args.email, cmd_args.key
if cmd_credentials != (None, None):
cli_logger.debug('Using command line configured credentials')
return cmd_credentials
env_email, env_api_key = get_config_from_env()
if env_email is not None:
if env_api_key is not None:
cli_logger.debug('Using environment configured credentials')
return env_email, env_api_key
cfg_email, cfg_api_key = parse_config_file()
if cfg_email is not None:
if cfg_api_key is not None:
cli_logger.debug('Using .tagcube file configured credentials')
return cfg_email, cfg_api_key
raise ValueError(NO_CREDENTIALS_ERROR)
|
tagcubeio/tagcube-cli
|
tagcube_cli/cli.py
|
TagCubeCLI.parse_args
|
python
|
def parse_args(args=None):
    """Build the ``tagcube`` argument parser and parse *args*.

    :param args: argument list to parse (``None`` means ``sys.argv[1:]``).
    :return: the parsed namespace, after the sub-command specific handler
             has validated it.
    """
    # Top-level parser.
    main_parser = argparse.ArgumentParser(prog='tagcube',
                                          description=DESCRIPTION,
                                          epilog=EPILOG)

    # Arguments shared by every sub-command.
    shared = argparse.ArgumentParser(add_help=False)
    shared.add_argument('--email',
                        required=False,
                        dest='email',
                        type=argparse_email_type,
                        help='The email address (user) to use when'
                             ' sending requests to TagCube\'s REST'
                             ' API')
    shared.add_argument('--key',
                        required=False,
                        dest='key',
                        type=argparse_uuid_type,
                        help='The API key to authenticate with'
                             ' TagCube\'s REST API')
    shared.add_argument('-v',
                        required=False,
                        dest='verbose',
                        action='store_true',
                        help='Enables verbose output')

    # Arguments shared by the scan-like sub-commands.
    scan_shared = argparse.ArgumentParser(add_help=False)
    scan_shared.add_argument('--scan-profile',
                             required=False,
                             dest='scan_profile',
                             default='full_audit',
                             help='The web application scan profile to use.'
                                  ' A complete list of scan profiles can be'
                                  ' retrieved from the API or Web UI.')
    scan_shared.add_argument('--email-notify',
                             required=False,
                             dest='email_notify',
                             type=argparse_email_type,
                             help='Email address to notify when application'
                                  ' scan finishes')

    sub = main_parser.add_subparsers(help='TagCube sub-commands',
                                     dest='subcommand')

    # auth: credential check only.
    auth_help = ('Test configured authentication credentials and exit. No'
                 ' target URL nor email notifications need to be configured'
                 ' to verify the credentials.')
    sub.add_parser('auth', help=auth_help, parents=[shared])

    # scan: a single target web application.
    scan_parser = sub.add_parser('scan',
                                 help='Web application security'
                                      ' scan using TagCube',
                                 parents=[shared, scan_shared])
    scan_parser.add_argument('--root-url',
                             required=True,
                             dest='root_url',
                             type=argparse_url_type,
                             help='Root URL for web application security'
                                  ' scan (e.g. https://www.target.com/)')
    scan_parser.add_argument('--path-file',
                             required=False,
                             dest='path_file',
                             default=['/'],
                             type=argparse_path_list_type,
                             help='A file specifying the URL paths (without'
                                  ' the domain name) which TagCube will use'
                                  ' to bootstrap the web crawler. The "/"'
                                  ' path is used when no'
                                  ' --path-file parameter is specified.')

    # batch: many targets read from a file.
    batch_help = ('Scan multiple domains and URLs in one command, one scan will'
                  ' be started for each unique protocol-domain-port tuple, the'
                  ' URLs paths are processed and sent in the scan configuration')
    batch_parser = sub.add_parser('batch',
                                  help=batch_help,
                                  parents=[shared, scan_shared])
    batch_parser.add_argument('--urls-file',
                              required=True,
                              dest='urls_file',
                              type=argparse.FileType('r'),
                              help='Text file containing one URL per line')

    # version: just print the tagcube-cli version.
    sub.add_parser('version', help='Print the tagcube-cli version')

    cmd_args = main_parser.parse_args(args)

    # Delegate the sub-command specific validation to its handler.
    dispatch = {'scan': TagCubeCLI.handle_scan_args,
                'auth': TagCubeCLI.handle_auth_args,
                'batch': TagCubeCLI.handle_batch_args,
                'version': TagCubeCLI.handle_version_args}

    return dispatch.get(cmd_args.subcommand)(main_parser, cmd_args)
|
:return: The result of applying argparse to sys.argv
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube_cli/cli.py#L84-L215
| null |
class TagCubeCLI(object):
"""
The main class for the CLI:
* Receives parsed command line arguments
* Creates and configures a TagCubeClient instance
* Launches a scan
"""
API_SUBCOMMAND = {'auth', 'scan', 'batch'}
def __init__(self, cmd_args):
    # Parsed argparse namespace produced by TagCubeCLI.parse_args().
    self.cmd_args = cmd_args
@classmethod
def from_cmd_args(cls, cmd_args):
    """Alternate constructor from a parsed argparse namespace."""
    return cls(cmd_args)
def run(self):
    """Dispatch the parsed sub-command and return the process exit code.

    API-backed sub-commands get a TagCubeClient built from the resolved
    credentials; ``version`` runs without one.

    :return: 0 on success, 3 on connection errors, 4 on API errors.
    """
    client = None
    if self.cmd_args.subcommand in self.API_SUBCOMMAND:
        email, api_key = TagCubeCLI.get_credentials(self.cmd_args)
        client = TagCubeClient(email, api_key, verbose=self.cmd_args.verbose)

    dispatch = {'auth': do_auth_test,
                'scan': do_scan_start,
                'batch': do_batch_scan,
                'version': do_version}

    try:
        dispatch.get(self.cmd_args.subcommand)(client, self.cmd_args)
    except ConnectionError as ce:
        msg = 'Failed to connect to TagCube REST API: "%s"'
        cli_logger.error(msg % ce)
        return 3
    except TagCubeAPIException as tae:
        cli_logger.error('%s' % tae)
        return 4

    return 0
@staticmethod
@staticmethod
def handle_global_args(parser, cmd_args):
#
# Global/Parent extra argument parsing steps
#
together = (cmd_args.key, cmd_args.email)
if len([x for x in together if x is not None]) == 1:
parser.error('--key and --email must be used together')
# Enable debugging if required by the user
level = logging.DEBUG if cmd_args.verbose else logging.INFO
cli_logger.setLevel(level=level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
cli_logger.addHandler(ch)
return cmd_args
@staticmethod
def handle_auth_args(parser, cmd_args):
TagCubeCLI.handle_global_args(parser, cmd_args)
return cmd_args
@staticmethod
def handle_version_args(parser, cmd_args):
return cmd_args
@staticmethod
def handle_batch_args(parser, cmd_args):
TagCubeCLI.handle_global_args(parser, cmd_args)
return cmd_args
@staticmethod
def handle_scan_args(parser, cmd_args):
TagCubeCLI.handle_global_args(parser, cmd_args)
return cmd_args
@staticmethod
def get_credentials(cmd_args):
"""
:return: The email and api_key to use to connect to TagCube. This
function will try to get the credentials from:
* Command line arguments
* Environment variables
* Configuration file
It will return the first match, in the order specified above.
"""
# Check the cmd args, return if we have something here
cmd_credentials = cmd_args.email, cmd_args.key
if cmd_credentials != (None, None):
cli_logger.debug('Using command line configured credentials')
return cmd_credentials
env_email, env_api_key = get_config_from_env()
if env_email is not None:
if env_api_key is not None:
cli_logger.debug('Using environment configured credentials')
return env_email, env_api_key
cfg_email, cfg_api_key = parse_config_file()
if cfg_email is not None:
if cfg_api_key is not None:
cli_logger.debug('Using .tagcube file configured credentials')
return cfg_email, cfg_api_key
raise ValueError(NO_CREDENTIALS_ERROR)
|
tagcubeio/tagcube-cli
|
tagcube_cli/cli.py
|
TagCubeCLI.get_credentials
|
python
|
def get_credentials(cmd_args):
# Check the cmd args, return if we have something here
cmd_credentials = cmd_args.email, cmd_args.key
if cmd_credentials != (None, None):
cli_logger.debug('Using command line configured credentials')
return cmd_credentials
env_email, env_api_key = get_config_from_env()
if env_email is not None:
if env_api_key is not None:
cli_logger.debug('Using environment configured credentials')
return env_email, env_api_key
cfg_email, cfg_api_key = parse_config_file()
if cfg_email is not None:
if cfg_api_key is not None:
cli_logger.debug('Using .tagcube file configured credentials')
return cfg_email, cfg_api_key
raise ValueError(NO_CREDENTIALS_ERROR)
|
:return: The email and api_key to use to connect to TagCube. This
function will try to get the credentials from:
* Command line arguments
* Environment variables
* Configuration file
It will return the first match, in the order specified above.
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube_cli/cli.py#L259-L287
| null |
class TagCubeCLI(object):
"""
The main class for the CLI:
* Receives parsed command line arguments
* Creates and configures a TagCubeClient instance
* Launches a scan
"""
API_SUBCOMMAND = {'auth', 'scan', 'batch'}
def __init__(self, cmd_args):
self.cmd_args = cmd_args
@classmethod
def from_cmd_args(cls, cmd_args):
return cls(cmd_args)
def run(self):
"""
This method handles the user's command line arguments, for example, if
the user specified a path file we'll open it and read the contents.
Finally it will run the scan using TagCubeClient.scan(...)
:return: The exit code for our process
"""
client = None
if self.cmd_args.subcommand in self.API_SUBCOMMAND:
email, api_key = TagCubeCLI.get_credentials(self.cmd_args)
client = TagCubeClient(email, api_key, verbose=self.cmd_args.verbose)
subcommands = {'auth': do_auth_test,
'scan': do_scan_start,
'batch': do_batch_scan,
'version': do_version}
try:
subcommands.get(self.cmd_args.subcommand)(client, self.cmd_args)
except ConnectionError, ce:
msg = 'Failed to connect to TagCube REST API: "%s"'
cli_logger.error(msg % ce)
return 3
except TagCubeAPIException, tae:
cli_logger.error('%s' % tae)
return 4
return 0
@staticmethod
def parse_args(args=None):
"""
:return: The result of applying argparse to sys.argv
"""
#
# The main parser
#
parser = argparse.ArgumentParser(prog='tagcube',
description=DESCRIPTION,
epilog=EPILOG)
#
# Parser for the common arguments
#
common_parser = argparse.ArgumentParser(add_help=False)
common_parser.add_argument('--email',
required=False,
dest='email',
type=argparse_email_type,
help='The email address (user) to use when'
' sending requests to TagCube\'s REST'
' API')
common_parser.add_argument('--key',
required=False,
dest='key',
type=argparse_uuid_type,
help='The API key to authenticate with'
' TagCube\'s REST API')
common_parser.add_argument('-v',
required=False,
dest='verbose',
action='store_true',
help='Enables verbose output')
#
# Parser for common scan arguments
#
scan_common = argparse.ArgumentParser(add_help=False)
scan_common.add_argument('--scan-profile',
required=False,
dest='scan_profile',
default='full_audit',
help='The web application scan profile to use.'
' A complete list of scan profiles can be'
' retrieved from the API or Web UI.')
scan_common.add_argument('--email-notify',
required=False,
dest='email_notify',
type=argparse_email_type,
help='Email address to notify when application'
' scan finishes')
#
# Handle subcommands
#
subparsers = parser.add_subparsers(help='TagCube sub-commands',
dest='subcommand')
#
# Auth test subcommand
#
_help = ('Test configured authentication credentials and exit. No'
' target URL nor email notifications need to be configured'
' to verify the credentials.')
auth_parser = subparsers.add_parser('auth',
help=_help,
parents=[common_parser])
#
# Scan
#
scan_parser = subparsers.add_parser('scan',
help='Web application security'
' scan using TagCube',
parents=[common_parser,
scan_common])
scan_parser.add_argument('--root-url',
required=True,
dest='root_url',
type=argparse_url_type,
help='Root URL for web application security'
' scan (e.g. https://www.target.com/)')
scan_parser.add_argument('--path-file',
required=False,
dest='path_file',
default=['/'],
type=argparse_path_list_type,
help='A file specifying the URL paths (without'
' the domain name) which TagCube will use'
' to bootstrap the web crawler. The "/"'
' path is used when no'
' --path-file parameter is specified.')
#
# Batch scan subcommand
#
_help = ('Scan multiple domains and URLs in one command, one scan will'
' be started for each unique protocol-domain-port tuple, the'
' URLs paths are processed and sent in the scan configuration')
batch_parser = subparsers.add_parser('batch',
help=_help,
parents=[common_parser,
scan_common])
batch_parser.add_argument('--urls-file',
required=True,
dest='urls_file',
type=argparse.FileType('r'),
help='Text file containing one URL per line')
#
# Version subcommand
#
_help = 'Print the tagcube-cli version'
version_parser = subparsers.add_parser('version', help=_help)
cmd_args = parser.parse_args(args)
handlers = {'scan': TagCubeCLI.handle_scan_args,
'auth': TagCubeCLI.handle_auth_args,
'batch': TagCubeCLI.handle_batch_args,
'version': TagCubeCLI.handle_version_args,}
handler = handlers.get(cmd_args.subcommand)
return handler(parser, cmd_args)
@staticmethod
def handle_global_args(parser, cmd_args):
#
# Global/Parent extra argument parsing steps
#
together = (cmd_args.key, cmd_args.email)
if len([x for x in together if x is not None]) == 1:
parser.error('--key and --email must be used together')
# Enable debugging if required by the user
level = logging.DEBUG if cmd_args.verbose else logging.INFO
cli_logger.setLevel(level=level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
cli_logger.addHandler(ch)
return cmd_args
@staticmethod
def handle_auth_args(parser, cmd_args):
TagCubeCLI.handle_global_args(parser, cmd_args)
return cmd_args
@staticmethod
def handle_version_args(parser, cmd_args):
return cmd_args
@staticmethod
def handle_batch_args(parser, cmd_args):
TagCubeCLI.handle_global_args(parser, cmd_args)
return cmd_args
@staticmethod
def handle_scan_args(parser, cmd_args):
TagCubeCLI.handle_global_args(parser, cmd_args)
return cmd_args
@staticmethod
|
tagcubeio/tagcube-cli
|
tagcube_cli/main.py
|
main
|
python
|
def main():
    """Entry point: parse argv, build the CLI object and run it.

    Exit codes: 1 when no credentials are configured, 2 when the
    configured credentials/parameters are invalid, otherwise whatever
    ``TagCubeCLI.run`` returns.
    """
    cmd_args = TagCubeCLI.parse_args()

    try:
        cli_app = TagCubeCLI.from_cmd_args(cmd_args)
    except ValueError as ve:
        # We get here when there are no credentials configured
        print('%s' % ve)
        sys.exit(1)

    try:
        sys.exit(cli_app.run())
    except ValueError as ve:
        # We get here when the configured credentials had some issue
        # (invalid) or there was some error (such as invalid profile
        # name) with the params
        print('%s' % ve)
        sys.exit(2)
|
Project's main method which will parse the command line arguments, run a
scan using the TagCubeClient and exit.
|
train
|
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube_cli/main.py#L6-L26
|
[
"def from_cmd_args(cls, cmd_args):\n return cls(cmd_args)\n",
"def run(self):\n \"\"\"\n This method handles the user's command line arguments, for example, if\n the user specified a path file we'll open it and read the contents.\n Finally it will run the scan using TagCubeClient.scan(...)\n\n :return: The exit code for our process\n \"\"\"\n client = None\n\n if self.cmd_args.subcommand in self.API_SUBCOMMAND:\n email, api_key = TagCubeCLI.get_credentials(self.cmd_args)\n client = TagCubeClient(email, api_key, verbose=self.cmd_args.verbose)\n\n subcommands = {'auth': do_auth_test,\n 'scan': do_scan_start,\n 'batch': do_batch_scan,\n 'version': do_version}\n\n try:\n subcommands.get(self.cmd_args.subcommand)(client, self.cmd_args)\n\n except ConnectionError, ce:\n msg = 'Failed to connect to TagCube REST API: \"%s\"'\n cli_logger.error(msg % ce)\n return 3\n\n except TagCubeAPIException, tae:\n cli_logger.error('%s' % tae)\n return 4\n\n return 0\n",
"def parse_args(args=None):\n \"\"\"\n :return: The result of applying argparse to sys.argv\n \"\"\"\n #\n # The main parser\n #\n parser = argparse.ArgumentParser(prog='tagcube',\n description=DESCRIPTION,\n epilog=EPILOG)\n\n #\n # Parser for the common arguments\n #\n common_parser = argparse.ArgumentParser(add_help=False)\n\n common_parser.add_argument('--email',\n required=False,\n dest='email',\n type=argparse_email_type,\n help='The email address (user) to use when'\n ' sending requests to TagCube\\'s REST'\n ' API')\n\n common_parser.add_argument('--key',\n required=False,\n dest='key',\n type=argparse_uuid_type,\n help='The API key to authenticate with'\n ' TagCube\\'s REST API')\n\n common_parser.add_argument('-v',\n required=False,\n dest='verbose',\n action='store_true',\n help='Enables verbose output')\n\n #\n # Parser for common scan arguments\n #\n scan_common = argparse.ArgumentParser(add_help=False)\n\n scan_common.add_argument('--scan-profile',\n required=False,\n dest='scan_profile',\n default='full_audit',\n help='The web application scan profile to use.'\n ' A complete list of scan profiles can be'\n ' retrieved from the API or Web UI.')\n\n scan_common.add_argument('--email-notify',\n required=False,\n dest='email_notify',\n type=argparse_email_type,\n help='Email address to notify when application'\n ' scan finishes')\n\n #\n # Handle subcommands\n #\n subparsers = parser.add_subparsers(help='TagCube sub-commands',\n dest='subcommand')\n\n #\n # Auth test subcommand\n #\n _help = ('Test configured authentication credentials and exit. 
No'\n ' target URL nor email notifications need to be configured'\n ' to verify the credentials.')\n auth_parser = subparsers.add_parser('auth',\n help=_help,\n parents=[common_parser])\n\n #\n # Scan\n #\n scan_parser = subparsers.add_parser('scan',\n help='Web application security'\n ' scan using TagCube',\n parents=[common_parser,\n scan_common])\n\n scan_parser.add_argument('--root-url',\n required=True,\n dest='root_url',\n type=argparse_url_type,\n help='Root URL for web application security'\n ' scan (e.g. https://www.target.com/)')\n\n scan_parser.add_argument('--path-file',\n required=False,\n dest='path_file',\n default=['/'],\n type=argparse_path_list_type,\n help='A file specifying the URL paths (without'\n ' the domain name) which TagCube will use'\n ' to bootstrap the web crawler. The \"/\"'\n ' path is used when no'\n ' --path-file parameter is specified.')\n\n #\n # Batch scan subcommand\n #\n _help = ('Scan multiple domains and URLs in one command, one scan will'\n ' be started for each unique protocol-domain-port tuple, the'\n ' URLs paths are processed and sent in the scan configuration')\n batch_parser = subparsers.add_parser('batch',\n help=_help,\n parents=[common_parser,\n scan_common])\n\n batch_parser.add_argument('--urls-file',\n required=True,\n dest='urls_file',\n type=argparse.FileType('r'),\n help='Text file containing one URL per line')\n\n #\n # Version subcommand\n #\n _help = 'Print the tagcube-cli version'\n version_parser = subparsers.add_parser('version', help=_help)\n\n cmd_args = parser.parse_args(args)\n\n handlers = {'scan': TagCubeCLI.handle_scan_args,\n 'auth': TagCubeCLI.handle_auth_args,\n 'batch': TagCubeCLI.handle_batch_args,\n 'version': TagCubeCLI.handle_version_args,}\n\n handler = handlers.get(cmd_args.subcommand)\n return handler(parser, cmd_args)\n"
] |
import sys
from tagcube_cli.cli import TagCubeCLI
if __name__ == '__main__':
main()
|
radjkarl/appBase
|
appbase/mainWindowRessources/menuabout.py
|
MenuAbout.setModule
|
python
|
def setModule(self, mod):
    """Populate the about-label from *mod*'s metadata attributes.

    *mod* is expected to define ``__name__``, ``__doc__``,
    ``__author__``, ``__email__``, ``__version__``, ``__license__``
    and ``__url__``.
    """
    template = ('<b>%s</b> - %s<br><br>\n'
                'Author: %s<br>\n'
                'Email: %s<br>\n'
                'Version: %s<br>\n'
                'License: %s<br>\n'
                'Url: <a href="%s">%s</a>')
    fields = (mod.__name__, mod.__doc__, mod.__author__, mod.__email__,
              mod.__version__, mod.__license__, mod.__url__, mod.__url__)
    self.label_txt.setText(template % fields)
    # Let Qt open the URL in the system browser when clicked.
    self.label_txt.setOpenExternalLinks(True)
|
fill the about about label txt with the module attributes of the module
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/mainWindowRessources/menuabout.py#L30-L48
| null |
class MenuAbout(QtWidgets.QWidget):
"""Create a simple about window, showing a logo
and general information defined in the main modules __init__.py file
"""
def __init__(self, parent=None):
self.app = QtWidgets.QApplication.instance()
super(MenuAbout, self).__init__(parent)
self.setWindowTitle('About')
l = QtWidgets.QHBoxLayout()
self.setLayout(l)
logo = QtSvg.QSvgWidget(self.app.session.ICON)
s = logo.sizeHint()
aR = s.height() / s.width()
h = 150
w = h / aR
logo.setFixedSize(w, h)
self.label_txt = QtWidgets.QLabel()
l.addWidget(logo)
l.addWidget(self.label_txt)
def setModule(self, mod):
"""
fill the about about label txt with the module attributes of the module
"""
txt = """<b>%s</b> - %s<br><br>
Author: %s<br>
Email: %s<br>
Version: %s<br>
License: %s<br>
Url: <a href="%s">%s</a>""" % (
mod.__name__,
mod.__doc__,
mod.__author__,
mod.__email__,
mod.__version__,
mod.__license__,
mod.__url__, mod.__url__)
self.label_txt.setText(txt)
self.label_txt.setOpenExternalLinks(True)
def setInstitutionLogo(self, pathList: tuple):
"""
takes one or more [logo].svg paths
if logo should be clickable, set
pathList = (
(my_path1.svg,www.something1.html),
(my_path2.svg,www.something2.html),
...)
"""
for p in pathList:
url = None
if type(p) in (list, tuple):
p, url = p
logo = QtSvg.QSvgWidget(p)
s = logo.sizeHint()
aR = s.height() / s.width()
h = 150
w = h / aR
logo.setFixedSize(int(w), int(h))
self.layout().addWidget(logo)
if url:
logo.mousePressEvent = lambda evt, u=url: self._openUrl(evt, u)
def _openUrl(self, evt, url):
QtGui.QDesktopServices.openUrl(QtCore.QUrl(url))
return evt.accept()
|
radjkarl/appBase
|
appbase/mainWindowRessources/menuabout.py
|
MenuAbout.setInstitutionLogo
|
python
|
def setInstitutionLogo(self, pathList: tuple):
    """Append one or more institution logos (SVG) to the layout.

    Each entry of *pathList* is either an svg path, or an
    ``(svg_path, url)`` pair; when a url is given, clicking the logo
    opens it via ``self._openUrl``.
    """
    for entry in pathList:
        target_url = None
        if type(entry) in (list, tuple):
            entry, target_url = entry
        logo = QtSvg.QSvgWidget(entry)
        hint = logo.sizeHint()
        # Scale to a fixed height of 150 px, preserving aspect ratio.
        aspect = hint.height() / hint.width()
        height = 150
        logo.setFixedSize(int(height / aspect), int(height))
        self.layout().addWidget(logo)
        if target_url:
            # Bind the url as a default arg so each logo keeps its own.
            logo.mousePressEvent = lambda evt, u=target_url: self._openUrl(evt, u)
|
takes one or more [logo].svg paths
if logo should be clickable, set
pathList = (
(my_path1.svg,www.something1.html),
(my_path2.svg,www.something2.html),
...)
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/mainWindowRessources/menuabout.py#L50-L71
| null |
class MenuAbout(QtWidgets.QWidget):
"""Create a simple about window, showing a logo
and general information defined in the main modules __init__.py file
"""
def __init__(self, parent=None):
self.app = QtWidgets.QApplication.instance()
super(MenuAbout, self).__init__(parent)
self.setWindowTitle('About')
l = QtWidgets.QHBoxLayout()
self.setLayout(l)
logo = QtSvg.QSvgWidget(self.app.session.ICON)
s = logo.sizeHint()
aR = s.height() / s.width()
h = 150
w = h / aR
logo.setFixedSize(w, h)
self.label_txt = QtWidgets.QLabel()
l.addWidget(logo)
l.addWidget(self.label_txt)
def setModule(self, mod):
"""
fill the about about label txt with the module attributes of the module
"""
txt = """<b>%s</b> - %s<br><br>
Author: %s<br>
Email: %s<br>
Version: %s<br>
License: %s<br>
Url: <a href="%s">%s</a>""" % (
mod.__name__,
mod.__doc__,
mod.__author__,
mod.__email__,
mod.__version__,
mod.__license__,
mod.__url__, mod.__url__)
self.label_txt.setText(txt)
self.label_txt.setOpenExternalLinks(True)
def setInstitutionLogo(self, pathList: tuple):
"""
takes one or more [logo].svg paths
if logo should be clickable, set
pathList = (
(my_path1.svg,www.something1.html),
(my_path2.svg,www.something2.html),
...)
"""
for p in pathList:
url = None
if type(p) in (list, tuple):
p, url = p
logo = QtSvg.QSvgWidget(p)
s = logo.sizeHint()
aR = s.height() / s.width()
h = 150
w = h / aR
logo.setFixedSize(int(w), int(h))
self.layout().addWidget(logo)
if url:
logo.mousePressEvent = lambda evt, u=url: self._openUrl(evt, u)
def _openUrl(self, evt, url):
QtGui.QDesktopServices.openUrl(QtCore.QUrl(url))
return evt.accept()
|
radjkarl/appBase
|
appbase/mainWindowRessources/menubar.py
|
MenuBar.setFullscreen
|
python
|
def setFullscreen(self, fullscreen):
    """Toggle the parent window between fullscreen and normal mode.

    Keeps the 'Fullscreen' checkbox in the View menu in sync with the
    actual window state.
    """
    if fullscreen:
        self.ckBox_fullscreen.setChecked(True)
        self.parent().showFullScreen()
    else:
        self.ckBox_fullscreen.setChecked(False)
        self.parent().showNormal()
|
toggle between fullscreen and normal window
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/mainWindowRessources/menubar.py#L165-L172
| null |
class MenuBar(FWMenuBar):
"""
MenuBar including
* File (Save, Load, New...)
* State (Next, Previous...)
* View (Fullscreen)
* Help (Shortcuts, About)
"""
def __init__(self):
super(MenuBar, self).__init__()
self.app = QtWidgets.QApplication.instance()
#MENU - FILE
self.menu_file = self.addMenu('&File')
new_add = self.menu_file.addAction('New')
new_add.setStatusTip('...in new window')
new_add.setShortcuts(QtGui.QKeySequence.New)
self.menu_file.addSeparator()
save = self.menu_file.addAction('Save')
save.setStatusTip('Override last saved session')
save.setShortcuts(QtGui.QKeySequence.Save)
save_as = self.menu_file.addAction('Save As')
save.setStatusTip('Choose a name')
save_as.setShortcuts(QtGui.QKeySequence.SaveAs)
open_add = self.menu_file.addAction('Open')
open_add.setStatusTip('...in new window')
open_add.setShortcuts(QtGui.QKeySequence.Open)
self.m_open_recent = self.menu_file.addMenu('Open Recent')
self.m_open_recent.aboutToShow.connect(self._updateOpenRecentMenu)
self.menu_file.addSeparator()
self.file_preferences = MenuPreferences(self)
self.menu_file.action_preferences = self.menu_file.addAction(
'Preferences')
self.menu_file.action_preferences.triggered.connect(
self.file_preferences.show)
self.menu_file.addAction('Exit').triggered.connect(
self.app.closeAllWindows)
#MENU - STATE
menu_state = self.addMenu('&State')
self.a_previous = menu_state.addAction('Previous')
self.a_previous.setStatusTip('Restore a previously saved state')
self.a_previous.triggered.connect(
self.app.session.restorePreviousState)
self.a_next = menu_state.addAction('Next')
self.a_next.setStatusTip('Restore a previously saved state')
self.a_next.triggered.connect(self.app.session.restoreNextState)
self.m_setState = menu_state.addMenu('Set')
self.m_setState.aboutToShow.connect(self._updateSetStateActions)
self.m_renameState = menu_state.addMenu('Rename')
self.m_renameState.aboutToShow.connect(self._updateRenameStateActions)
#MENU - VIEW
self.menu_view = self.addMenu('&View')
self.ckBox_fullscreen = QtWidgets.QAction(
'Fullscreen', self.menu_view, checkable=True)
self.menu_view.addAction(self.ckBox_fullscreen)
self.ckBox_fullscreen.setStatusTip(
'Toggle between window and fullscreen')
self.ckBox_fullscreen.triggered.connect(self.setFullscreen)
self.ckBox_fullscreen.setShortcuts(QtGui.QKeySequence('F11'))
#MENU - HELP
self.menu_help = self.addMenu('&Help')
sc = self.menu_help.addAction('Shortcuts')
sc.setStatusTip('...list all shortcuts')
self.shortcutsWidget = MenuShortcuts()
sc.triggered.connect(self.shortcutsWidget.show)
self.menu_help.addSeparator()
about = self.menu_help.addAction('About')
about.setShortcuts(QtGui.QKeySequence('F1'))
self.aboutWidget = MenuAbout()
about.triggered.connect(self.aboutWidget.show)
# CONNECTING TO APPLICATION.SESSION
s = self.app.session
new_add.triggered.connect(s.new)
save.triggered.connect(s.save)
save_as.triggered.connect(lambda checked: s.saveAs())
open_add.triggered.connect(s.open)
def _updateOpenRecentMenu(self):
self.m_open_recent.clear()
for s in self.app.session.opts['recent sessions']:
s = PathStr(s)
a = self.m_open_recent.addAction(s.basename())
a.setToolTip(s)
a.triggered.connect(lambda checked, s=s: self.app.session.new(s))
def _updateRenameStateActions(self):
self.m_renameState.clear()
se = self.app.session
for s in se.stateNames():
txt = '[%s]' % s
if s == se.current_session:
txt += ' <-'
self.m_renameState.addAction(txt).triggered.connect(
lambda checked, s=s: self._showRenameStateDialog(s))
def _showRenameStateDialog(self, oldStateName):
r = _RenameStateDialog(oldStateName)
ret = r.exec_()
t = str(r.editor.text())
if ret == QtWidgets.QDialog.Accepted and t and t != oldStateName:
self.app.session.renameState(oldStateName, t)
def _updateSetStateActions(self):
self.m_setState.clear()
se = self.app.session
for s in se.stateNames():
txt = '[%s]' % s
if s == se.current_session:
txt += ' <-'
self.m_setState.addAction(txt).triggered.connect(
lambda checked, s=s: se.restoreStateName(s))
def setFullscreen(self, fullscreen):
"""toggle between fullscreen and normal window"""
if not fullscreen:
self.ckBox_fullscreen.setChecked(False)
self.parent().showNormal()
else:
self.ckBox_fullscreen.setChecked(True)
self.parent().showFullScreen()
|
radjkarl/appBase
|
appbase/dialogs/FirstStart.py
|
FirstStart.accept
|
python
|
def accept(self, evt):
    """Apply the first-start choices, then close the dialog.

    Creates a start-menu entry and/or registers the ``*.<ftype>`` file
    association. The latter needs admin rights, so it may relaunch this
    module (or an inline command when frozen) elevated.

    :param evt: unused; kept because the caller passes it.
    """
    # determine if application is a script file or frozen exe (pyinstaller)
    frozen = getattr(sys, 'frozen', False)
    if frozen:
        app_file = sys.executable
    else:
        app_file = PathStr(__main__.__file__).abspath()

    if self.cb_startmenu.isChecked():
        # TODO: allow only logo location
        # icon = app_file.dirname().join('media', 'logo.ico')
        StartMenuEntry(self.name, app_file, icon=self.icon,
                       console=False).create()

    if self.cb_mime.isChecked():
        # get admin rights
        if not isAdmin():
            try:
                # run this file as __main__ with admin rights:
                if frozen:
                    cmd = "from %s import embeddIntoOS\nembeddIntoOS('%s', '%s', '%s')" % (
                        __name__, '', self.ftype, self.name)
                    # in this case there is no python.exe and no moduly.py to call
                    # thats why we have to import the method and execute it
                    runAsAdmin((sys.executable, '-exec', cmd))
                else:
                    runAsAdmin((sys.executable, __file__,
                                app_file, self.ftype, self.name))
            except Exception:
                # FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt; narrowed to Exception.
                print('needs admin rights to work')
        else:
            embeddIntoOS(app_file, self.ftype, self.name)

    QtWidgets.QDialog.accept(self)
|
write setting to the preferences
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/dialogs/FirstStart.py#L56-L92
|
[
"def embeddIntoOS(app_file, ftype, app_name):\n if app_file:\n args = (app_file, '-o')\n else:\n args = '-o'\n assignFtypeToPyFile(\n ftype,\n args,\n mimetype='%s.file' %\n app_name,\n showTerminal=False)\n"
] |
class FirstStart(QtWidgets.QDialog):
"""
Dialog to ask user to embed the application into the OS
"""
def __init__(self, session):
QtWidgets.QDialog.__init__(self)
self.name = session.NAME
self.ftype = session.FTYPE
self.icon = session.ICON
self.setWindowTitle('Starting %s the first time...' % self.name)
self.resize(300, 100)
l = QtWidgets.QVBoxLayout()
self.setLayout(l)
self.cb_startmenu = QtWidgets.QCheckBox('Add to start menu')
self.cb_startmenu.setChecked(True)
self.cb_mime = QtWidgets.QCheckBox(
'Open *.%s files with %s [NEEDS ADMIN]' %
(self.ftype, self.name))
self.cb_mime.setChecked(True)
self.btn_done = QtWidgets.QPushButton('Done')
self.btn_done.clicked.connect(self.accept)
l.addWidget(
QtWidgets.QLabel(
"""The folder '.%s' will be created in
your home directory to store all
necassary information.""" %
self.name))
l.addWidget(self.cb_startmenu)
l.addWidget(self.cb_mime)
l.addWidget(self.btn_done)
# TODO: does not match signature
def accept(self, evt):
"""
write setting to the preferences
"""
# determine if application is a script file or frozen exe (pyinstaller)
frozen = getattr(sys, 'frozen', False)
if frozen:
app_file = sys.executable
else:
app_file = PathStr(__main__.__file__).abspath()
if self.cb_startmenu.isChecked():
# TODO: allow only logo location
# icon = app_file.dirname().join('media', 'logo.ico')
StartMenuEntry(self.name, app_file, icon=self.icon,
console=False).create()
if self.cb_mime.isChecked():
# get admin rights
if not isAdmin():
try:
# run this file as __main__ with admin rights:
if frozen:
cmd = "from %s import embeddIntoOS\nembeddIntoOS('%s', '%s', '%s')" % (
__name__, '', self.ftype, self.name)
# in this case there is no python.exe and no moduly.py to call
# thats why we have to import the method and execute it
runAsAdmin((sys.executable, '-exec', cmd))
else:
runAsAdmin((sys.executable, __file__,
app_file, self.ftype, self.name))
except:
print('needs admin rights to work')
else:
embeddIntoOS(app_file, self.ftype, self.name)
QtWidgets.QDialog.accept(self)
|
radjkarl/appBase
|
appbase/MultiWorkspaceWindow.py
|
MultiWorkspaceWindow.workspaces
|
python
|
def workspaces(self, index=None):
    """Access the stacked workspace widgets.

    With ``index=None`` return a generator over every workspace;
    otherwise return the single workspace at *index*.
    """
    stack = self.centralWidget()
    if index is not None:
        return stack.widget(index)
    return (stack.widget(pos) for pos in range(stack.count()))
|
return generator for all all workspace instances
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/MultiWorkspaceWindow.py#L77-L83
| null |
class MultiWorkspaceWindow(MainWindow):
    """Adding workspace management to appbase.MainWindow
    * 'Workspace' menu in menu bar
    * Switch between workspaces with [Ctrl]+[Page up/down]
    * Add workspace with [Ctrl]+[W]
    * Remove current workspace with [Ctrl]+[Q]
    """

    def __init__(self, workspaceClass, *args, **kwargs):
        """
        workspaceClass needs to have the following methods:
        * def setActive -> called when workspace is activated
        * def setInactive -> called when workspace is deactivated
        """
        MainWindow.__init__(self, *args, **kwargs)
        self._workspace_cls = workspaceClass
        # workspaces are pages of a stacked central widget
        self.setCentralWidget(QtWidgets.QStackedWidget())
        # APPEND MENUBAR
        m = self.menuBar()
        w = m.menu_workspace = QtWidgets.QMenu('&Workspace')
        m.insertMenuBefore(m.menu_help, w)
        action_add = QtWidgets.QAction('&Add', w)
        action_add.triggered.connect(self.addWorkspace)
        action_add.setShortcuts(
            QtGui.QKeySequence(
                QtCore.Qt.CTRL +
                QtCore.Qt.Key_W))
        w.addAction(action_add)
        action_close = QtWidgets.QAction('&Close current', w)
        action_close.triggered.connect(self.closeCurrentWorkspace)
        action_close.setShortcuts(
            QtGui.QKeySequence(
                QtCore.Qt.CTRL +
                QtCore.Qt.Key_Q))
        w.addAction(action_close)
        action_next = QtWidgets.QAction('&Next', w)
        action_next.triggered.connect(self.showNextWorkspace)
        action_next.setShortcuts(
            QtGui.QKeySequence(
                QtCore.Qt.CTRL +
                QtCore.Qt.Key_PageUp))
        w.addAction(action_next)
        action_previous = QtWidgets.QAction('&Previous', w)
        action_previous.triggered.connect(self.showPreviousWorkspace)
        action_previous.setShortcuts(
            QtGui.QKeySequence(
                QtCore.Qt.CTRL +
                QtCore.Qt.Key_PageDown))
        w.addAction(action_previous)
        # submenu listing all workspaces, rebuilt lazily on show
        self._menu_workspaces = QtWidgets.QMenu('Set', w)
        self._menu_workspaces.aboutToShow.connect(self._listWorkspacesInMenu)
        w.addMenu(self._menu_workspaces)
        # Shortcuts
        # NOTE(review): help lists 'Alt+...' while the actions above bind
        # Ctrl+... -- confirm which binding is intended.
        sc = self.menuBar().shortcutsWidget
        sc.addShortcut('Alt+w', 'Add Workspace')
        sc.addShortcut('Alt+q', 'Close current Workspace')
        sc.addShortcut('Alt+PageUp', 'Show next Workspace')
        sc.addShortcut('Alt+PageDown', 'Show previous Workspace')

    def workspaces(self, index=None):
        """return generator for all all workspace instances"""
        c = self.centralWidget()
        if index is None:
            return (c.widget(n) for n in range(c.count()))
        else:
            return c.widget(index)

    def currentWorkspace(self):
        # the workspace currently shown in the stacked widget
        return self.centralWidget().currentWidget()

    def addWorkspace(self):
        """Create a new workspace, show it and return it."""
        w = self.currentWorkspace()
        if w:
            w.setInactive()
        w = self._workspace_cls(self)
        c = self.centralWidget()
        i = c.addWidget(w)
        c.setCurrentIndex(i)
        self.setTitleAdditive('[%s/%s]' % (i + 1, c.count()))
        return w

    def closeWorkspace(self, ws):
        # remove the given workspace from the stack and free it
        ws.close()
        self.centralWidget().removeWidget(ws)
        ws.deleteLater()

    def closeCurrentWorkspace(self):
        """Close the shown workspace; always keep at least one open."""
        c = self.centralWidget()
        self.closeWorkspace(c.currentWidget())
        if c.count() == 0:
            self.addWorkspace()
        else:
            c.setCurrentIndex(c.count() - 1)
            self.setTitleAdditive('[%s/%s]' % (c.count(), c.count()))

    def showNextWorkspace(self):
        c = self.centralWidget()
        i = c.currentIndex()
        if i >= c.count() - 1:
            return  # at the end
        self.showWorkspace(i + 1)

    def showWorkspace(self, i):
        """Switch to workspace *i*, notifying old/new of the change."""
        c = self.centralWidget()
        c.currentWidget().setInactive()
        c.setCurrentIndex(i)
        w = c.currentWidget()
        w.setActive()
        self.setTitleAdditive('[%s/%s]' % (i + 1, c.count()))

    def showPreviousWorkspace(self):
        c = self.centralWidget()
        i = c.currentIndex()
        if i == 0:
            return
        self.showWorkspace(i - 1)

    def _listWorkspacesInMenu(self):
        # rebuild the 'Set' submenu; the current workspace is marked '<-'
        c = self.centralWidget()
        self._menu_workspaces.clear()
        for i in range(c.count()):
            if i == c.currentIndex():
                t = '[%s] <-' % str(i + 1)
            else:
                t = '[%s]' % str(i + 1)
            a = QtWidgets.QAction(t, self._menu_workspaces)
            # default args bind i/self at definition time (late-binding fix)
            a.triggered.connect(
                lambda clicked,
                i=i,
                self=self: self.showWorkspace(i))
            self._menu_workspaces.addAction(a)
|
radjkarl/appBase
|
appbase/Launcher.py
|
_FileSystemModel.data
|
python
|
def data(self, index, role):
    """Supply the archive's bundled 'icon' member as the decoration for
    .pyz entries; defer to the base model for everything else."""
    wants_icon = index.column() == 0 and role == QtCore.Qt.DecorationRole
    if wants_icon and self.isPyz(index):
        with ZipFile(str(self.filePath(index)), 'r') as archive:
            try:
                archive.extract('icon', self._tmp_dir_work)
                return QtGui.QIcon(os.path.join(self._tmp_dir_work, 'icon'))
            except KeyError:
                # no 'icon' member in the archive -> default decoration
                pass
    return super(_FileSystemModel, self).data(index, role)
|
use zipped icon.png as icon
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Launcher.py#L524-L536
|
[
"def isPyz(self, index):\n return str(self.fileName(index)).endswith('.%s' % self.file_type)\n"
] |
class _FileSystemModel(QtWidgets.QFileSystemModel):
    """File-system model aware of zipped session ('.pyz') files.

    Extends QFileSystemModel with icon extraction from the archives and
    in-place editing of an archive's ``__main__.py`` start script.
    """

    def __init__(self, view, file_type):
        QtWidgets.QFileSystemModel.__init__(self, view)
        self.view = view
        self.file_type = file_type  # archive file extension, e.g. 'pyz'
        self.setReadOnly(False)
        # index -> (zip path, tmp dir, extracted name) for open edit sessions
        self._editedSessions = {}
        # scratch directory for members extracted from the archives
        self._tmp_dir_work = tempfile.mkdtemp('PYZ-launcher')

    def isPyz(self, index):
        # True if the entry at *index* carries the session file extension
        return str(self.fileName(index)).endswith('.%s' % self.file_type)

    def extractFiles(self, index, *fnames):
        """Extract *fnames* from the archive at *index* into the work dir.

        Returns a list of extracted paths; ``None`` for missing members.
        """
        extnames = []
        with ZipFile(str(self.filePath(index)), 'r') as myzip:
            for name in fnames:
                try:
                    myzip.extract(name, self._tmp_dir_work)
                    extnames.append(os.path.join(self._tmp_dir_work, name))
                except KeyError:
                    # member not present in the archive
                    extnames.append(None)
        return extnames

    # TODO: does not match signature
    def data(self, index, role):
        """use zipped icon.png as icon"""
        if index.column() == 0 and role == QtCore.Qt.DecorationRole:
            if self.isPyz(index):
                with ZipFile(str(self.filePath(index)), 'r') as myzip:
                    # print myzip.namelist()
                    try:
                        myzip.extract('icon', self._tmp_dir_work)
                        p = os.path.join(self._tmp_dir_work, 'icon')
                        return QtGui.QIcon(p)
                    except KeyError:
                        # no 'icon' member -> fall through to default
                        pass
        return super(_FileSystemModel, self).data(index, role)

    def editStartScript(self, index):
        """open, edit, replace __main__.py"""
        f = str(self.fileName(index))
        if f.endswith('.%s' % self.file_type):
            zipname = str(self.filePath(index))
            with ZipFile(zipname, 'a') as myzip:
                # extract+save script in tmp-dir:
                myzip.extract('__main__.py', self._tmp_dir_work)
                # NOTE(review): assumes a 3-char extension ('.pyz'); a
                # longer self.file_type would truncate wrongly -- confirm
                tempfilename = f[:-4]
                tempfilepath = os.path.join(self._tmp_dir_work, tempfilename)
                os.rename(
                    os.path.join(
                        self._tmp_dir_work,
                        '__main__.py'),
                    tempfilepath)
                self.openTxt(tempfilepath)
                self._editedSessions[index] = (
                    zipname, self._tmp_dir_work, tempfilename)

    def openTxt(self, path):
        # open and editor (depending on platform):
        if sys.platform.startswith('darwin'):
            subprocess.call(('open', path))
        elif os.name == 'nt':
            os.startfile(path)
        elif os.name == 'posix':
            subprocess.call(('xdg-open', path))

    def updateStartStript(self, index):
        # write a previously edited start script back into its archive
        if index in self._editedSessions:
            zipname, dirname, tempfilename = self._editedSessions[index]
            tempfilepath = os.path.join(dirname, tempfilename)
            # print dirname, tempfilename
            if os.path.exists(tempfilepath):
                print("adopt changed startScript '%s'" % tempfilename)
                with ZipFile(zipname, 'a') as myzip:
                    myzip.write(tempfilepath, '__main__.py')
                os.remove(tempfilepath)
|
radjkarl/appBase
|
appbase/Launcher.py
|
_FileSystemModel.editStartScript
|
python
|
def editStartScript(self, index):
    """open, edit, replace __main__.py

    Extracts the archive's ``__main__.py`` to the work dir (renamed after
    the archive), opens it in an external editor and remembers the edit
    session so updateStartStript() can write it back later.
    """
    f = str(self.fileName(index))
    if f.endswith('.%s' % self.file_type):
        zipname = str(self.filePath(index))
        with ZipFile(zipname, 'a') as myzip:
            # extract+save script in tmp-dir:
            myzip.extract('__main__.py', self._tmp_dir_work)
            # NOTE(review): assumes a 3-char extension ('.pyz'); a longer
            # self.file_type would truncate wrongly -- confirm
            tempfilename = f[:-4]
            tempfilepath = os.path.join(self._tmp_dir_work, tempfilename)
            os.rename(
                os.path.join(
                    self._tmp_dir_work,
                    '__main__.py'),
                tempfilepath)
            self.openTxt(tempfilepath)
            self._editedSessions[index] = (
                zipname, self._tmp_dir_work, tempfilename)
|
open, edit, replace __main__.py
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Launcher.py#L538-L555
| null |
class _FileSystemModel(QtWidgets.QFileSystemModel):
    """File-system model aware of zipped session ('.pyz') files.

    Extends QFileSystemModel with icon extraction from the archives and
    in-place editing of an archive's ``__main__.py`` start script.
    """

    def __init__(self, view, file_type):
        QtWidgets.QFileSystemModel.__init__(self, view)
        self.view = view
        self.file_type = file_type  # archive file extension, e.g. 'pyz'
        self.setReadOnly(False)
        # index -> (zip path, tmp dir, extracted name) for open edit sessions
        self._editedSessions = {}
        # scratch directory for members extracted from the archives
        self._tmp_dir_work = tempfile.mkdtemp('PYZ-launcher')

    def isPyz(self, index):
        # True if the entry at *index* carries the session file extension
        return str(self.fileName(index)).endswith('.%s' % self.file_type)

    def extractFiles(self, index, *fnames):
        """Extract *fnames* from the archive at *index* into the work dir.

        Returns a list of extracted paths; ``None`` for missing members.
        """
        extnames = []
        with ZipFile(str(self.filePath(index)), 'r') as myzip:
            for name in fnames:
                try:
                    myzip.extract(name, self._tmp_dir_work)
                    extnames.append(os.path.join(self._tmp_dir_work, name))
                except KeyError:
                    # member not present in the archive
                    extnames.append(None)
        return extnames

    # TODO: does not match signature
    def data(self, index, role):
        """use zipped icon.png as icon"""
        if index.column() == 0 and role == QtCore.Qt.DecorationRole:
            if self.isPyz(index):
                with ZipFile(str(self.filePath(index)), 'r') as myzip:
                    # print myzip.namelist()
                    try:
                        myzip.extract('icon', self._tmp_dir_work)
                        p = os.path.join(self._tmp_dir_work, 'icon')
                        return QtGui.QIcon(p)
                    except KeyError:
                        # no 'icon' member -> fall through to default
                        pass
        return super(_FileSystemModel, self).data(index, role)

    def editStartScript(self, index):
        """open, edit, replace __main__.py"""
        f = str(self.fileName(index))
        if f.endswith('.%s' % self.file_type):
            zipname = str(self.filePath(index))
            with ZipFile(zipname, 'a') as myzip:
                # extract+save script in tmp-dir:
                myzip.extract('__main__.py', self._tmp_dir_work)
                # NOTE(review): assumes a 3-char extension ('.pyz'); a
                # longer self.file_type would truncate wrongly -- confirm
                tempfilename = f[:-4]
                tempfilepath = os.path.join(self._tmp_dir_work, tempfilename)
                os.rename(
                    os.path.join(
                        self._tmp_dir_work,
                        '__main__.py'),
                    tempfilepath)
                self.openTxt(tempfilepath)
                self._editedSessions[index] = (
                    zipname, self._tmp_dir_work, tempfilename)

    def openTxt(self, path):
        # open and editor (depending on platform):
        if sys.platform.startswith('darwin'):
            subprocess.call(('open', path))
        elif os.name == 'nt':
            os.startfile(path)
        elif os.name == 'posix':
            subprocess.call(('xdg-open', path))

    def updateStartStript(self, index):
        # write a previously edited start script back into its archive
        if index in self._editedSessions:
            zipname, dirname, tempfilename = self._editedSessions[index]
            tempfilepath = os.path.join(dirname, tempfilename)
            # print dirname, tempfilename
            if os.path.exists(tempfilepath):
                print("adopt changed startScript '%s'" % tempfilename)
                with ZipFile(zipname, 'a') as myzip:
                    myzip.write(tempfilepath, '__main__.py')
                os.remove(tempfilepath)
|
radjkarl/appBase
|
appbase/Session.py
|
Session.checkMaxSessions
|
python
|
def checkMaxSessions(self, nMax=None):
    """Drop the oldest saved sessions once more than *nMax* exist.

    *nMax* defaults to the 'maxSessions' option.
    """
    limit = self.opts['maxSessions'] if nMax is None else nMax
    names = self.stateNames()
    excess = len(names) - limit
    if excess > 0:
        for name in names[:excess]:
            self.tmp_dir_session.remove(str(name))
|
check whether max. number of saved sessions is reached
if: remove the oldest session
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L208-L218
|
[
" def stateNames(self):\n \"\"\"Returns:\n list: the names of all saved sessions\n \"\"\"\n# if self.current_session:\n s = self.tmp_dir_session\n l = [x for x in s.listdir() if s.join(x).isdir()]\n naturalSorting(l)\n # else:\n # l=[]\n # bring autosave to first position:\n if 'autoSave' in l:\n l.remove('autoSave')\n l.insert(0, 'autoSave')\n return l\n"
] |
class Session(QtCore.QObject):
    """Session management to be accessible
    in QtWidgets.QApplication.instance().session
    * extract the opened (as pyz-zipped) session in a temp folder
    * create 2nd temp-folder for sessions to be saved
    * send a close signal to all child structures when exit
    * write a log file with all output
    * enable icons in menus of gnome-sessions [linux only]
    * gives option of debug mode
    """
    # sigPathChanged = QtCore.Signal(object) #path
    sigSave = QtCore.Signal(object)  # state dict
    sigRestore = QtCore.Signal(object)  # state dict

    def __init__(self, args, **kwargs):
        """
        Args:
            first_start_dialog (Optional[bool]):
                Show a different dialog for the first start.
            name (Optional[str]): The applications name.
            type (Optional[str]): The file type to be used for saving sessions.
            icon (Optional[str]): Path to the application icon.
        """
        QtCore.QObject.__init__(self)
        # SESSION CONSTANTS:
        self.NAME = kwargs.get('name', __main__.__name__)
        self.FTYPE = kwargs.get('ftype', 'pyz')
        self.ICON = kwargs.get('icon', None)
        # hidden app-preferences folder:
        self.dir = PathStr.home().mkdir('.%s' % self.NAME)
        self.APP_CONFIG_FILE = self.dir.join('config.txt')
        self._tmp_dir_session = None
        self.current_session = None
        # global session options - same for all new sessions:
        self.opts = _Opts({
            'maxSessions': 3,
            'enableGuiIcons': True,
            'writeToShell': True,
            'createLog': False,
            'debugMode': False,
            'autosave': False,
            'autosaveIntervalMin': 15,
            'server': False,
            'showCloseDialog': True,
            'recent sessions': []
        }, self)
        # self.app_opts = {'showCloseDialog': True, 'recent sessions': []}
        if not self.APP_CONFIG_FILE.exists():
            # allow different first start dialog:
            dialog = kwargs.get('first_start_dialog', FirstStart)
            f = dialog(self)
            f.exec_()
            if not f.result():
                sys.exit()
            # create the config file
            with open(self.APP_CONFIG_FILE, 'w') as f:
                pass
        else:
            with open(self.APP_CONFIG_FILE, 'r') as f:
                r = f.read()
                if r:
                    # NOTE(review): eval() on the config file trusts its
                    # contents completely -- fine only because the file
                    # lives in the user's own home directory.
                    self.opts.update(eval(r))
        self._icons_enabled = False
        self.log_file = None
        dirname = self.opts['recent sessions']
        if dirname:
            dirname = PathStr(dirname[-1]).dirname()
        self.dialogs = Dialogs(dirname)
        self.saveThread = _SaveThread()
        self._createdAutosaveFile = None
        self.tmp_dir_save_session = None
        # a work-dir for temp. storage:
        # self.tmp_dir_work = PathStr(tempfile.mkdtemp('%s_work' % self.NAME))
        pathName = self._inspectArguments(args)
        self.setSessionPath(pathName)
        if self.opts['createLog']:
            self._setupLogFile()
        # create connectable stdout and stderr signal:
        self.streamOut = StreamSignal('out')
        self.streamErr = StreamSignal('err')
        self._enableGuiIcons()
        # Auto-save timer:
        self.timerAutosave = QtCore.QTimer()
        self.timerAutosave.timeout.connect(self._autoSave)
        self.opts.activate()
        # first thing to do after start:
        QtCore.QTimer.singleShot(0, self.restoreCurrentState)

    def setSessionPath(self, path, statename=None):
        """Point the session at *path* (a zipped session file), extract it
        and select *statename* (or the latest state when None)."""
        if path:  # and path.endswith('.%s' %self.FTYPE):
            # this script was opened out from a zip-container (named as
            # '*.pyz')
            self.path = PathStr(path)
            self.dir = self.path.dirname().abspath()
            # extract the zip temporally
            ZipFile(self.path, 'r').extractall(path=self.tmp_dir_session)
            self.n_sessions = len(self.stateNames())
            # SET STATE
            snames = self.stateNames()
            if statename is None:
                # last one
                self.current_session = snames[-1]
            elif statename in snames:
                self.current_session = statename
            else:
                raise Exception(
                    "state '%s' not in saved states %s" %
                    (statename, snames))
        else:
            self.path = None
            self.n_sessions = 0
            self.current_session = None

    def writeLog(self, write=True):
        """Connect (write=True) or disconnect the stdout/stderr signals
        to the open log file."""
        if not self.log_file:
            return
        so = self.streamOut.message
        se = self.streamErr.message
        w = self.log_file.write
        if write:
            try:
                # ensure only connected once
                so.disconnect(w)
                se.disconnect(w)
            except TypeError:
                pass
            so.connect(w)
            se.connect(w)
        else:
            try:
                so.disconnect(w)
                se.disconnect(w)
            except TypeError:
                pass

    def _enableGuiIcons(self):
        # enable icons in all QMenuBars only for this program if generally
        # disabled
        if self.opts['enableGuiIcons']:
            if os.name == 'posix':  # linux
                this_env = str(os.environ.get('DESKTOP_SESSION'))
                relevant_env = (
                    'gnome',
                    'gnome-shell',
                    'ubuntustudio',
                    'xubuntu')
                if this_env in relevant_env:
                    if 'false' in os.popen(
                            # if the menu-icons on the gnome-desktop are
                            # disabled
                            'gconftool-2 --get /desktop/gnome/interface/menus_have_icons').read():
                        print('enable menu-icons')
                        os.system(
                            'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons True')
                        # remember so quit() can restore the setting
                        self._icons_enabled = True

    def _setupLogFile(self):
        # open (append to) the per-session log and stamp this run
        lfile = self.tmp_dir_session.join('log.txt')
        if lfile.exists():
            self.log_file = open(lfile, 'a')
        else:
            self.log_file = open(lfile, 'w')
        self.log_file.write('''
####################################
New run at %s
####################################
''' % strftime("%d.%m.%Y|%H:%M:%S", gmtime()))

    def checkMaxSessions(self, nMax=None):
        """
        check whether max. number of saved sessions is reached
        if: remove the oldest session
        """
        if nMax is None:
            nMax = self.opts['maxSessions']
        l = self.stateNames()
        if len(l) > nMax:
            for f in l[:len(l) - nMax]:
                self.tmp_dir_session.remove(str(f))

    def stateNames(self):
        """Returns:
            list: the names of all saved sessions
        """
#         if self.current_session:
        s = self.tmp_dir_session
        l = [x for x in s.listdir() if s.join(x).isdir()]
        naturalSorting(l)
#         else:
#             l=[]
        # bring autosave to first position:
        if 'autoSave' in l:
            l.remove('autoSave')
            l.insert(0, 'autoSave')
        return l

    def restorePreviousState(self):
        """Switch to the state before the current one and restore it."""
        s = self.stateNames()
        if s:
            i = s.index(self.current_session)
            # NOTE(review): 'i > 1' means index 0 is never reached from
            # index 1 -- possibly intended to skip 'autoSave'; confirm.
            if i > 1:
                self.current_session = s[i - 1]
                self.restoreCurrentState()

    def restoreNextState(self):
        """Switch to the state after the current one and restore it."""
        s = self.stateNames()
        if s:
            i = s.index(self.current_session)
            if i < len(s) - 1:
                self.current_session = s[i + 1]
                self.restoreCurrentState()

    def restoreStateName(self, name):
        """restore the state of given [name]"""
        self.current_session = name
        self.restoreCurrentState()

    def renameState(self, oldStateName, newStateName):
        # rename the state folder and keep current_session in sync
        s = self.tmp_dir_session.join(oldStateName)
        s.rename(newStateName)
        if self.current_session == oldStateName:
            self.current_session = newStateName
        print("==> State [%s] renamed to [%s]" % (oldStateName, newStateName))

    def _recusiveReplacePlaceholderWithArray(self, state, arrays):
        # swap 'arr_*' placeholder strings in the state dict for the
        # actual numpy arrays loaded from arrays.npz
        def recursive(state):
            for key, val in list(state.items()):
                if isinstance(val, dict):
                    recursive(val)
                elif isinstance(val, str) and val.startswith('arr_'):
                    state[key] = arrays[val]
        recursive(state)

    def restoreCurrentState(self):
        """Load the pickled state of current_session and emit sigRestore."""
        if self.current_session:
            orig = self.tmp_dir_save_session
            path = self.tmp_dir_save_session = self.tmp_dir_session.join(
                self.current_session)
            with open(path.join('state.pickle'), "rb") as f:
                state = pickle.load(f)
            p = path.join('arrays.npz')
            if p.exists():
                arrays = np.load(path.join('arrays.npz'))
                self._recusiveReplacePlaceholderWithArray(state, arrays)
            self.dialogs.restoreState(state['dialogs'])
            self.opts.update(state['session'])
            self.sigRestore.emit(state)
            self.tmp_dir_save_session = orig
            print(
                "==> State [%s] restored from '%s'" %
                (self.current_session, self.path))

    def addSession(self):
        # allocate the next numbered session folder and prune old ones
        self.current_session = self.n_sessions
        self.n_sessions += 1
        self.tmp_dir_save_session = self.tmp_dir_session.join(
            str(self.n_sessions)).mkdir()
        self.checkMaxSessions()

    def quit(self):
        """Tear down the session: restore desktop settings, remove temp
        folders, persist options and close the log file."""
        print('exiting...')
        # RESET ICONS
        if self._icons_enabled:
            print('disable menu-icons')
            os.system(  # restore the standard-setting for seeing icons in the menus
                'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons False')
        # WAIT FOR PROMT IF IN DEBUG MODE
        if self.opts['debugMode']:
            input("Press any key to end the session...")
        # REMOVE TEMP FOLDERS
        try:
            self.tmp_dir_session.remove()
            # self.tmp_dir_work.remove()
        except OSError:
            pass  # in case the folders are used by another process
        with open(self.APP_CONFIG_FILE, 'w') as f:
            f.write(str(self.opts))
        # CLOSE LOG FILE
        if self.log_file:
            self.writeLog(False)
            self.log_file.close()

    def _inspectArguments(self, args):
        """inspect the command-line-args and give them to appBase"""
        if args:
            self.exec_path = PathStr(args[0])
        else:
            self.exec_path = None
        session_name = None
        args = args[1:]
        openSession = False
        for arg in args:
            if arg in ('-h', '--help'):
                self._showHelp()
            elif arg in ('-d', '--debug'):
                print('RUNNGING IN DEBUG-MODE')
                self.opts['debugMode'] = True
            elif arg in ('-l', '--log'):
                print('CREATE LOG')
                self.opts['createLog'] = True
            elif arg in ('-s', '--server'):
                self.opts['server'] = True
            elif arg in ('-o', '--open'):
                openSession = True
            elif openSession:
                session_name = arg
            else:
                print("Argument '%s' not known." % arg)
                return self._showHelp()
        return session_name

    def _showHelp(self):
        # NOTE(review): help text typos ("don'l", "RUNNGING" above) left
        # untouched -- they are runtime strings.
        sys.exit('''
%s-sessions can started with the following arguments:
[-h or --help] - show the help-page
[-d or --debug] - run in debugging-mode
[-l or --log] - create log file
[-n or --new] - start a new session, don'l load saved properties
[-exec [cmd]] - execute python code from this script/executable
''' % self.__class__.__name__)

    def save(self):
        """save the current session
        override, if session was saved earlier"""
        if self.path:
            self._saveState(self.path)
        else:
            self.saveAs()

    def saveAs(self, filename=None):
        """Save the session to *filename*, asking via dialog when None."""
        if filename is None:
            # ask for filename:
            filename = self.dialogs.getSaveFileName(filter="*.%s" % self.FTYPE)
        if filename:
            self.path = filename
            self._saveState(self.path)
            if self._createdAutosaveFile:
                self._createdAutosaveFile.remove()
                print(
                    "removed automatically created '%s'" %
                    self._createdAutosaveFile)
                self._createdAutosaveFile = None

    def replace(self, path):
        """
        replace current session with one given by file path
        """
        self.setSessionPath(path)
        self.restoreCurrentState()

    def open(self):
        """open a session to define in a dialog in an extra window"""
        filename = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
        if filename:
            self.new(filename)

    def new(self, filename=None):
        """start a session an independent process"""
        path = (self.exec_path,)
        if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
            # get the absolute path to the python-executable
            p = find_executable("python")
            path = (p, 'python') + path
        else:
            # if run in frozen env (.exe):
            # first arg if execpath of the next session:
            path += (self.exec_path,)
        if filename:
            path += ('-o', filename)
        os.spawnl(os.P_NOWAIT, *path)

    def registerMainWindow(self, win):
        """Adopt *win* as the main window; wrap its show/hide so autosave
        is suspended while hidden, and hide it in server mode."""
        win.setWindowIcon(QtGui.QIcon(self.ICON))
        self._mainWindow = win
        win.show = self._showMainWindow
        win.hide = self._hideMainWindow
        if self.opts['server']:
            server_ = Server(win)
            win.hide()
        else:
            win.show()

    @property
    def tmp_dir_session(self):
        # only create folder if needed
        if self._tmp_dir_session is None:
            # make temp-dir
            # the directory where the content of the *pyz-file will be copied:
            self._tmp_dir_session = PathStr(
                tempfile.mkdtemp(
                    '%s_session' %
                    self.NAME))
        return self._tmp_dir_session

    def _showMainWindow(self):
        try:
            # restore autosave
            del self._autosave
        except AttributeError:
            pass
        self._mainWindow.__class__.show(self._mainWindow)

    def _hideMainWindow(self):
        # disable autosave when window is hidden
        self._autosave = self.opts['autosave']
        self.opts['autosave'] = False
        self._mainWindow.__class__.hide(self._mainWindow)

    def _saveState(self, path):
        """save current state and add a new state"""
        self.addSession()  # next session
        self._save(str(self.n_sessions), path)

    def _autoSave(self):
        """save state into 'autosave' """
        a = 'autoSave'
        path = self.path
        if not path:
            path = self.dir.join('%s.%s' % (a, self.FTYPE))
            self._createdAutosaveFile = path
        self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir()
        self._save(a, path)

    def blockingSave(self, path):
        """
        saved session to file - returns after finish
        only called by interactiveTutorial-save at the moment
        """
        self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir()
        state = {'session': dict(self.opts),
                 'dialogs': self.dialogs.saveState()}
        self.saveThread.prepare('0', path, self.tmp_dir_session, state)
        self.sigSave.emit(self)
        self.saveThread.run()

    def _save(self, stateName, path):
        """save into 'stateName' to pyz-path"""
        print('saving...')
        state = {'session': dict(self.opts),
                 'dialogs': self.dialogs.saveState()}
        self.sigSave.emit(state)
        self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)
        self.saveThread.start()
        self.current_session = stateName
        r = self.opts['recent sessions']
        try:
            # is this session already exists: remove it
            r.pop(r.index(path))
        except ValueError:
            pass
        # add this session at the beginning
        r.insert(0, path)
|
radjkarl/appBase
|
appbase/Session.py
|
Session.stateNames
|
python
|
def stateNames(self):
    """Return the names of every saved session, with 'autoSave'
    moved to the front of the (naturally sorted) list."""
    root = self.tmp_dir_session
    names = [entry for entry in root.listdir() if root.join(entry).isdir()]
    naturalSorting(names)
    if 'autoSave' in names:
        names.remove('autoSave')
        names.insert(0, 'autoSave')
    return names
|
Returns:
list: the names of all saved sessions
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L220-L234
| null |
class Session(QtCore.QObject):
"""Session management to be accessible
in QtWidgets.QApplication.instance().session
* extract the opened (as pyz-zipped) session in a temp folder
* create 2nd temp-folder for sessions to be saved
* send a close signal to all child structures when exit
* write a log file with all output
* enable icons in menus of gnome-sessions [linux only]
* gives option of debug mode
"""
# sigPathChanged = QtCore.Signal(object) #path
sigSave = QtCore.Signal(object) # state dict
sigRestore = QtCore.Signal(object) # state dict
def __init__(self, args, **kwargs):
"""
Args:
first_start_dialog (Optional[bool]):
Show a different dialog for the first start.
name (Optional[str]): The applications name.
type (Optional[str]): The file type to be used for saving sessions.
icon (Optional[str]): Path to the application icon.
"""
QtCore.QObject.__init__(self)
# SESSION CONSTANTS:
self.NAME = kwargs.get('name', __main__.__name__)
self.FTYPE = kwargs.get('ftype', 'pyz')
self.ICON = kwargs.get('icon', None)
# hidden app-preferences folder:
self.dir = PathStr.home().mkdir('.%s' % self.NAME)
self.APP_CONFIG_FILE = self.dir.join('config.txt')
self._tmp_dir_session = None
self.current_session = None
# global session options - same for all new sessions:
self.opts = _Opts({
'maxSessions': 3,
'enableGuiIcons': True,
'writeToShell': True,
'createLog': False,
'debugMode': False,
'autosave': False,
'autosaveIntervalMin': 15,
'server': False,
'showCloseDialog': True,
'recent sessions': []
}, self)
# self.app_opts = {'showCloseDialog': True, 'recent sessions': []}
if not self.APP_CONFIG_FILE.exists():
# allow different first start dialog:
dialog = kwargs.get('first_start_dialog', FirstStart)
f = dialog(self)
f.exec_()
if not f.result():
sys.exit()
# create the config file
with open(self.APP_CONFIG_FILE, 'w') as f:
pass
else:
with open(self.APP_CONFIG_FILE, 'r') as f:
r = f.read()
if r:
self.opts.update(eval(r))
self._icons_enabled = False
self.log_file = None
dirname = self.opts['recent sessions']
if dirname:
dirname = PathStr(dirname[-1]).dirname()
self.dialogs = Dialogs(dirname)
self.saveThread = _SaveThread()
self._createdAutosaveFile = None
self.tmp_dir_save_session = None
# a work-dir for temp. storage:
# self.tmp_dir_work = PathStr(tempfile.mkdtemp('%s_work' % self.NAME))
pathName = self._inspectArguments(args)
self.setSessionPath(pathName)
if self.opts['createLog']:
self._setupLogFile()
# create connectable stdout and stderr signal:
self.streamOut = StreamSignal('out')
self.streamErr = StreamSignal('err')
self._enableGuiIcons()
# Auto-save timer:
self.timerAutosave = QtCore.QTimer()
self.timerAutosave.timeout.connect(self._autoSave)
self.opts.activate()
# first thing to do after start:
QtCore.QTimer.singleShot(0, self.restoreCurrentState)
def setSessionPath(self, path, statename=None):
if path: # and path.endswith('.%s' %self.FTYPE):
# this script was opened out from a zip-container (named as
# '*.pyz')
self.path = PathStr(path)
self.dir = self.path.dirname().abspath()
# extract the zip temporally
ZipFile(self.path, 'r').extractall(path=self.tmp_dir_session)
self.n_sessions = len(self.stateNames())
# SET STATE
snames = self.stateNames()
if statename is None:
# last one
self.current_session = snames[-1]
elif statename in snames:
self.current_session = statename
else:
raise Exception(
"state '%s' not in saved states %s" %
(statename, snames))
else:
self.path = None
self.n_sessions = 0
self.current_session = None
def writeLog(self, write=True):
if not self.log_file:
return
so = self.streamOut.message
se = self.streamErr.message
w = self.log_file.write
if write:
try:
# ensure only connected once
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
so.connect(w)
se.connect(w)
else:
try:
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
def _enableGuiIcons(self):
# enable icons in all QMenuBars only for this program if generally
# disabled
if self.opts['enableGuiIcons']:
if os.name == 'posix': # linux
this_env = str(os.environ.get('DESKTOP_SESSION'))
relevant_env = (
'gnome',
'gnome-shell',
'ubuntustudio',
'xubuntu')
if this_env in relevant_env:
if 'false' in os.popen(
# if the menu-icons on the gnome-desktop are
# disabled
'gconftool-2 --get /desktop/gnome/interface/menus_have_icons').read():
print('enable menu-icons')
os.system(
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons True')
self._icons_enabled = True
def _setupLogFile(self):
lfile = self.tmp_dir_session.join('log.txt')
if lfile.exists():
self.log_file = open(lfile, 'a')
else:
self.log_file = open(lfile, 'w')
self.log_file.write('''
####################################
New run at %s
####################################
''' % strftime("%d.%m.%Y|%H:%M:%S", gmtime()))
def checkMaxSessions(self, nMax=None):
"""
check whether max. number of saved sessions is reached
if: remove the oldest session
"""
if nMax is None:
nMax = self.opts['maxSessions']
l = self.stateNames()
if len(l) > nMax:
for f in l[:len(l) - nMax]:
self.tmp_dir_session.remove(str(f))
def stateNames(self):
"""Returns:
list: the names of all saved sessions
"""
# if self.current_session:
s = self.tmp_dir_session
l = [x for x in s.listdir() if s.join(x).isdir()]
naturalSorting(l)
# else:
# l=[]
# bring autosave to first position:
if 'autoSave' in l:
l.remove('autoSave')
l.insert(0, 'autoSave')
return l
def restorePreviousState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i > 1:
self.current_session = s[i - 1]
self.restoreCurrentState()
def restoreNextState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i < len(s) - 1:
self.current_session = s[i + 1]
self.restoreCurrentState()
def restoreStateName(self, name):
"""restore the state of given [name]"""
self.current_session = name
self.restoreCurrentState()
def renameState(self, oldStateName, newStateName):
s = self.tmp_dir_session.join(oldStateName)
s.rename(newStateName)
if self.current_session == oldStateName:
self.current_session = newStateName
print("==> State [%s] renamed to [%s]" % (oldStateName, newStateName))
def _recusiveReplacePlaceholderWithArray(self, state, arrays):
def recursive(state):
for key, val in list(state.items()):
if isinstance(val, dict):
recursive(val)
elif isinstance(val, str) and val.startswith('arr_'):
state[key] = arrays[val]
recursive(state)
def restoreCurrentState(self):
if self.current_session:
orig = self.tmp_dir_save_session
path = self.tmp_dir_save_session = self.tmp_dir_session.join(
self.current_session)
with open(path.join('state.pickle'), "rb") as f:
state = pickle.load(f)
p = path.join('arrays.npz')
if p.exists():
arrays = np.load(path.join('arrays.npz'))
self._recusiveReplacePlaceholderWithArray(state, arrays)
self.dialogs.restoreState(state['dialogs'])
self.opts.update(state['session'])
self.sigRestore.emit(state)
self.tmp_dir_save_session = orig
print(
"==> State [%s] restored from '%s'" %
(self.current_session, self.path))
def addSession(self):
self.current_session = self.n_sessions
self.n_sessions += 1
self.tmp_dir_save_session = self.tmp_dir_session.join(
str(self.n_sessions)).mkdir()
self.checkMaxSessions()
def quit(self):
print('exiting...')
# RESET ICONS
if self._icons_enabled:
print('disable menu-icons')
os.system( # restore the standard-setting for seeing icons in the menus
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons False')
# WAIT FOR PROMT IF IN DEBUG MODE
if self.opts['debugMode']:
input("Press any key to end the session...")
# REMOVE TEMP FOLDERS
try:
self.tmp_dir_session.remove()
# self.tmp_dir_work.remove()
except OSError:
pass # in case the folders are used by another process
with open(self.APP_CONFIG_FILE, 'w') as f:
f.write(str(self.opts))
# CLOSE LOG FILE
if self.log_file:
self.writeLog(False)
self.log_file.close()
def _inspectArguments(self, args):
"""inspect the command-line-args and give them to appBase"""
if args:
self.exec_path = PathStr(args[0])
else:
self.exec_path = None
session_name = None
args = args[1:]
openSession = False
for arg in args:
if arg in ('-h', '--help'):
self._showHelp()
elif arg in ('-d', '--debug'):
print('RUNNGING IN DEBUG-MODE')
self.opts['debugMode'] = True
elif arg in ('-l', '--log'):
print('CREATE LOG')
self.opts['createLog'] = True
elif arg in ('-s', '--server'):
self.opts['server'] = True
elif arg in ('-o', '--open'):
openSession = True
elif openSession:
session_name = arg
else:
print("Argument '%s' not known." % arg)
return self._showHelp()
return session_name
def _showHelp(self):
sys.exit('''
%s-sessions can started with the following arguments:
[-h or --help] - show the help-page
[-d or --debug] - run in debugging-mode
[-l or --log] - create log file
[-n or --new] - start a new session, don'l load saved properties
[-exec [cmd]] - execute python code from this script/executable
''' % self.__class__.__name__)
def save(self):
"""save the current session
override, if session was saved earlier"""
if self.path:
self._saveState(self.path)
else:
self.saveAs()
def saveAs(self, filename=None):
if filename is None:
# ask for filename:
filename = self.dialogs.getSaveFileName(filter="*.%s" % self.FTYPE)
if filename:
self.path = filename
self._saveState(self.path)
if self._createdAutosaveFile:
self._createdAutosaveFile.remove()
print(
"removed automatically created '%s'" %
self._createdAutosaveFile)
self._createdAutosaveFile = None
def replace(self, path):
"""
replace current session with one given by file path
"""
self.setSessionPath(path)
self.restoreCurrentState()
def open(self):
"""open a session to define in a dialog in an extra window"""
filename = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
if filename:
self.new(filename)
def new(self, filename=None):
"""start a session an independent process"""
path = (self.exec_path,)
if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
# get the absolute path to the python-executable
p = find_executable("python")
path = (p, 'python') + path
else:
# if run in frozen env (.exe):
# first arg if execpath of the next session:
path += (self.exec_path,)
if filename:
path += ('-o', filename)
os.spawnl(os.P_NOWAIT, *path)
def registerMainWindow(self, win):
win.setWindowIcon(QtGui.QIcon(self.ICON))
self._mainWindow = win
win.show = self._showMainWindow
win.hide = self._hideMainWindow
if self.opts['server']:
server_ = Server(win)
win.hide()
else:
win.show()
@property
def tmp_dir_session(self):
# only create folder if needed
if self._tmp_dir_session is None:
# make temp-dir
# the directory where the content of the *pyz-file will be copied:
self._tmp_dir_session = PathStr(
tempfile.mkdtemp(
'%s_session' %
self.NAME))
return self._tmp_dir_session
def _showMainWindow(self):
try:
# restore autosave
del self._autosave
except AttributeError:
pass
self._mainWindow.__class__.show(self._mainWindow)
def _hideMainWindow(self):
# disable autosave when window is hidden
self._autosave = self.opts['autosave']
self.opts['autosave'] = False
self._mainWindow.__class__.hide(self._mainWindow)
def _saveState(self, path):
"""save current state and add a new state"""
self.addSession() # next session
self._save(str(self.n_sessions), path)
def _autoSave(self):
"""save state into 'autosave' """
a = 'autoSave'
path = self.path
if not path:
path = self.dir.join('%s.%s' % (a, self.FTYPE))
self._createdAutosaveFile = path
self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir()
self._save(a, path)
def blockingSave(self, path):
"""
saved session to file - returns after finish
only called by interactiveTutorial-save at the moment
"""
self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir()
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.saveThread.prepare('0', path, self.tmp_dir_session, state)
self.sigSave.emit(self)
self.saveThread.run()
def _save(self, stateName, path):
"""save into 'stateName' to pyz-path"""
print('saving...')
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.sigSave.emit(state)
self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)
self.saveThread.start()
self.current_session = stateName
r = self.opts['recent sessions']
try:
# is this session already exists: remove it
r.pop(r.index(path))
except ValueError:
pass
# add this session at the beginning
r.insert(0, path)
|
radjkarl/appBase
|
appbase/Session.py
|
Session._inspectArguments
|
python
|
def _inspectArguments(self, args):
"""inspect the command-line-args and give them to appBase"""
if args:
self.exec_path = PathStr(args[0])
else:
self.exec_path = None
session_name = None
args = args[1:]
openSession = False
for arg in args:
if arg in ('-h', '--help'):
self._showHelp()
elif arg in ('-d', '--debug'):
print('RUNNGING IN DEBUG-MODE')
self.opts['debugMode'] = True
elif arg in ('-l', '--log'):
print('CREATE LOG')
self.opts['createLog'] = True
elif arg in ('-s', '--server'):
self.opts['server'] = True
elif arg in ('-o', '--open'):
openSession = True
elif openSession:
session_name = arg
else:
print("Argument '%s' not known." % arg)
return self._showHelp()
return session_name
|
inspect the command-line-args and give them to appBase
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L327-L356
| null |
class Session(QtCore.QObject):
"""Session management to be accessible
in QtWidgets.QApplication.instance().session
* extract the opened (as pyz-zipped) session in a temp folder
* create 2nd temp-folder for sessions to be saved
* send a close signal to all child structures when exit
* write a log file with all output
* enable icons in menus of gnome-sessions [linux only]
* gives option of debug mode
"""
# sigPathChanged = QtCore.Signal(object) #path
sigSave = QtCore.Signal(object) # state dict
sigRestore = QtCore.Signal(object) # state dict
def __init__(self, args, **kwargs):
"""
Args:
first_start_dialog (Optional[bool]):
Show a different dialog for the first start.
name (Optional[str]): The applications name.
type (Optional[str]): The file type to be used for saving sessions.
icon (Optional[str]): Path to the application icon.
"""
QtCore.QObject.__init__(self)
# SESSION CONSTANTS:
self.NAME = kwargs.get('name', __main__.__name__)
self.FTYPE = kwargs.get('ftype', 'pyz')
self.ICON = kwargs.get('icon', None)
# hidden app-preferences folder:
self.dir = PathStr.home().mkdir('.%s' % self.NAME)
self.APP_CONFIG_FILE = self.dir.join('config.txt')
self._tmp_dir_session = None
self.current_session = None
# global session options - same for all new sessions:
self.opts = _Opts({
'maxSessions': 3,
'enableGuiIcons': True,
'writeToShell': True,
'createLog': False,
'debugMode': False,
'autosave': False,
'autosaveIntervalMin': 15,
'server': False,
'showCloseDialog': True,
'recent sessions': []
}, self)
# self.app_opts = {'showCloseDialog': True, 'recent sessions': []}
if not self.APP_CONFIG_FILE.exists():
# allow different first start dialog:
dialog = kwargs.get('first_start_dialog', FirstStart)
f = dialog(self)
f.exec_()
if not f.result():
sys.exit()
# create the config file
with open(self.APP_CONFIG_FILE, 'w') as f:
pass
else:
with open(self.APP_CONFIG_FILE, 'r') as f:
r = f.read()
if r:
self.opts.update(eval(r))
self._icons_enabled = False
self.log_file = None
dirname = self.opts['recent sessions']
if dirname:
dirname = PathStr(dirname[-1]).dirname()
self.dialogs = Dialogs(dirname)
self.saveThread = _SaveThread()
self._createdAutosaveFile = None
self.tmp_dir_save_session = None
# a work-dir for temp. storage:
# self.tmp_dir_work = PathStr(tempfile.mkdtemp('%s_work' % self.NAME))
pathName = self._inspectArguments(args)
self.setSessionPath(pathName)
if self.opts['createLog']:
self._setupLogFile()
# create connectable stdout and stderr signal:
self.streamOut = StreamSignal('out')
self.streamErr = StreamSignal('err')
self._enableGuiIcons()
# Auto-save timer:
self.timerAutosave = QtCore.QTimer()
self.timerAutosave.timeout.connect(self._autoSave)
self.opts.activate()
# first thing to do after start:
QtCore.QTimer.singleShot(0, self.restoreCurrentState)
def setSessionPath(self, path, statename=None):
if path: # and path.endswith('.%s' %self.FTYPE):
# this script was opened out from a zip-container (named as
# '*.pyz')
self.path = PathStr(path)
self.dir = self.path.dirname().abspath()
# extract the zip temporally
ZipFile(self.path, 'r').extractall(path=self.tmp_dir_session)
self.n_sessions = len(self.stateNames())
# SET STATE
snames = self.stateNames()
if statename is None:
# last one
self.current_session = snames[-1]
elif statename in snames:
self.current_session = statename
else:
raise Exception(
"state '%s' not in saved states %s" %
(statename, snames))
else:
self.path = None
self.n_sessions = 0
self.current_session = None
def writeLog(self, write=True):
if not self.log_file:
return
so = self.streamOut.message
se = self.streamErr.message
w = self.log_file.write
if write:
try:
# ensure only connected once
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
so.connect(w)
se.connect(w)
else:
try:
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
def _enableGuiIcons(self):
# enable icons in all QMenuBars only for this program if generally
# disabled
if self.opts['enableGuiIcons']:
if os.name == 'posix': # linux
this_env = str(os.environ.get('DESKTOP_SESSION'))
relevant_env = (
'gnome',
'gnome-shell',
'ubuntustudio',
'xubuntu')
if this_env in relevant_env:
if 'false' in os.popen(
# if the menu-icons on the gnome-desktop are
# disabled
'gconftool-2 --get /desktop/gnome/interface/menus_have_icons').read():
print('enable menu-icons')
os.system(
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons True')
self._icons_enabled = True
def _setupLogFile(self):
lfile = self.tmp_dir_session.join('log.txt')
if lfile.exists():
self.log_file = open(lfile, 'a')
else:
self.log_file = open(lfile, 'w')
self.log_file.write('''
####################################
New run at %s
####################################
''' % strftime("%d.%m.%Y|%H:%M:%S", gmtime()))
def checkMaxSessions(self, nMax=None):
"""
check whether max. number of saved sessions is reached
if: remove the oldest session
"""
if nMax is None:
nMax = self.opts['maxSessions']
l = self.stateNames()
if len(l) > nMax:
for f in l[:len(l) - nMax]:
self.tmp_dir_session.remove(str(f))
def stateNames(self):
"""Returns:
list: the names of all saved sessions
"""
# if self.current_session:
s = self.tmp_dir_session
l = [x for x in s.listdir() if s.join(x).isdir()]
naturalSorting(l)
# else:
# l=[]
# bring autosave to first position:
if 'autoSave' in l:
l.remove('autoSave')
l.insert(0, 'autoSave')
return l
def restorePreviousState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i > 1:
self.current_session = s[i - 1]
self.restoreCurrentState()
def restoreNextState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i < len(s) - 1:
self.current_session = s[i + 1]
self.restoreCurrentState()
def restoreStateName(self, name):
"""restore the state of given [name]"""
self.current_session = name
self.restoreCurrentState()
def renameState(self, oldStateName, newStateName):
s = self.tmp_dir_session.join(oldStateName)
s.rename(newStateName)
if self.current_session == oldStateName:
self.current_session = newStateName
print("==> State [%s] renamed to [%s]" % (oldStateName, newStateName))
def _recusiveReplacePlaceholderWithArray(self, state, arrays):
def recursive(state):
for key, val in list(state.items()):
if isinstance(val, dict):
recursive(val)
elif isinstance(val, str) and val.startswith('arr_'):
state[key] = arrays[val]
recursive(state)
def restoreCurrentState(self):
if self.current_session:
orig = self.tmp_dir_save_session
path = self.tmp_dir_save_session = self.tmp_dir_session.join(
self.current_session)
with open(path.join('state.pickle'), "rb") as f:
state = pickle.load(f)
p = path.join('arrays.npz')
if p.exists():
arrays = np.load(path.join('arrays.npz'))
self._recusiveReplacePlaceholderWithArray(state, arrays)
self.dialogs.restoreState(state['dialogs'])
self.opts.update(state['session'])
self.sigRestore.emit(state)
self.tmp_dir_save_session = orig
print(
"==> State [%s] restored from '%s'" %
(self.current_session, self.path))
def addSession(self):
self.current_session = self.n_sessions
self.n_sessions += 1
self.tmp_dir_save_session = self.tmp_dir_session.join(
str(self.n_sessions)).mkdir()
self.checkMaxSessions()
def quit(self):
print('exiting...')
# RESET ICONS
if self._icons_enabled:
print('disable menu-icons')
os.system( # restore the standard-setting for seeing icons in the menus
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons False')
# WAIT FOR PROMT IF IN DEBUG MODE
if self.opts['debugMode']:
input("Press any key to end the session...")
# REMOVE TEMP FOLDERS
try:
self.tmp_dir_session.remove()
# self.tmp_dir_work.remove()
except OSError:
pass # in case the folders are used by another process
with open(self.APP_CONFIG_FILE, 'w') as f:
f.write(str(self.opts))
# CLOSE LOG FILE
if self.log_file:
self.writeLog(False)
self.log_file.close()
def _inspectArguments(self, args):
"""inspect the command-line-args and give them to appBase"""
if args:
self.exec_path = PathStr(args[0])
else:
self.exec_path = None
session_name = None
args = args[1:]
openSession = False
for arg in args:
if arg in ('-h', '--help'):
self._showHelp()
elif arg in ('-d', '--debug'):
print('RUNNGING IN DEBUG-MODE')
self.opts['debugMode'] = True
elif arg in ('-l', '--log'):
print('CREATE LOG')
self.opts['createLog'] = True
elif arg in ('-s', '--server'):
self.opts['server'] = True
elif arg in ('-o', '--open'):
openSession = True
elif openSession:
session_name = arg
else:
print("Argument '%s' not known." % arg)
return self._showHelp()
return session_name
def _showHelp(self):
sys.exit('''
%s-sessions can started with the following arguments:
[-h or --help] - show the help-page
[-d or --debug] - run in debugging-mode
[-l or --log] - create log file
[-n or --new] - start a new session, don'l load saved properties
[-exec [cmd]] - execute python code from this script/executable
''' % self.__class__.__name__)
def save(self):
"""save the current session
override, if session was saved earlier"""
if self.path:
self._saveState(self.path)
else:
self.saveAs()
def saveAs(self, filename=None):
if filename is None:
# ask for filename:
filename = self.dialogs.getSaveFileName(filter="*.%s" % self.FTYPE)
if filename:
self.path = filename
self._saveState(self.path)
if self._createdAutosaveFile:
self._createdAutosaveFile.remove()
print(
"removed automatically created '%s'" %
self._createdAutosaveFile)
self._createdAutosaveFile = None
def replace(self, path):
"""
replace current session with one given by file path
"""
self.setSessionPath(path)
self.restoreCurrentState()
def open(self):
"""open a session to define in a dialog in an extra window"""
filename = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
if filename:
self.new(filename)
def new(self, filename=None):
"""start a session an independent process"""
path = (self.exec_path,)
if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
# get the absolute path to the python-executable
p = find_executable("python")
path = (p, 'python') + path
else:
# if run in frozen env (.exe):
# first arg if execpath of the next session:
path += (self.exec_path,)
if filename:
path += ('-o', filename)
os.spawnl(os.P_NOWAIT, *path)
def registerMainWindow(self, win):
win.setWindowIcon(QtGui.QIcon(self.ICON))
self._mainWindow = win
win.show = self._showMainWindow
win.hide = self._hideMainWindow
if self.opts['server']:
server_ = Server(win)
win.hide()
else:
win.show()
@property
def tmp_dir_session(self):
# only create folder if needed
if self._tmp_dir_session is None:
# make temp-dir
# the directory where the content of the *pyz-file will be copied:
self._tmp_dir_session = PathStr(
tempfile.mkdtemp(
'%s_session' %
self.NAME))
return self._tmp_dir_session
def _showMainWindow(self):
try:
# restore autosave
del self._autosave
except AttributeError:
pass
self._mainWindow.__class__.show(self._mainWindow)
def _hideMainWindow(self):
# disable autosave when window is hidden
self._autosave = self.opts['autosave']
self.opts['autosave'] = False
self._mainWindow.__class__.hide(self._mainWindow)
def _saveState(self, path):
"""save current state and add a new state"""
self.addSession() # next session
self._save(str(self.n_sessions), path)
def _autoSave(self):
"""save state into 'autosave' """
a = 'autoSave'
path = self.path
if not path:
path = self.dir.join('%s.%s' % (a, self.FTYPE))
self._createdAutosaveFile = path
self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir()
self._save(a, path)
def blockingSave(self, path):
"""
saved session to file - returns after finish
only called by interactiveTutorial-save at the moment
"""
self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir()
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.saveThread.prepare('0', path, self.tmp_dir_session, state)
self.sigSave.emit(self)
self.saveThread.run()
def _save(self, stateName, path):
"""save into 'stateName' to pyz-path"""
print('saving...')
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.sigSave.emit(state)
self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)
self.saveThread.start()
self.current_session = stateName
r = self.opts['recent sessions']
try:
# is this session already exists: remove it
r.pop(r.index(path))
except ValueError:
pass
# add this session at the beginning
r.insert(0, path)
|
radjkarl/appBase
|
appbase/Session.py
|
Session.save
|
python
|
def save(self):
"""save the current session
override, if session was saved earlier"""
if self.path:
self._saveState(self.path)
else:
self.saveAs()
|
save the current session
override, if session was saved earlier
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L368-L374
|
[
"def saveAs(self, filename=None):\n if filename is None:\n # ask for filename:\n filename = self.dialogs.getSaveFileName(filter=\"*.%s\" % self.FTYPE)\n if filename:\n self.path = filename\n self._saveState(self.path)\n if self._createdAutosaveFile:\n self._createdAutosaveFile.remove()\n print(\n \"removed automatically created '%s'\" %\n self._createdAutosaveFile)\n self._createdAutosaveFile = None\n",
"def _saveState(self, path):\n \"\"\"save current state and add a new state\"\"\"\n self.addSession() # next session\n self._save(str(self.n_sessions), path)\n"
] |
class Session(QtCore.QObject):
"""Session management to be accessible
in QtWidgets.QApplication.instance().session
* extract the opened (as pyz-zipped) session in a temp folder
* create 2nd temp-folder for sessions to be saved
* send a close signal to all child structures when exit
* write a log file with all output
* enable icons in menus of gnome-sessions [linux only]
* gives option of debug mode
"""
# sigPathChanged = QtCore.Signal(object) #path
sigSave = QtCore.Signal(object) # state dict
sigRestore = QtCore.Signal(object) # state dict
def __init__(self, args, **kwargs):
"""
Args:
first_start_dialog (Optional[bool]):
Show a different dialog for the first start.
name (Optional[str]): The applications name.
type (Optional[str]): The file type to be used for saving sessions.
icon (Optional[str]): Path to the application icon.
"""
QtCore.QObject.__init__(self)
# SESSION CONSTANTS:
self.NAME = kwargs.get('name', __main__.__name__)
self.FTYPE = kwargs.get('ftype', 'pyz')
self.ICON = kwargs.get('icon', None)
# hidden app-preferences folder:
self.dir = PathStr.home().mkdir('.%s' % self.NAME)
self.APP_CONFIG_FILE = self.dir.join('config.txt')
self._tmp_dir_session = None
self.current_session = None
# global session options - same for all new sessions:
self.opts = _Opts({
'maxSessions': 3,
'enableGuiIcons': True,
'writeToShell': True,
'createLog': False,
'debugMode': False,
'autosave': False,
'autosaveIntervalMin': 15,
'server': False,
'showCloseDialog': True,
'recent sessions': []
}, self)
# self.app_opts = {'showCloseDialog': True, 'recent sessions': []}
if not self.APP_CONFIG_FILE.exists():
# allow different first start dialog:
dialog = kwargs.get('first_start_dialog', FirstStart)
f = dialog(self)
f.exec_()
if not f.result():
sys.exit()
# create the config file
with open(self.APP_CONFIG_FILE, 'w') as f:
pass
else:
with open(self.APP_CONFIG_FILE, 'r') as f:
r = f.read()
if r:
self.opts.update(eval(r))
self._icons_enabled = False
self.log_file = None
dirname = self.opts['recent sessions']
if dirname:
dirname = PathStr(dirname[-1]).dirname()
self.dialogs = Dialogs(dirname)
self.saveThread = _SaveThread()
self._createdAutosaveFile = None
self.tmp_dir_save_session = None
# a work-dir for temp. storage:
# self.tmp_dir_work = PathStr(tempfile.mkdtemp('%s_work' % self.NAME))
pathName = self._inspectArguments(args)
self.setSessionPath(pathName)
if self.opts['createLog']:
self._setupLogFile()
# create connectable stdout and stderr signal:
self.streamOut = StreamSignal('out')
self.streamErr = StreamSignal('err')
self._enableGuiIcons()
# Auto-save timer:
self.timerAutosave = QtCore.QTimer()
self.timerAutosave.timeout.connect(self._autoSave)
self.opts.activate()
# first thing to do after start:
QtCore.QTimer.singleShot(0, self.restoreCurrentState)
def setSessionPath(self, path, statename=None):
if path: # and path.endswith('.%s' %self.FTYPE):
# this script was opened out from a zip-container (named as
# '*.pyz')
self.path = PathStr(path)
self.dir = self.path.dirname().abspath()
# extract the zip temporally
ZipFile(self.path, 'r').extractall(path=self.tmp_dir_session)
self.n_sessions = len(self.stateNames())
# SET STATE
snames = self.stateNames()
if statename is None:
# last one
self.current_session = snames[-1]
elif statename in snames:
self.current_session = statename
else:
raise Exception(
"state '%s' not in saved states %s" %
(statename, snames))
else:
self.path = None
self.n_sessions = 0
self.current_session = None
def writeLog(self, write=True):
if not self.log_file:
return
so = self.streamOut.message
se = self.streamErr.message
w = self.log_file.write
if write:
try:
# ensure only connected once
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
so.connect(w)
se.connect(w)
else:
try:
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
def _enableGuiIcons(self):
# enable icons in all QMenuBars only for this program if generally
# disabled
if self.opts['enableGuiIcons']:
if os.name == 'posix': # linux
this_env = str(os.environ.get('DESKTOP_SESSION'))
relevant_env = (
'gnome',
'gnome-shell',
'ubuntustudio',
'xubuntu')
if this_env in relevant_env:
if 'false' in os.popen(
# if the menu-icons on the gnome-desktop are
# disabled
'gconftool-2 --get /desktop/gnome/interface/menus_have_icons').read():
print('enable menu-icons')
os.system(
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons True')
self._icons_enabled = True
def _setupLogFile(self):
lfile = self.tmp_dir_session.join('log.txt')
if lfile.exists():
self.log_file = open(lfile, 'a')
else:
self.log_file = open(lfile, 'w')
self.log_file.write('''
####################################
New run at %s
####################################
''' % strftime("%d.%m.%Y|%H:%M:%S", gmtime()))
def checkMaxSessions(self, nMax=None):
"""
check whether max. number of saved sessions is reached
if: remove the oldest session
"""
if nMax is None:
nMax = self.opts['maxSessions']
l = self.stateNames()
if len(l) > nMax:
for f in l[:len(l) - nMax]:
self.tmp_dir_session.remove(str(f))
def stateNames(self):
"""Returns:
list: the names of all saved sessions
"""
# if self.current_session:
s = self.tmp_dir_session
l = [x for x in s.listdir() if s.join(x).isdir()]
naturalSorting(l)
# else:
# l=[]
# bring autosave to first position:
if 'autoSave' in l:
l.remove('autoSave')
l.insert(0, 'autoSave')
return l
def restorePreviousState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i > 1:
self.current_session = s[i - 1]
self.restoreCurrentState()
def restoreNextState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i < len(s) - 1:
self.current_session = s[i + 1]
self.restoreCurrentState()
def restoreStateName(self, name):
"""restore the state of given [name]"""
self.current_session = name
self.restoreCurrentState()
def renameState(self, oldStateName, newStateName):
s = self.tmp_dir_session.join(oldStateName)
s.rename(newStateName)
if self.current_session == oldStateName:
self.current_session = newStateName
print("==> State [%s] renamed to [%s]" % (oldStateName, newStateName))
def _recusiveReplacePlaceholderWithArray(self, state, arrays):
def recursive(state):
for key, val in list(state.items()):
if isinstance(val, dict):
recursive(val)
elif isinstance(val, str) and val.startswith('arr_'):
state[key] = arrays[val]
recursive(state)
def restoreCurrentState(self):
if self.current_session:
orig = self.tmp_dir_save_session
path = self.tmp_dir_save_session = self.tmp_dir_session.join(
self.current_session)
with open(path.join('state.pickle'), "rb") as f:
state = pickle.load(f)
p = path.join('arrays.npz')
if p.exists():
arrays = np.load(path.join('arrays.npz'))
self._recusiveReplacePlaceholderWithArray(state, arrays)
self.dialogs.restoreState(state['dialogs'])
self.opts.update(state['session'])
self.sigRestore.emit(state)
self.tmp_dir_save_session = orig
print(
"==> State [%s] restored from '%s'" %
(self.current_session, self.path))
def addSession(self):
self.current_session = self.n_sessions
self.n_sessions += 1
self.tmp_dir_save_session = self.tmp_dir_session.join(
str(self.n_sessions)).mkdir()
self.checkMaxSessions()
def quit(self):
print('exiting...')
# RESET ICONS
if self._icons_enabled:
print('disable menu-icons')
os.system( # restore the standard-setting for seeing icons in the menus
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons False')
# WAIT FOR PROMT IF IN DEBUG MODE
if self.opts['debugMode']:
input("Press any key to end the session...")
# REMOVE TEMP FOLDERS
try:
self.tmp_dir_session.remove()
# self.tmp_dir_work.remove()
except OSError:
pass # in case the folders are used by another process
with open(self.APP_CONFIG_FILE, 'w') as f:
f.write(str(self.opts))
# CLOSE LOG FILE
if self.log_file:
self.writeLog(False)
self.log_file.close()
def _inspectArguments(self, args):
"""inspect the command-line-args and give them to appBase"""
if args:
self.exec_path = PathStr(args[0])
else:
self.exec_path = None
session_name = None
args = args[1:]
openSession = False
for arg in args:
if arg in ('-h', '--help'):
self._showHelp()
elif arg in ('-d', '--debug'):
print('RUNNGING IN DEBUG-MODE')
self.opts['debugMode'] = True
elif arg in ('-l', '--log'):
print('CREATE LOG')
self.opts['createLog'] = True
elif arg in ('-s', '--server'):
self.opts['server'] = True
elif arg in ('-o', '--open'):
openSession = True
elif openSession:
session_name = arg
else:
print("Argument '%s' not known." % arg)
return self._showHelp()
return session_name
def _showHelp(self):
sys.exit('''
%s-sessions can started with the following arguments:
[-h or --help] - show the help-page
[-d or --debug] - run in debugging-mode
[-l or --log] - create log file
[-n or --new] - start a new session, don'l load saved properties
[-exec [cmd]] - execute python code from this script/executable
''' % self.__class__.__name__)
def save(self):
"""save the current session
override, if session was saved earlier"""
if self.path:
self._saveState(self.path)
else:
self.saveAs()
def saveAs(self, filename=None):
if filename is None:
# ask for filename:
filename = self.dialogs.getSaveFileName(filter="*.%s" % self.FTYPE)
if filename:
self.path = filename
self._saveState(self.path)
if self._createdAutosaveFile:
self._createdAutosaveFile.remove()
print(
"removed automatically created '%s'" %
self._createdAutosaveFile)
self._createdAutosaveFile = None
def replace(self, path):
"""
replace current session with one given by file path
"""
self.setSessionPath(path)
self.restoreCurrentState()
def open(self):
"""open a session to define in a dialog in an extra window"""
filename = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
if filename:
self.new(filename)
def new(self, filename=None):
"""start a session an independent process"""
path = (self.exec_path,)
if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
# get the absolute path to the python-executable
p = find_executable("python")
path = (p, 'python') + path
else:
# if run in frozen env (.exe):
# first arg if execpath of the next session:
path += (self.exec_path,)
if filename:
path += ('-o', filename)
os.spawnl(os.P_NOWAIT, *path)
def registerMainWindow(self, win):
win.setWindowIcon(QtGui.QIcon(self.ICON))
self._mainWindow = win
win.show = self._showMainWindow
win.hide = self._hideMainWindow
if self.opts['server']:
server_ = Server(win)
win.hide()
else:
win.show()
@property
def tmp_dir_session(self):
# only create folder if needed
if self._tmp_dir_session is None:
# make temp-dir
# the directory where the content of the *pyz-file will be copied:
self._tmp_dir_session = PathStr(
tempfile.mkdtemp(
'%s_session' %
self.NAME))
return self._tmp_dir_session
def _showMainWindow(self):
try:
# restore autosave
del self._autosave
except AttributeError:
pass
self._mainWindow.__class__.show(self._mainWindow)
def _hideMainWindow(self):
# disable autosave when window is hidden
self._autosave = self.opts['autosave']
self.opts['autosave'] = False
self._mainWindow.__class__.hide(self._mainWindow)
def _saveState(self, path):
"""save current state and add a new state"""
self.addSession() # next session
self._save(str(self.n_sessions), path)
def _autoSave(self):
"""save state into 'autosave' """
a = 'autoSave'
path = self.path
if not path:
path = self.dir.join('%s.%s' % (a, self.FTYPE))
self._createdAutosaveFile = path
self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir()
self._save(a, path)
def blockingSave(self, path):
"""
saved session to file - returns after finish
only called by interactiveTutorial-save at the moment
"""
self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir()
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.saveThread.prepare('0', path, self.tmp_dir_session, state)
self.sigSave.emit(self)
self.saveThread.run()
def _save(self, stateName, path):
"""save into 'stateName' to pyz-path"""
print('saving...')
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.sigSave.emit(state)
self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)
self.saveThread.start()
self.current_session = stateName
r = self.opts['recent sessions']
try:
# is this session already exists: remove it
r.pop(r.index(path))
except ValueError:
pass
# add this session at the beginning
r.insert(0, path)
|
radjkarl/appBase
|
appbase/Session.py
|
Session.open
|
python
|
def open(self):
"""open a session to define in a dialog in an extra window"""
filename = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
if filename:
self.new(filename)
|
open a session to define in a dialog in an extra window
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L397-L401
|
[
"def new(self, filename=None):\n \"\"\"start a session an independent process\"\"\"\n path = (self.exec_path,)\n if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):\n # get the absolute path to the python-executable\n p = find_executable(\"python\")\n path = (p, 'python') + path\n else:\n # if run in frozen env (.exe):\n # first arg if execpath of the next session:\n path += (self.exec_path,)\n if filename:\n path += ('-o', filename)\n os.spawnl(os.P_NOWAIT, *path)\n"
] |
class Session(QtCore.QObject):
"""Session management to be accessible
in QtWidgets.QApplication.instance().session
* extract the opened (as pyz-zipped) session in a temp folder
* create 2nd temp-folder for sessions to be saved
* send a close signal to all child structures when exit
* write a log file with all output
* enable icons in menus of gnome-sessions [linux only]
* gives option of debug mode
"""
# sigPathChanged = QtCore.Signal(object) #path
sigSave = QtCore.Signal(object) # state dict
sigRestore = QtCore.Signal(object) # state dict
def __init__(self, args, **kwargs):
"""
Args:
first_start_dialog (Optional[bool]):
Show a different dialog for the first start.
name (Optional[str]): The applications name.
type (Optional[str]): The file type to be used for saving sessions.
icon (Optional[str]): Path to the application icon.
"""
QtCore.QObject.__init__(self)
# SESSION CONSTANTS:
self.NAME = kwargs.get('name', __main__.__name__)
self.FTYPE = kwargs.get('ftype', 'pyz')
self.ICON = kwargs.get('icon', None)
# hidden app-preferences folder:
self.dir = PathStr.home().mkdir('.%s' % self.NAME)
self.APP_CONFIG_FILE = self.dir.join('config.txt')
self._tmp_dir_session = None
self.current_session = None
# global session options - same for all new sessions:
self.opts = _Opts({
'maxSessions': 3,
'enableGuiIcons': True,
'writeToShell': True,
'createLog': False,
'debugMode': False,
'autosave': False,
'autosaveIntervalMin': 15,
'server': False,
'showCloseDialog': True,
'recent sessions': []
}, self)
# self.app_opts = {'showCloseDialog': True, 'recent sessions': []}
if not self.APP_CONFIG_FILE.exists():
# allow different first start dialog:
dialog = kwargs.get('first_start_dialog', FirstStart)
f = dialog(self)
f.exec_()
if not f.result():
sys.exit()
# create the config file
with open(self.APP_CONFIG_FILE, 'w') as f:
pass
else:
with open(self.APP_CONFIG_FILE, 'r') as f:
r = f.read()
if r:
self.opts.update(eval(r))
self._icons_enabled = False
self.log_file = None
dirname = self.opts['recent sessions']
if dirname:
dirname = PathStr(dirname[-1]).dirname()
self.dialogs = Dialogs(dirname)
self.saveThread = _SaveThread()
self._createdAutosaveFile = None
self.tmp_dir_save_session = None
# a work-dir for temp. storage:
# self.tmp_dir_work = PathStr(tempfile.mkdtemp('%s_work' % self.NAME))
pathName = self._inspectArguments(args)
self.setSessionPath(pathName)
if self.opts['createLog']:
self._setupLogFile()
# create connectable stdout and stderr signal:
self.streamOut = StreamSignal('out')
self.streamErr = StreamSignal('err')
self._enableGuiIcons()
# Auto-save timer:
self.timerAutosave = QtCore.QTimer()
self.timerAutosave.timeout.connect(self._autoSave)
self.opts.activate()
# first thing to do after start:
QtCore.QTimer.singleShot(0, self.restoreCurrentState)
def setSessionPath(self, path, statename=None):
if path: # and path.endswith('.%s' %self.FTYPE):
# this script was opened out from a zip-container (named as
# '*.pyz')
self.path = PathStr(path)
self.dir = self.path.dirname().abspath()
# extract the zip temporally
ZipFile(self.path, 'r').extractall(path=self.tmp_dir_session)
self.n_sessions = len(self.stateNames())
# SET STATE
snames = self.stateNames()
if statename is None:
# last one
self.current_session = snames[-1]
elif statename in snames:
self.current_session = statename
else:
raise Exception(
"state '%s' not in saved states %s" %
(statename, snames))
else:
self.path = None
self.n_sessions = 0
self.current_session = None
def writeLog(self, write=True):
if not self.log_file:
return
so = self.streamOut.message
se = self.streamErr.message
w = self.log_file.write
if write:
try:
# ensure only connected once
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
so.connect(w)
se.connect(w)
else:
try:
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
def _enableGuiIcons(self):
# enable icons in all QMenuBars only for this program if generally
# disabled
if self.opts['enableGuiIcons']:
if os.name == 'posix': # linux
this_env = str(os.environ.get('DESKTOP_SESSION'))
relevant_env = (
'gnome',
'gnome-shell',
'ubuntustudio',
'xubuntu')
if this_env in relevant_env:
if 'false' in os.popen(
# if the menu-icons on the gnome-desktop are
# disabled
'gconftool-2 --get /desktop/gnome/interface/menus_have_icons').read():
print('enable menu-icons')
os.system(
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons True')
self._icons_enabled = True
def _setupLogFile(self):
lfile = self.tmp_dir_session.join('log.txt')
if lfile.exists():
self.log_file = open(lfile, 'a')
else:
self.log_file = open(lfile, 'w')
self.log_file.write('''
####################################
New run at %s
####################################
''' % strftime("%d.%m.%Y|%H:%M:%S", gmtime()))
def checkMaxSessions(self, nMax=None):
"""
check whether max. number of saved sessions is reached
if: remove the oldest session
"""
if nMax is None:
nMax = self.opts['maxSessions']
l = self.stateNames()
if len(l) > nMax:
for f in l[:len(l) - nMax]:
self.tmp_dir_session.remove(str(f))
def stateNames(self):
"""Returns:
list: the names of all saved sessions
"""
# if self.current_session:
s = self.tmp_dir_session
l = [x for x in s.listdir() if s.join(x).isdir()]
naturalSorting(l)
# else:
# l=[]
# bring autosave to first position:
if 'autoSave' in l:
l.remove('autoSave')
l.insert(0, 'autoSave')
return l
def restorePreviousState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i > 1:
self.current_session = s[i - 1]
self.restoreCurrentState()
def restoreNextState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i < len(s) - 1:
self.current_session = s[i + 1]
self.restoreCurrentState()
def restoreStateName(self, name):
"""restore the state of given [name]"""
self.current_session = name
self.restoreCurrentState()
def renameState(self, oldStateName, newStateName):
s = self.tmp_dir_session.join(oldStateName)
s.rename(newStateName)
if self.current_session == oldStateName:
self.current_session = newStateName
print("==> State [%s] renamed to [%s]" % (oldStateName, newStateName))
def _recusiveReplacePlaceholderWithArray(self, state, arrays):
def recursive(state):
for key, val in list(state.items()):
if isinstance(val, dict):
recursive(val)
elif isinstance(val, str) and val.startswith('arr_'):
state[key] = arrays[val]
recursive(state)
def restoreCurrentState(self):
if self.current_session:
orig = self.tmp_dir_save_session
path = self.tmp_dir_save_session = self.tmp_dir_session.join(
self.current_session)
with open(path.join('state.pickle'), "rb") as f:
state = pickle.load(f)
p = path.join('arrays.npz')
if p.exists():
arrays = np.load(path.join('arrays.npz'))
self._recusiveReplacePlaceholderWithArray(state, arrays)
self.dialogs.restoreState(state['dialogs'])
self.opts.update(state['session'])
self.sigRestore.emit(state)
self.tmp_dir_save_session = orig
print(
"==> State [%s] restored from '%s'" %
(self.current_session, self.path))
def addSession(self):
self.current_session = self.n_sessions
self.n_sessions += 1
self.tmp_dir_save_session = self.tmp_dir_session.join(
str(self.n_sessions)).mkdir()
self.checkMaxSessions()
def quit(self):
print('exiting...')
# RESET ICONS
if self._icons_enabled:
print('disable menu-icons')
os.system( # restore the standard-setting for seeing icons in the menus
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons False')
# WAIT FOR PROMT IF IN DEBUG MODE
if self.opts['debugMode']:
input("Press any key to end the session...")
# REMOVE TEMP FOLDERS
try:
self.tmp_dir_session.remove()
# self.tmp_dir_work.remove()
except OSError:
pass # in case the folders are used by another process
with open(self.APP_CONFIG_FILE, 'w') as f:
f.write(str(self.opts))
# CLOSE LOG FILE
if self.log_file:
self.writeLog(False)
self.log_file.close()
def _inspectArguments(self, args):
"""inspect the command-line-args and give them to appBase"""
if args:
self.exec_path = PathStr(args[0])
else:
self.exec_path = None
session_name = None
args = args[1:]
openSession = False
for arg in args:
if arg in ('-h', '--help'):
self._showHelp()
elif arg in ('-d', '--debug'):
print('RUNNGING IN DEBUG-MODE')
self.opts['debugMode'] = True
elif arg in ('-l', '--log'):
print('CREATE LOG')
self.opts['createLog'] = True
elif arg in ('-s', '--server'):
self.opts['server'] = True
elif arg in ('-o', '--open'):
openSession = True
elif openSession:
session_name = arg
else:
print("Argument '%s' not known." % arg)
return self._showHelp()
return session_name
def _showHelp(self):
sys.exit('''
%s-sessions can started with the following arguments:
[-h or --help] - show the help-page
[-d or --debug] - run in debugging-mode
[-l or --log] - create log file
[-n or --new] - start a new session, don'l load saved properties
[-exec [cmd]] - execute python code from this script/executable
''' % self.__class__.__name__)
def save(self):
"""save the current session
override, if session was saved earlier"""
if self.path:
self._saveState(self.path)
else:
self.saveAs()
def saveAs(self, filename=None):
if filename is None:
# ask for filename:
filename = self.dialogs.getSaveFileName(filter="*.%s" % self.FTYPE)
if filename:
self.path = filename
self._saveState(self.path)
if self._createdAutosaveFile:
self._createdAutosaveFile.remove()
print(
"removed automatically created '%s'" %
self._createdAutosaveFile)
self._createdAutosaveFile = None
def replace(self, path):
"""
replace current session with one given by file path
"""
self.setSessionPath(path)
self.restoreCurrentState()
def open(self):
"""open a session to define in a dialog in an extra window"""
filename = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
if filename:
self.new(filename)
def new(self, filename=None):
"""start a session an independent process"""
path = (self.exec_path,)
if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
# get the absolute path to the python-executable
p = find_executable("python")
path = (p, 'python') + path
else:
# if run in frozen env (.exe):
# first arg if execpath of the next session:
path += (self.exec_path,)
if filename:
path += ('-o', filename)
os.spawnl(os.P_NOWAIT, *path)
def registerMainWindow(self, win):
win.setWindowIcon(QtGui.QIcon(self.ICON))
self._mainWindow = win
win.show = self._showMainWindow
win.hide = self._hideMainWindow
if self.opts['server']:
server_ = Server(win)
win.hide()
else:
win.show()
@property
def tmp_dir_session(self):
# only create folder if needed
if self._tmp_dir_session is None:
# make temp-dir
# the directory where the content of the *pyz-file will be copied:
self._tmp_dir_session = PathStr(
tempfile.mkdtemp(
'%s_session' %
self.NAME))
return self._tmp_dir_session
def _showMainWindow(self):
try:
# restore autosave
del self._autosave
except AttributeError:
pass
self._mainWindow.__class__.show(self._mainWindow)
def _hideMainWindow(self):
# disable autosave when window is hidden
self._autosave = self.opts['autosave']
self.opts['autosave'] = False
self._mainWindow.__class__.hide(self._mainWindow)
def _saveState(self, path):
"""save current state and add a new state"""
self.addSession() # next session
self._save(str(self.n_sessions), path)
def _autoSave(self):
"""save state into 'autosave' """
a = 'autoSave'
path = self.path
if not path:
path = self.dir.join('%s.%s' % (a, self.FTYPE))
self._createdAutosaveFile = path
self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir()
self._save(a, path)
def blockingSave(self, path):
"""
saved session to file - returns after finish
only called by interactiveTutorial-save at the moment
"""
self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir()
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.saveThread.prepare('0', path, self.tmp_dir_session, state)
self.sigSave.emit(self)
self.saveThread.run()
def _save(self, stateName, path):
"""save into 'stateName' to pyz-path"""
print('saving...')
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.sigSave.emit(state)
self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)
self.saveThread.start()
self.current_session = stateName
r = self.opts['recent sessions']
try:
# is this session already exists: remove it
r.pop(r.index(path))
except ValueError:
pass
# add this session at the beginning
r.insert(0, path)
|
radjkarl/appBase
|
appbase/Session.py
|
Session.new
|
python
|
def new(self, filename=None):
"""start a session an independent process"""
path = (self.exec_path,)
if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
# get the absolute path to the python-executable
p = find_executable("python")
path = (p, 'python') + path
else:
# if run in frozen env (.exe):
# first arg if execpath of the next session:
path += (self.exec_path,)
if filename:
path += ('-o', filename)
os.spawnl(os.P_NOWAIT, *path)
|
start a session an independent process
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L403-L416
| null |
class Session(QtCore.QObject):
"""Session management to be accessible
in QtWidgets.QApplication.instance().session
* extract the opened (as pyz-zipped) session in a temp folder
* create 2nd temp-folder for sessions to be saved
* send a close signal to all child structures when exit
* write a log file with all output
* enable icons in menus of gnome-sessions [linux only]
* gives option of debug mode
"""
# sigPathChanged = QtCore.Signal(object) #path
sigSave = QtCore.Signal(object) # state dict
sigRestore = QtCore.Signal(object) # state dict
def __init__(self, args, **kwargs):
"""
Args:
first_start_dialog (Optional[bool]):
Show a different dialog for the first start.
name (Optional[str]): The applications name.
type (Optional[str]): The file type to be used for saving sessions.
icon (Optional[str]): Path to the application icon.
"""
QtCore.QObject.__init__(self)
# SESSION CONSTANTS:
self.NAME = kwargs.get('name', __main__.__name__)
self.FTYPE = kwargs.get('ftype', 'pyz')
self.ICON = kwargs.get('icon', None)
# hidden app-preferences folder:
self.dir = PathStr.home().mkdir('.%s' % self.NAME)
self.APP_CONFIG_FILE = self.dir.join('config.txt')
self._tmp_dir_session = None
self.current_session = None
# global session options - same for all new sessions:
self.opts = _Opts({
'maxSessions': 3,
'enableGuiIcons': True,
'writeToShell': True,
'createLog': False,
'debugMode': False,
'autosave': False,
'autosaveIntervalMin': 15,
'server': False,
'showCloseDialog': True,
'recent sessions': []
}, self)
# self.app_opts = {'showCloseDialog': True, 'recent sessions': []}
if not self.APP_CONFIG_FILE.exists():
# allow different first start dialog:
dialog = kwargs.get('first_start_dialog', FirstStart)
f = dialog(self)
f.exec_()
if not f.result():
sys.exit()
# create the config file
with open(self.APP_CONFIG_FILE, 'w') as f:
pass
else:
with open(self.APP_CONFIG_FILE, 'r') as f:
r = f.read()
if r:
self.opts.update(eval(r))
self._icons_enabled = False
self.log_file = None
dirname = self.opts['recent sessions']
if dirname:
dirname = PathStr(dirname[-1]).dirname()
self.dialogs = Dialogs(dirname)
self.saveThread = _SaveThread()
self._createdAutosaveFile = None
self.tmp_dir_save_session = None
# a work-dir for temp. storage:
# self.tmp_dir_work = PathStr(tempfile.mkdtemp('%s_work' % self.NAME))
pathName = self._inspectArguments(args)
self.setSessionPath(pathName)
if self.opts['createLog']:
self._setupLogFile()
# create connectable stdout and stderr signal:
self.streamOut = StreamSignal('out')
self.streamErr = StreamSignal('err')
self._enableGuiIcons()
# Auto-save timer:
self.timerAutosave = QtCore.QTimer()
self.timerAutosave.timeout.connect(self._autoSave)
self.opts.activate()
# first thing to do after start:
QtCore.QTimer.singleShot(0, self.restoreCurrentState)
def setSessionPath(self, path, statename=None):
if path: # and path.endswith('.%s' %self.FTYPE):
# this script was opened out from a zip-container (named as
# '*.pyz')
self.path = PathStr(path)
self.dir = self.path.dirname().abspath()
# extract the zip temporally
ZipFile(self.path, 'r').extractall(path=self.tmp_dir_session)
self.n_sessions = len(self.stateNames())
# SET STATE
snames = self.stateNames()
if statename is None:
# last one
self.current_session = snames[-1]
elif statename in snames:
self.current_session = statename
else:
raise Exception(
"state '%s' not in saved states %s" %
(statename, snames))
else:
self.path = None
self.n_sessions = 0
self.current_session = None
def writeLog(self, write=True):
if not self.log_file:
return
so = self.streamOut.message
se = self.streamErr.message
w = self.log_file.write
if write:
try:
# ensure only connected once
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
so.connect(w)
se.connect(w)
else:
try:
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
def _enableGuiIcons(self):
# enable icons in all QMenuBars only for this program if generally
# disabled
if self.opts['enableGuiIcons']:
if os.name == 'posix': # linux
this_env = str(os.environ.get('DESKTOP_SESSION'))
relevant_env = (
'gnome',
'gnome-shell',
'ubuntustudio',
'xubuntu')
if this_env in relevant_env:
if 'false' in os.popen(
# if the menu-icons on the gnome-desktop are
# disabled
'gconftool-2 --get /desktop/gnome/interface/menus_have_icons').read():
print('enable menu-icons')
os.system(
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons True')
self._icons_enabled = True
def _setupLogFile(self):
lfile = self.tmp_dir_session.join('log.txt')
if lfile.exists():
self.log_file = open(lfile, 'a')
else:
self.log_file = open(lfile, 'w')
self.log_file.write('''
####################################
New run at %s
####################################
''' % strftime("%d.%m.%Y|%H:%M:%S", gmtime()))
def checkMaxSessions(self, nMax=None):
"""
check whether max. number of saved sessions is reached
if: remove the oldest session
"""
if nMax is None:
nMax = self.opts['maxSessions']
l = self.stateNames()
if len(l) > nMax:
for f in l[:len(l) - nMax]:
self.tmp_dir_session.remove(str(f))
def stateNames(self):
"""Returns:
list: the names of all saved sessions
"""
# if self.current_session:
s = self.tmp_dir_session
l = [x for x in s.listdir() if s.join(x).isdir()]
naturalSorting(l)
# else:
# l=[]
# bring autosave to first position:
if 'autoSave' in l:
l.remove('autoSave')
l.insert(0, 'autoSave')
return l
def restorePreviousState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i > 1:
self.current_session = s[i - 1]
self.restoreCurrentState()
def restoreNextState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i < len(s) - 1:
self.current_session = s[i + 1]
self.restoreCurrentState()
def restoreStateName(self, name):
"""restore the state of given [name]"""
self.current_session = name
self.restoreCurrentState()
def renameState(self, oldStateName, newStateName):
s = self.tmp_dir_session.join(oldStateName)
s.rename(newStateName)
if self.current_session == oldStateName:
self.current_session = newStateName
print("==> State [%s] renamed to [%s]" % (oldStateName, newStateName))
def _recusiveReplacePlaceholderWithArray(self, state, arrays):
def recursive(state):
for key, val in list(state.items()):
if isinstance(val, dict):
recursive(val)
elif isinstance(val, str) and val.startswith('arr_'):
state[key] = arrays[val]
recursive(state)
def restoreCurrentState(self):
if self.current_session:
orig = self.tmp_dir_save_session
path = self.tmp_dir_save_session = self.tmp_dir_session.join(
self.current_session)
with open(path.join('state.pickle'), "rb") as f:
state = pickle.load(f)
p = path.join('arrays.npz')
if p.exists():
arrays = np.load(path.join('arrays.npz'))
self._recusiveReplacePlaceholderWithArray(state, arrays)
self.dialogs.restoreState(state['dialogs'])
self.opts.update(state['session'])
self.sigRestore.emit(state)
self.tmp_dir_save_session = orig
print(
"==> State [%s] restored from '%s'" %
(self.current_session, self.path))
def addSession(self):
self.current_session = self.n_sessions
self.n_sessions += 1
self.tmp_dir_save_session = self.tmp_dir_session.join(
str(self.n_sessions)).mkdir()
self.checkMaxSessions()
def quit(self):
print('exiting...')
# RESET ICONS
if self._icons_enabled:
print('disable menu-icons')
os.system( # restore the standard-setting for seeing icons in the menus
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons False')
# WAIT FOR PROMT IF IN DEBUG MODE
if self.opts['debugMode']:
input("Press any key to end the session...")
# REMOVE TEMP FOLDERS
try:
self.tmp_dir_session.remove()
# self.tmp_dir_work.remove()
except OSError:
pass # in case the folders are used by another process
with open(self.APP_CONFIG_FILE, 'w') as f:
f.write(str(self.opts))
# CLOSE LOG FILE
if self.log_file:
self.writeLog(False)
self.log_file.close()
def _inspectArguments(self, args):
"""inspect the command-line-args and give them to appBase"""
if args:
self.exec_path = PathStr(args[0])
else:
self.exec_path = None
session_name = None
args = args[1:]
openSession = False
for arg in args:
if arg in ('-h', '--help'):
self._showHelp()
elif arg in ('-d', '--debug'):
print('RUNNGING IN DEBUG-MODE')
self.opts['debugMode'] = True
elif arg in ('-l', '--log'):
print('CREATE LOG')
self.opts['createLog'] = True
elif arg in ('-s', '--server'):
self.opts['server'] = True
elif arg in ('-o', '--open'):
openSession = True
elif openSession:
session_name = arg
else:
print("Argument '%s' not known." % arg)
return self._showHelp()
return session_name
def _showHelp(self):
sys.exit('''
%s-sessions can started with the following arguments:
[-h or --help] - show the help-page
[-d or --debug] - run in debugging-mode
[-l or --log] - create log file
[-n or --new] - start a new session, don'l load saved properties
[-exec [cmd]] - execute python code from this script/executable
''' % self.__class__.__name__)
def save(self):
"""save the current session
override, if session was saved earlier"""
if self.path:
self._saveState(self.path)
else:
self.saveAs()
def saveAs(self, filename=None):
if filename is None:
# ask for filename:
filename = self.dialogs.getSaveFileName(filter="*.%s" % self.FTYPE)
if filename:
self.path = filename
self._saveState(self.path)
if self._createdAutosaveFile:
self._createdAutosaveFile.remove()
print(
"removed automatically created '%s'" %
self._createdAutosaveFile)
self._createdAutosaveFile = None
def replace(self, path):
"""
replace current session with one given by file path
"""
self.setSessionPath(path)
self.restoreCurrentState()
def open(self):
"""open a session to define in a dialog in an extra window"""
filename = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
if filename:
self.new(filename)
def new(self, filename=None):
"""start a session an independent process"""
path = (self.exec_path,)
if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
# get the absolute path to the python-executable
p = find_executable("python")
path = (p, 'python') + path
else:
# if run in frozen env (.exe):
# first arg if execpath of the next session:
path += (self.exec_path,)
if filename:
path += ('-o', filename)
os.spawnl(os.P_NOWAIT, *path)
def registerMainWindow(self, win):
win.setWindowIcon(QtGui.QIcon(self.ICON))
self._mainWindow = win
win.show = self._showMainWindow
win.hide = self._hideMainWindow
if self.opts['server']:
server_ = Server(win)
win.hide()
else:
win.show()
@property
def tmp_dir_session(self):
# only create folder if needed
if self._tmp_dir_session is None:
# make temp-dir
# the directory where the content of the *pyz-file will be copied:
self._tmp_dir_session = PathStr(
tempfile.mkdtemp(
'%s_session' %
self.NAME))
return self._tmp_dir_session
def _showMainWindow(self):
try:
# restore autosave
del self._autosave
except AttributeError:
pass
self._mainWindow.__class__.show(self._mainWindow)
def _hideMainWindow(self):
# disable autosave when window is hidden
self._autosave = self.opts['autosave']
self.opts['autosave'] = False
self._mainWindow.__class__.hide(self._mainWindow)
def _saveState(self, path):
"""save current state and add a new state"""
self.addSession() # next session
self._save(str(self.n_sessions), path)
def _autoSave(self):
"""save state into 'autosave' """
a = 'autoSave'
path = self.path
if not path:
path = self.dir.join('%s.%s' % (a, self.FTYPE))
self._createdAutosaveFile = path
self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir()
self._save(a, path)
def blockingSave(self, path):
"""
saved session to file - returns after finish
only called by interactiveTutorial-save at the moment
"""
self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir()
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.saveThread.prepare('0', path, self.tmp_dir_session, state)
self.sigSave.emit(self)
self.saveThread.run()
def _save(self, stateName, path):
"""save into 'stateName' to pyz-path"""
print('saving...')
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.sigSave.emit(state)
self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)
self.saveThread.start()
self.current_session = stateName
r = self.opts['recent sessions']
try:
# is this session already exists: remove it
r.pop(r.index(path))
except ValueError:
pass
# add this session at the beginning
r.insert(0, path)
|
radjkarl/appBase
|
appbase/Session.py
|
Session._saveState
|
python
|
def _saveState(self, path):
"""save current state and add a new state"""
self.addSession() # next session
self._save(str(self.n_sessions), path)
|
save current state and add a new state
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L456-L459
|
[
"def addSession(self):\n self.current_session = self.n_sessions\n self.n_sessions += 1\n self.tmp_dir_save_session = self.tmp_dir_session.join(\n str(self.n_sessions)).mkdir()\n self.checkMaxSessions()\n",
"def _save(self, stateName, path):\n \"\"\"save into 'stateName' to pyz-path\"\"\"\n print('saving...')\n\n state = {'session': dict(self.opts),\n 'dialogs': self.dialogs.saveState()}\n\n self.sigSave.emit(state)\n self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)\n self.saveThread.start()\n\n self.current_session = stateName\n\n r = self.opts['recent sessions']\n try:\n # is this session already exists: remove it\n r.pop(r.index(path))\n except ValueError:\n pass\n # add this session at the beginning\n r.insert(0, path)\n"
] |
class Session(QtCore.QObject):
"""Session management to be accessible
in QtWidgets.QApplication.instance().session
* extract the opened (as pyz-zipped) session in a temp folder
* create 2nd temp-folder for sessions to be saved
* send a close signal to all child structures when exit
* write a log file with all output
* enable icons in menus of gnome-sessions [linux only]
* gives option of debug mode
"""
# sigPathChanged = QtCore.Signal(object) #path
sigSave = QtCore.Signal(object) # state dict
sigRestore = QtCore.Signal(object) # state dict
def __init__(self, args, **kwargs):
"""
Args:
first_start_dialog (Optional[bool]):
Show a different dialog for the first start.
name (Optional[str]): The applications name.
type (Optional[str]): The file type to be used for saving sessions.
icon (Optional[str]): Path to the application icon.
"""
QtCore.QObject.__init__(self)
# SESSION CONSTANTS:
self.NAME = kwargs.get('name', __main__.__name__)
self.FTYPE = kwargs.get('ftype', 'pyz')
self.ICON = kwargs.get('icon', None)
# hidden app-preferences folder:
self.dir = PathStr.home().mkdir('.%s' % self.NAME)
self.APP_CONFIG_FILE = self.dir.join('config.txt')
self._tmp_dir_session = None
self.current_session = None
# global session options - same for all new sessions:
self.opts = _Opts({
'maxSessions': 3,
'enableGuiIcons': True,
'writeToShell': True,
'createLog': False,
'debugMode': False,
'autosave': False,
'autosaveIntervalMin': 15,
'server': False,
'showCloseDialog': True,
'recent sessions': []
}, self)
# self.app_opts = {'showCloseDialog': True, 'recent sessions': []}
if not self.APP_CONFIG_FILE.exists():
# allow different first start dialog:
dialog = kwargs.get('first_start_dialog', FirstStart)
f = dialog(self)
f.exec_()
if not f.result():
sys.exit()
# create the config file
with open(self.APP_CONFIG_FILE, 'w') as f:
pass
else:
with open(self.APP_CONFIG_FILE, 'r') as f:
r = f.read()
if r:
self.opts.update(eval(r))
self._icons_enabled = False
self.log_file = None
dirname = self.opts['recent sessions']
if dirname:
dirname = PathStr(dirname[-1]).dirname()
self.dialogs = Dialogs(dirname)
self.saveThread = _SaveThread()
self._createdAutosaveFile = None
self.tmp_dir_save_session = None
# a work-dir for temp. storage:
# self.tmp_dir_work = PathStr(tempfile.mkdtemp('%s_work' % self.NAME))
pathName = self._inspectArguments(args)
self.setSessionPath(pathName)
if self.opts['createLog']:
self._setupLogFile()
# create connectable stdout and stderr signal:
self.streamOut = StreamSignal('out')
self.streamErr = StreamSignal('err')
self._enableGuiIcons()
# Auto-save timer:
self.timerAutosave = QtCore.QTimer()
self.timerAutosave.timeout.connect(self._autoSave)
self.opts.activate()
# first thing to do after start:
QtCore.QTimer.singleShot(0, self.restoreCurrentState)
def setSessionPath(self, path, statename=None):
if path: # and path.endswith('.%s' %self.FTYPE):
# this script was opened out from a zip-container (named as
# '*.pyz')
self.path = PathStr(path)
self.dir = self.path.dirname().abspath()
# extract the zip temporally
ZipFile(self.path, 'r').extractall(path=self.tmp_dir_session)
self.n_sessions = len(self.stateNames())
# SET STATE
snames = self.stateNames()
if statename is None:
# last one
self.current_session = snames[-1]
elif statename in snames:
self.current_session = statename
else:
raise Exception(
"state '%s' not in saved states %s" %
(statename, snames))
else:
self.path = None
self.n_sessions = 0
self.current_session = None
def writeLog(self, write=True):
if not self.log_file:
return
so = self.streamOut.message
se = self.streamErr.message
w = self.log_file.write
if write:
try:
# ensure only connected once
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
so.connect(w)
se.connect(w)
else:
try:
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
def _enableGuiIcons(self):
# enable icons in all QMenuBars only for this program if generally
# disabled
if self.opts['enableGuiIcons']:
if os.name == 'posix': # linux
this_env = str(os.environ.get('DESKTOP_SESSION'))
relevant_env = (
'gnome',
'gnome-shell',
'ubuntustudio',
'xubuntu')
if this_env in relevant_env:
if 'false' in os.popen(
# if the menu-icons on the gnome-desktop are
# disabled
'gconftool-2 --get /desktop/gnome/interface/menus_have_icons').read():
print('enable menu-icons')
os.system(
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons True')
self._icons_enabled = True
def _setupLogFile(self):
lfile = self.tmp_dir_session.join('log.txt')
if lfile.exists():
self.log_file = open(lfile, 'a')
else:
self.log_file = open(lfile, 'w')
self.log_file.write('''
####################################
New run at %s
####################################
''' % strftime("%d.%m.%Y|%H:%M:%S", gmtime()))
def checkMaxSessions(self, nMax=None):
"""
check whether max. number of saved sessions is reached
if: remove the oldest session
"""
if nMax is None:
nMax = self.opts['maxSessions']
l = self.stateNames()
if len(l) > nMax:
for f in l[:len(l) - nMax]:
self.tmp_dir_session.remove(str(f))
def stateNames(self):
"""Returns:
list: the names of all saved sessions
"""
# if self.current_session:
s = self.tmp_dir_session
l = [x for x in s.listdir() if s.join(x).isdir()]
naturalSorting(l)
# else:
# l=[]
# bring autosave to first position:
if 'autoSave' in l:
l.remove('autoSave')
l.insert(0, 'autoSave')
return l
def restorePreviousState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i > 1:
self.current_session = s[i - 1]
self.restoreCurrentState()
def restoreNextState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i < len(s) - 1:
self.current_session = s[i + 1]
self.restoreCurrentState()
def restoreStateName(self, name):
"""restore the state of given [name]"""
self.current_session = name
self.restoreCurrentState()
def renameState(self, oldStateName, newStateName):
s = self.tmp_dir_session.join(oldStateName)
s.rename(newStateName)
if self.current_session == oldStateName:
self.current_session = newStateName
print("==> State [%s] renamed to [%s]" % (oldStateName, newStateName))
def _recusiveReplacePlaceholderWithArray(self, state, arrays):
def recursive(state):
for key, val in list(state.items()):
if isinstance(val, dict):
recursive(val)
elif isinstance(val, str) and val.startswith('arr_'):
state[key] = arrays[val]
recursive(state)
def restoreCurrentState(self):
if self.current_session:
orig = self.tmp_dir_save_session
path = self.tmp_dir_save_session = self.tmp_dir_session.join(
self.current_session)
with open(path.join('state.pickle'), "rb") as f:
state = pickle.load(f)
p = path.join('arrays.npz')
if p.exists():
arrays = np.load(path.join('arrays.npz'))
self._recusiveReplacePlaceholderWithArray(state, arrays)
self.dialogs.restoreState(state['dialogs'])
self.opts.update(state['session'])
self.sigRestore.emit(state)
self.tmp_dir_save_session = orig
print(
"==> State [%s] restored from '%s'" %
(self.current_session, self.path))
def addSession(self):
self.current_session = self.n_sessions
self.n_sessions += 1
self.tmp_dir_save_session = self.tmp_dir_session.join(
str(self.n_sessions)).mkdir()
self.checkMaxSessions()
def quit(self):
print('exiting...')
# RESET ICONS
if self._icons_enabled:
print('disable menu-icons')
os.system( # restore the standard-setting for seeing icons in the menus
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons False')
# WAIT FOR PROMT IF IN DEBUG MODE
if self.opts['debugMode']:
input("Press any key to end the session...")
# REMOVE TEMP FOLDERS
try:
self.tmp_dir_session.remove()
# self.tmp_dir_work.remove()
except OSError:
pass # in case the folders are used by another process
with open(self.APP_CONFIG_FILE, 'w') as f:
f.write(str(self.opts))
# CLOSE LOG FILE
if self.log_file:
self.writeLog(False)
self.log_file.close()
def _inspectArguments(self, args):
"""inspect the command-line-args and give them to appBase"""
if args:
self.exec_path = PathStr(args[0])
else:
self.exec_path = None
session_name = None
args = args[1:]
openSession = False
for arg in args:
if arg in ('-h', '--help'):
self._showHelp()
elif arg in ('-d', '--debug'):
print('RUNNGING IN DEBUG-MODE')
self.opts['debugMode'] = True
elif arg in ('-l', '--log'):
print('CREATE LOG')
self.opts['createLog'] = True
elif arg in ('-s', '--server'):
self.opts['server'] = True
elif arg in ('-o', '--open'):
openSession = True
elif openSession:
session_name = arg
else:
print("Argument '%s' not known." % arg)
return self._showHelp()
return session_name
def _showHelp(self):
sys.exit('''
%s-sessions can started with the following arguments:
[-h or --help] - show the help-page
[-d or --debug] - run in debugging-mode
[-l or --log] - create log file
[-n or --new] - start a new session, don'l load saved properties
[-exec [cmd]] - execute python code from this script/executable
''' % self.__class__.__name__)
def save(self):
"""save the current session
override, if session was saved earlier"""
if self.path:
self._saveState(self.path)
else:
self.saveAs()
def saveAs(self, filename=None):
if filename is None:
# ask for filename:
filename = self.dialogs.getSaveFileName(filter="*.%s" % self.FTYPE)
if filename:
self.path = filename
self._saveState(self.path)
if self._createdAutosaveFile:
self._createdAutosaveFile.remove()
print(
"removed automatically created '%s'" %
self._createdAutosaveFile)
self._createdAutosaveFile = None
def replace(self, path):
"""
replace current session with one given by file path
"""
self.setSessionPath(path)
self.restoreCurrentState()
def open(self):
"""open a session to define in a dialog in an extra window"""
filename = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
if filename:
self.new(filename)
def new(self, filename=None):
"""start a session an independent process"""
path = (self.exec_path,)
if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
# get the absolute path to the python-executable
p = find_executable("python")
path = (p, 'python') + path
else:
# if run in frozen env (.exe):
# first arg if execpath of the next session:
path += (self.exec_path,)
if filename:
path += ('-o', filename)
os.spawnl(os.P_NOWAIT, *path)
def registerMainWindow(self, win):
win.setWindowIcon(QtGui.QIcon(self.ICON))
self._mainWindow = win
win.show = self._showMainWindow
win.hide = self._hideMainWindow
if self.opts['server']:
server_ = Server(win)
win.hide()
else:
win.show()
@property
def tmp_dir_session(self):
# only create folder if needed
if self._tmp_dir_session is None:
# make temp-dir
# the directory where the content of the *pyz-file will be copied:
self._tmp_dir_session = PathStr(
tempfile.mkdtemp(
'%s_session' %
self.NAME))
return self._tmp_dir_session
def _showMainWindow(self):
try:
# restore autosave
del self._autosave
except AttributeError:
pass
self._mainWindow.__class__.show(self._mainWindow)
def _hideMainWindow(self):
# disable autosave when window is hidden
self._autosave = self.opts['autosave']
self.opts['autosave'] = False
self._mainWindow.__class__.hide(self._mainWindow)
def _saveState(self, path):
"""save current state and add a new state"""
self.addSession() # next session
self._save(str(self.n_sessions), path)
def _autoSave(self):
"""save state into 'autosave' """
a = 'autoSave'
path = self.path
if not path:
path = self.dir.join('%s.%s' % (a, self.FTYPE))
self._createdAutosaveFile = path
self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir()
self._save(a, path)
def blockingSave(self, path):
"""
saved session to file - returns after finish
only called by interactiveTutorial-save at the moment
"""
self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir()
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.saveThread.prepare('0', path, self.tmp_dir_session, state)
self.sigSave.emit(self)
self.saveThread.run()
def _save(self, stateName, path):
"""save into 'stateName' to pyz-path"""
print('saving...')
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.sigSave.emit(state)
self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)
self.saveThread.start()
self.current_session = stateName
r = self.opts['recent sessions']
try:
# is this session already exists: remove it
r.pop(r.index(path))
except ValueError:
pass
# add this session at the beginning
r.insert(0, path)
|
radjkarl/appBase
|
appbase/Session.py
|
Session._autoSave
|
python
|
def _autoSave(self):
"""save state into 'autosave' """
a = 'autoSave'
path = self.path
if not path:
path = self.dir.join('%s.%s' % (a, self.FTYPE))
self._createdAutosaveFile = path
self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir()
self._save(a, path)
|
save state into 'autosave'
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L461-L469
| null |
class Session(QtCore.QObject):
"""Session management to be accessible
in QtWidgets.QApplication.instance().session
* extract the opened (as pyz-zipped) session in a temp folder
* create 2nd temp-folder for sessions to be saved
* send a close signal to all child structures when exit
* write a log file with all output
* enable icons in menus of gnome-sessions [linux only]
* gives option of debug mode
"""
# sigPathChanged = QtCore.Signal(object) #path
sigSave = QtCore.Signal(object) # state dict
sigRestore = QtCore.Signal(object) # state dict
def __init__(self, args, **kwargs):
"""
Args:
first_start_dialog (Optional[bool]):
Show a different dialog for the first start.
name (Optional[str]): The applications name.
type (Optional[str]): The file type to be used for saving sessions.
icon (Optional[str]): Path to the application icon.
"""
QtCore.QObject.__init__(self)
# SESSION CONSTANTS:
self.NAME = kwargs.get('name', __main__.__name__)
self.FTYPE = kwargs.get('ftype', 'pyz')
self.ICON = kwargs.get('icon', None)
# hidden app-preferences folder:
self.dir = PathStr.home().mkdir('.%s' % self.NAME)
self.APP_CONFIG_FILE = self.dir.join('config.txt')
self._tmp_dir_session = None
self.current_session = None
# global session options - same for all new sessions:
self.opts = _Opts({
'maxSessions': 3,
'enableGuiIcons': True,
'writeToShell': True,
'createLog': False,
'debugMode': False,
'autosave': False,
'autosaveIntervalMin': 15,
'server': False,
'showCloseDialog': True,
'recent sessions': []
}, self)
# self.app_opts = {'showCloseDialog': True, 'recent sessions': []}
if not self.APP_CONFIG_FILE.exists():
# allow different first start dialog:
dialog = kwargs.get('first_start_dialog', FirstStart)
f = dialog(self)
f.exec_()
if not f.result():
sys.exit()
# create the config file
with open(self.APP_CONFIG_FILE, 'w') as f:
pass
else:
with open(self.APP_CONFIG_FILE, 'r') as f:
r = f.read()
if r:
self.opts.update(eval(r))
self._icons_enabled = False
self.log_file = None
dirname = self.opts['recent sessions']
if dirname:
dirname = PathStr(dirname[-1]).dirname()
self.dialogs = Dialogs(dirname)
self.saveThread = _SaveThread()
self._createdAutosaveFile = None
self.tmp_dir_save_session = None
# a work-dir for temp. storage:
# self.tmp_dir_work = PathStr(tempfile.mkdtemp('%s_work' % self.NAME))
pathName = self._inspectArguments(args)
self.setSessionPath(pathName)
if self.opts['createLog']:
self._setupLogFile()
# create connectable stdout and stderr signal:
self.streamOut = StreamSignal('out')
self.streamErr = StreamSignal('err')
self._enableGuiIcons()
# Auto-save timer:
self.timerAutosave = QtCore.QTimer()
self.timerAutosave.timeout.connect(self._autoSave)
self.opts.activate()
# first thing to do after start:
QtCore.QTimer.singleShot(0, self.restoreCurrentState)
def setSessionPath(self, path, statename=None):
if path: # and path.endswith('.%s' %self.FTYPE):
# this script was opened out from a zip-container (named as
# '*.pyz')
self.path = PathStr(path)
self.dir = self.path.dirname().abspath()
# extract the zip temporally
ZipFile(self.path, 'r').extractall(path=self.tmp_dir_session)
self.n_sessions = len(self.stateNames())
# SET STATE
snames = self.stateNames()
if statename is None:
# last one
self.current_session = snames[-1]
elif statename in snames:
self.current_session = statename
else:
raise Exception(
"state '%s' not in saved states %s" %
(statename, snames))
else:
self.path = None
self.n_sessions = 0
self.current_session = None
def writeLog(self, write=True):
if not self.log_file:
return
so = self.streamOut.message
se = self.streamErr.message
w = self.log_file.write
if write:
try:
# ensure only connected once
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
so.connect(w)
se.connect(w)
else:
try:
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
def _enableGuiIcons(self):
# enable icons in all QMenuBars only for this program if generally
# disabled
if self.opts['enableGuiIcons']:
if os.name == 'posix': # linux
this_env = str(os.environ.get('DESKTOP_SESSION'))
relevant_env = (
'gnome',
'gnome-shell',
'ubuntustudio',
'xubuntu')
if this_env in relevant_env:
if 'false' in os.popen(
# if the menu-icons on the gnome-desktop are
# disabled
'gconftool-2 --get /desktop/gnome/interface/menus_have_icons').read():
print('enable menu-icons')
os.system(
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons True')
self._icons_enabled = True
def _setupLogFile(self):
lfile = self.tmp_dir_session.join('log.txt')
if lfile.exists():
self.log_file = open(lfile, 'a')
else:
self.log_file = open(lfile, 'w')
self.log_file.write('''
####################################
New run at %s
####################################
''' % strftime("%d.%m.%Y|%H:%M:%S", gmtime()))
def checkMaxSessions(self, nMax=None):
"""
check whether max. number of saved sessions is reached
if: remove the oldest session
"""
if nMax is None:
nMax = self.opts['maxSessions']
l = self.stateNames()
if len(l) > nMax:
for f in l[:len(l) - nMax]:
self.tmp_dir_session.remove(str(f))
def stateNames(self):
"""Returns:
list: the names of all saved sessions
"""
# if self.current_session:
s = self.tmp_dir_session
l = [x for x in s.listdir() if s.join(x).isdir()]
naturalSorting(l)
# else:
# l=[]
# bring autosave to first position:
if 'autoSave' in l:
l.remove('autoSave')
l.insert(0, 'autoSave')
return l
def restorePreviousState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i > 1:
self.current_session = s[i - 1]
self.restoreCurrentState()
def restoreNextState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i < len(s) - 1:
self.current_session = s[i + 1]
self.restoreCurrentState()
def restoreStateName(self, name):
"""restore the state of given [name]"""
self.current_session = name
self.restoreCurrentState()
def renameState(self, oldStateName, newStateName):
s = self.tmp_dir_session.join(oldStateName)
s.rename(newStateName)
if self.current_session == oldStateName:
self.current_session = newStateName
print("==> State [%s] renamed to [%s]" % (oldStateName, newStateName))
def _recusiveReplacePlaceholderWithArray(self, state, arrays):
def recursive(state):
for key, val in list(state.items()):
if isinstance(val, dict):
recursive(val)
elif isinstance(val, str) and val.startswith('arr_'):
state[key] = arrays[val]
recursive(state)
def restoreCurrentState(self):
if self.current_session:
orig = self.tmp_dir_save_session
path = self.tmp_dir_save_session = self.tmp_dir_session.join(
self.current_session)
with open(path.join('state.pickle'), "rb") as f:
state = pickle.load(f)
p = path.join('arrays.npz')
if p.exists():
arrays = np.load(path.join('arrays.npz'))
self._recusiveReplacePlaceholderWithArray(state, arrays)
self.dialogs.restoreState(state['dialogs'])
self.opts.update(state['session'])
self.sigRestore.emit(state)
self.tmp_dir_save_session = orig
print(
"==> State [%s] restored from '%s'" %
(self.current_session, self.path))
def addSession(self):
self.current_session = self.n_sessions
self.n_sessions += 1
self.tmp_dir_save_session = self.tmp_dir_session.join(
str(self.n_sessions)).mkdir()
self.checkMaxSessions()
def quit(self):
print('exiting...')
# RESET ICONS
if self._icons_enabled:
print('disable menu-icons')
os.system( # restore the standard-setting for seeing icons in the menus
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons False')
# WAIT FOR PROMT IF IN DEBUG MODE
if self.opts['debugMode']:
input("Press any key to end the session...")
# REMOVE TEMP FOLDERS
try:
self.tmp_dir_session.remove()
# self.tmp_dir_work.remove()
except OSError:
pass # in case the folders are used by another process
with open(self.APP_CONFIG_FILE, 'w') as f:
f.write(str(self.opts))
# CLOSE LOG FILE
if self.log_file:
self.writeLog(False)
self.log_file.close()
def _inspectArguments(self, args):
"""inspect the command-line-args and give them to appBase"""
if args:
self.exec_path = PathStr(args[0])
else:
self.exec_path = None
session_name = None
args = args[1:]
openSession = False
for arg in args:
if arg in ('-h', '--help'):
self._showHelp()
elif arg in ('-d', '--debug'):
print('RUNNGING IN DEBUG-MODE')
self.opts['debugMode'] = True
elif arg in ('-l', '--log'):
print('CREATE LOG')
self.opts['createLog'] = True
elif arg in ('-s', '--server'):
self.opts['server'] = True
elif arg in ('-o', '--open'):
openSession = True
elif openSession:
session_name = arg
else:
print("Argument '%s' not known." % arg)
return self._showHelp()
return session_name
def _showHelp(self):
sys.exit('''
%s-sessions can started with the following arguments:
[-h or --help] - show the help-page
[-d or --debug] - run in debugging-mode
[-l or --log] - create log file
[-n or --new] - start a new session, don'l load saved properties
[-exec [cmd]] - execute python code from this script/executable
''' % self.__class__.__name__)
def save(self):
"""save the current session
override, if session was saved earlier"""
if self.path:
self._saveState(self.path)
else:
self.saveAs()
def saveAs(self, filename=None):
if filename is None:
# ask for filename:
filename = self.dialogs.getSaveFileName(filter="*.%s" % self.FTYPE)
if filename:
self.path = filename
self._saveState(self.path)
if self._createdAutosaveFile:
self._createdAutosaveFile.remove()
print(
"removed automatically created '%s'" %
self._createdAutosaveFile)
self._createdAutosaveFile = None
def replace(self, path):
"""
replace current session with one given by file path
"""
self.setSessionPath(path)
self.restoreCurrentState()
def open(self):
"""open a session to define in a dialog in an extra window"""
filename = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
if filename:
self.new(filename)
def new(self, filename=None):
"""start a session an independent process"""
path = (self.exec_path,)
if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
# get the absolute path to the python-executable
p = find_executable("python")
path = (p, 'python') + path
else:
# if run in frozen env (.exe):
# first arg if execpath of the next session:
path += (self.exec_path,)
if filename:
path += ('-o', filename)
os.spawnl(os.P_NOWAIT, *path)
def registerMainWindow(self, win):
win.setWindowIcon(QtGui.QIcon(self.ICON))
self._mainWindow = win
win.show = self._showMainWindow
win.hide = self._hideMainWindow
if self.opts['server']:
server_ = Server(win)
win.hide()
else:
win.show()
@property
def tmp_dir_session(self):
# only create folder if needed
if self._tmp_dir_session is None:
# make temp-dir
# the directory where the content of the *pyz-file will be copied:
self._tmp_dir_session = PathStr(
tempfile.mkdtemp(
'%s_session' %
self.NAME))
return self._tmp_dir_session
def _showMainWindow(self):
try:
# restore autosave
del self._autosave
except AttributeError:
pass
self._mainWindow.__class__.show(self._mainWindow)
def _hideMainWindow(self):
# disable autosave when window is hidden
self._autosave = self.opts['autosave']
self.opts['autosave'] = False
self._mainWindow.__class__.hide(self._mainWindow)
def _saveState(self, path):
"""save current state and add a new state"""
self.addSession() # next session
self._save(str(self.n_sessions), path)
def _autoSave(self):
"""save state into 'autosave' """
a = 'autoSave'
path = self.path
if not path:
path = self.dir.join('%s.%s' % (a, self.FTYPE))
self._createdAutosaveFile = path
self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir()
self._save(a, path)
def blockingSave(self, path):
"""
saved session to file - returns after finish
only called by interactiveTutorial-save at the moment
"""
self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir()
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.saveThread.prepare('0', path, self.tmp_dir_session, state)
self.sigSave.emit(self)
self.saveThread.run()
def _save(self, stateName, path):
"""save into 'stateName' to pyz-path"""
print('saving...')
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.sigSave.emit(state)
self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)
self.saveThread.start()
self.current_session = stateName
r = self.opts['recent sessions']
try:
# is this session already exists: remove it
r.pop(r.index(path))
except ValueError:
pass
# add this session at the beginning
r.insert(0, path)
|
radjkarl/appBase
|
appbase/Session.py
|
Session.blockingSave
|
python
|
def blockingSave(self, path):
"""
saved session to file - returns after finish
only called by interactiveTutorial-save at the moment
"""
self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir()
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.saveThread.prepare('0', path, self.tmp_dir_session, state)
self.sigSave.emit(self)
self.saveThread.run()
|
saved session to file - returns after finish
only called by interactiveTutorial-save at the moment
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L471-L481
| null |
class Session(QtCore.QObject):
"""Session management to be accessible
in QtWidgets.QApplication.instance().session
* extract the opened (as pyz-zipped) session in a temp folder
* create 2nd temp-folder for sessions to be saved
* send a close signal to all child structures when exit
* write a log file with all output
* enable icons in menus of gnome-sessions [linux only]
* gives option of debug mode
"""
# sigPathChanged = QtCore.Signal(object) #path
sigSave = QtCore.Signal(object) # state dict
sigRestore = QtCore.Signal(object) # state dict
def __init__(self, args, **kwargs):
"""
Args:
first_start_dialog (Optional[bool]):
Show a different dialog for the first start.
name (Optional[str]): The applications name.
type (Optional[str]): The file type to be used for saving sessions.
icon (Optional[str]): Path to the application icon.
"""
QtCore.QObject.__init__(self)
# SESSION CONSTANTS:
self.NAME = kwargs.get('name', __main__.__name__)
self.FTYPE = kwargs.get('ftype', 'pyz')
self.ICON = kwargs.get('icon', None)
# hidden app-preferences folder:
self.dir = PathStr.home().mkdir('.%s' % self.NAME)
self.APP_CONFIG_FILE = self.dir.join('config.txt')
self._tmp_dir_session = None
self.current_session = None
# global session options - same for all new sessions:
self.opts = _Opts({
'maxSessions': 3,
'enableGuiIcons': True,
'writeToShell': True,
'createLog': False,
'debugMode': False,
'autosave': False,
'autosaveIntervalMin': 15,
'server': False,
'showCloseDialog': True,
'recent sessions': []
}, self)
# self.app_opts = {'showCloseDialog': True, 'recent sessions': []}
if not self.APP_CONFIG_FILE.exists():
# allow different first start dialog:
dialog = kwargs.get('first_start_dialog', FirstStart)
f = dialog(self)
f.exec_()
if not f.result():
sys.exit()
# create the config file
with open(self.APP_CONFIG_FILE, 'w') as f:
pass
else:
with open(self.APP_CONFIG_FILE, 'r') as f:
r = f.read()
if r:
self.opts.update(eval(r))
self._icons_enabled = False
self.log_file = None
dirname = self.opts['recent sessions']
if dirname:
dirname = PathStr(dirname[-1]).dirname()
self.dialogs = Dialogs(dirname)
self.saveThread = _SaveThread()
self._createdAutosaveFile = None
self.tmp_dir_save_session = None
# a work-dir for temp. storage:
# self.tmp_dir_work = PathStr(tempfile.mkdtemp('%s_work' % self.NAME))
pathName = self._inspectArguments(args)
self.setSessionPath(pathName)
if self.opts['createLog']:
self._setupLogFile()
# create connectable stdout and stderr signal:
self.streamOut = StreamSignal('out')
self.streamErr = StreamSignal('err')
self._enableGuiIcons()
# Auto-save timer:
self.timerAutosave = QtCore.QTimer()
self.timerAutosave.timeout.connect(self._autoSave)
self.opts.activate()
# first thing to do after start:
QtCore.QTimer.singleShot(0, self.restoreCurrentState)
def setSessionPath(self, path, statename=None):
if path: # and path.endswith('.%s' %self.FTYPE):
# this script was opened out from a zip-container (named as
# '*.pyz')
self.path = PathStr(path)
self.dir = self.path.dirname().abspath()
# extract the zip temporally
ZipFile(self.path, 'r').extractall(path=self.tmp_dir_session)
self.n_sessions = len(self.stateNames())
# SET STATE
snames = self.stateNames()
if statename is None:
# last one
self.current_session = snames[-1]
elif statename in snames:
self.current_session = statename
else:
raise Exception(
"state '%s' not in saved states %s" %
(statename, snames))
else:
self.path = None
self.n_sessions = 0
self.current_session = None
def writeLog(self, write=True):
if not self.log_file:
return
so = self.streamOut.message
se = self.streamErr.message
w = self.log_file.write
if write:
try:
# ensure only connected once
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
so.connect(w)
se.connect(w)
else:
try:
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
def _enableGuiIcons(self):
# enable icons in all QMenuBars only for this program if generally
# disabled
if self.opts['enableGuiIcons']:
if os.name == 'posix': # linux
this_env = str(os.environ.get('DESKTOP_SESSION'))
relevant_env = (
'gnome',
'gnome-shell',
'ubuntustudio',
'xubuntu')
if this_env in relevant_env:
if 'false' in os.popen(
# if the menu-icons on the gnome-desktop are
# disabled
'gconftool-2 --get /desktop/gnome/interface/menus_have_icons').read():
print('enable menu-icons')
os.system(
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons True')
self._icons_enabled = True
def _setupLogFile(self):
lfile = self.tmp_dir_session.join('log.txt')
if lfile.exists():
self.log_file = open(lfile, 'a')
else:
self.log_file = open(lfile, 'w')
self.log_file.write('''
####################################
New run at %s
####################################
''' % strftime("%d.%m.%Y|%H:%M:%S", gmtime()))
def checkMaxSessions(self, nMax=None):
"""
check whether max. number of saved sessions is reached
if: remove the oldest session
"""
if nMax is None:
nMax = self.opts['maxSessions']
l = self.stateNames()
if len(l) > nMax:
for f in l[:len(l) - nMax]:
self.tmp_dir_session.remove(str(f))
def stateNames(self):
"""Returns:
list: the names of all saved sessions
"""
# if self.current_session:
s = self.tmp_dir_session
l = [x for x in s.listdir() if s.join(x).isdir()]
naturalSorting(l)
# else:
# l=[]
# bring autosave to first position:
if 'autoSave' in l:
l.remove('autoSave')
l.insert(0, 'autoSave')
return l
def restorePreviousState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i > 1:
self.current_session = s[i - 1]
self.restoreCurrentState()
def restoreNextState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i < len(s) - 1:
self.current_session = s[i + 1]
self.restoreCurrentState()
def restoreStateName(self, name):
"""restore the state of given [name]"""
self.current_session = name
self.restoreCurrentState()
def renameState(self, oldStateName, newStateName):
s = self.tmp_dir_session.join(oldStateName)
s.rename(newStateName)
if self.current_session == oldStateName:
self.current_session = newStateName
print("==> State [%s] renamed to [%s]" % (oldStateName, newStateName))
def _recusiveReplacePlaceholderWithArray(self, state, arrays):
def recursive(state):
for key, val in list(state.items()):
if isinstance(val, dict):
recursive(val)
elif isinstance(val, str) and val.startswith('arr_'):
state[key] = arrays[val]
recursive(state)
def restoreCurrentState(self):
if self.current_session:
orig = self.tmp_dir_save_session
path = self.tmp_dir_save_session = self.tmp_dir_session.join(
self.current_session)
with open(path.join('state.pickle'), "rb") as f:
state = pickle.load(f)
p = path.join('arrays.npz')
if p.exists():
arrays = np.load(path.join('arrays.npz'))
self._recusiveReplacePlaceholderWithArray(state, arrays)
self.dialogs.restoreState(state['dialogs'])
self.opts.update(state['session'])
self.sigRestore.emit(state)
self.tmp_dir_save_session = orig
print(
"==> State [%s] restored from '%s'" %
(self.current_session, self.path))
def addSession(self):
self.current_session = self.n_sessions
self.n_sessions += 1
self.tmp_dir_save_session = self.tmp_dir_session.join(
str(self.n_sessions)).mkdir()
self.checkMaxSessions()
def quit(self):
print('exiting...')
# RESET ICONS
if self._icons_enabled:
print('disable menu-icons')
os.system( # restore the standard-setting for seeing icons in the menus
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons False')
# WAIT FOR PROMT IF IN DEBUG MODE
if self.opts['debugMode']:
input("Press any key to end the session...")
# REMOVE TEMP FOLDERS
try:
self.tmp_dir_session.remove()
# self.tmp_dir_work.remove()
except OSError:
pass # in case the folders are used by another process
with open(self.APP_CONFIG_FILE, 'w') as f:
f.write(str(self.opts))
# CLOSE LOG FILE
if self.log_file:
self.writeLog(False)
self.log_file.close()
def _inspectArguments(self, args):
"""inspect the command-line-args and give them to appBase"""
if args:
self.exec_path = PathStr(args[0])
else:
self.exec_path = None
session_name = None
args = args[1:]
openSession = False
for arg in args:
if arg in ('-h', '--help'):
self._showHelp()
elif arg in ('-d', '--debug'):
print('RUNNGING IN DEBUG-MODE')
self.opts['debugMode'] = True
elif arg in ('-l', '--log'):
print('CREATE LOG')
self.opts['createLog'] = True
elif arg in ('-s', '--server'):
self.opts['server'] = True
elif arg in ('-o', '--open'):
openSession = True
elif openSession:
session_name = arg
else:
print("Argument '%s' not known." % arg)
return self._showHelp()
return session_name
def _showHelp(self):
sys.exit('''
%s-sessions can started with the following arguments:
[-h or --help] - show the help-page
[-d or --debug] - run in debugging-mode
[-l or --log] - create log file
[-n or --new] - start a new session, don'l load saved properties
[-exec [cmd]] - execute python code from this script/executable
''' % self.__class__.__name__)
def save(self):
"""save the current session
override, if session was saved earlier"""
if self.path:
self._saveState(self.path)
else:
self.saveAs()
def saveAs(self, filename=None):
if filename is None:
# ask for filename:
filename = self.dialogs.getSaveFileName(filter="*.%s" % self.FTYPE)
if filename:
self.path = filename
self._saveState(self.path)
if self._createdAutosaveFile:
self._createdAutosaveFile.remove()
print(
"removed automatically created '%s'" %
self._createdAutosaveFile)
self._createdAutosaveFile = None
def replace(self, path):
"""
replace current session with one given by file path
"""
self.setSessionPath(path)
self.restoreCurrentState()
def open(self):
"""open a session to define in a dialog in an extra window"""
filename = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
if filename:
self.new(filename)
def new(self, filename=None):
"""start a session an independent process"""
path = (self.exec_path,)
if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
# get the absolute path to the python-executable
p = find_executable("python")
path = (p, 'python') + path
else:
# if run in frozen env (.exe):
# first arg if execpath of the next session:
path += (self.exec_path,)
if filename:
path += ('-o', filename)
os.spawnl(os.P_NOWAIT, *path)
def registerMainWindow(self, win):
win.setWindowIcon(QtGui.QIcon(self.ICON))
self._mainWindow = win
win.show = self._showMainWindow
win.hide = self._hideMainWindow
if self.opts['server']:
server_ = Server(win)
win.hide()
else:
win.show()
@property
def tmp_dir_session(self):
# only create folder if needed
if self._tmp_dir_session is None:
# make temp-dir
# the directory where the content of the *pyz-file will be copied:
self._tmp_dir_session = PathStr(
tempfile.mkdtemp(
'%s_session' %
self.NAME))
return self._tmp_dir_session
def _showMainWindow(self):
try:
# restore autosave
del self._autosave
except AttributeError:
pass
self._mainWindow.__class__.show(self._mainWindow)
def _hideMainWindow(self):
# disable autosave when window is hidden
self._autosave = self.opts['autosave']
self.opts['autosave'] = False
self._mainWindow.__class__.hide(self._mainWindow)
def _saveState(self, path):
"""save current state and add a new state"""
self.addSession() # next session
self._save(str(self.n_sessions), path)
def _autoSave(self):
"""save state into 'autosave' """
a = 'autoSave'
path = self.path
if not path:
path = self.dir.join('%s.%s' % (a, self.FTYPE))
self._createdAutosaveFile = path
self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir()
self._save(a, path)
def blockingSave(self, path):
"""
saved session to file - returns after finish
only called by interactiveTutorial-save at the moment
"""
self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir()
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.saveThread.prepare('0', path, self.tmp_dir_session, state)
self.sigSave.emit(self)
self.saveThread.run()
def _save(self, stateName, path):
"""save into 'stateName' to pyz-path"""
print('saving...')
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.sigSave.emit(state)
self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)
self.saveThread.start()
self.current_session = stateName
r = self.opts['recent sessions']
try:
# is this session already exists: remove it
r.pop(r.index(path))
except ValueError:
pass
# add this session at the beginning
r.insert(0, path)
|
radjkarl/appBase
|
appbase/Session.py
|
Session._save
|
python
|
def _save(self, stateName, path):
    """Save the current session state under *stateName* into the pyz file at *path*.

    Collects the session options and dialog state into one dict, emits
    ``sigSave`` so listeners can contribute their own entries, then hands
    the actual archive writing to the background save thread.

    :param stateName: name of the state folder inside the session archive
    :param path: target ``*.pyz`` file; also promoted to the front of the
        ``'recent sessions'`` list
    """
    print('saving...')
    state = {'session': dict(self.opts),
             'dialogs': self.dialogs.saveState()}
    # Listeners connected to sigSave may mutate `state` before it is written.
    self.sigSave.emit(state)
    self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)
    # QThread.start() runs _SaveThread.run() in the background (non-blocking).
    self.saveThread.start()
    self.current_session = stateName
    r = self.opts['recent sessions']
    try:
        # if this session is already in the recent list: remove it first
        r.pop(r.index(path))
    except ValueError:
        pass
    # add this session at the beginning
    r.insert(0, path)
|
save the current session state under the name 'stateName' into the pyz archive at 'path'
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L483-L503
| null |
class Session(QtCore.QObject):
"""Session management to be accessible
in QtWidgets.QApplication.instance().session
* extract the opened (as pyz-zipped) session in a temp folder
* create 2nd temp-folder for sessions to be saved
* send a close signal to all child structures when exit
* write a log file with all output
* enable icons in menus of gnome-sessions [linux only]
* gives option of debug mode
"""
# sigPathChanged = QtCore.Signal(object) #path
sigSave = QtCore.Signal(object) # state dict
sigRestore = QtCore.Signal(object) # state dict
def __init__(self, args, **kwargs):
"""
Args:
first_start_dialog (Optional[bool]):
Show a different dialog for the first start.
name (Optional[str]): The applications name.
type (Optional[str]): The file type to be used for saving sessions.
icon (Optional[str]): Path to the application icon.
"""
QtCore.QObject.__init__(self)
# SESSION CONSTANTS:
self.NAME = kwargs.get('name', __main__.__name__)
self.FTYPE = kwargs.get('ftype', 'pyz')
self.ICON = kwargs.get('icon', None)
# hidden app-preferences folder:
self.dir = PathStr.home().mkdir('.%s' % self.NAME)
self.APP_CONFIG_FILE = self.dir.join('config.txt')
self._tmp_dir_session = None
self.current_session = None
# global session options - same for all new sessions:
self.opts = _Opts({
'maxSessions': 3,
'enableGuiIcons': True,
'writeToShell': True,
'createLog': False,
'debugMode': False,
'autosave': False,
'autosaveIntervalMin': 15,
'server': False,
'showCloseDialog': True,
'recent sessions': []
}, self)
# self.app_opts = {'showCloseDialog': True, 'recent sessions': []}
if not self.APP_CONFIG_FILE.exists():
# allow different first start dialog:
dialog = kwargs.get('first_start_dialog', FirstStart)
f = dialog(self)
f.exec_()
if not f.result():
sys.exit()
# create the config file
with open(self.APP_CONFIG_FILE, 'w') as f:
pass
else:
with open(self.APP_CONFIG_FILE, 'r') as f:
r = f.read()
if r:
self.opts.update(eval(r))
self._icons_enabled = False
self.log_file = None
dirname = self.opts['recent sessions']
if dirname:
dirname = PathStr(dirname[-1]).dirname()
self.dialogs = Dialogs(dirname)
self.saveThread = _SaveThread()
self._createdAutosaveFile = None
self.tmp_dir_save_session = None
# a work-dir for temp. storage:
# self.tmp_dir_work = PathStr(tempfile.mkdtemp('%s_work' % self.NAME))
pathName = self._inspectArguments(args)
self.setSessionPath(pathName)
if self.opts['createLog']:
self._setupLogFile()
# create connectable stdout and stderr signal:
self.streamOut = StreamSignal('out')
self.streamErr = StreamSignal('err')
self._enableGuiIcons()
# Auto-save timer:
self.timerAutosave = QtCore.QTimer()
self.timerAutosave.timeout.connect(self._autoSave)
self.opts.activate()
# first thing to do after start:
QtCore.QTimer.singleShot(0, self.restoreCurrentState)
def setSessionPath(self, path, statename=None):
if path: # and path.endswith('.%s' %self.FTYPE):
# this script was opened out from a zip-container (named as
# '*.pyz')
self.path = PathStr(path)
self.dir = self.path.dirname().abspath()
# extract the zip temporally
ZipFile(self.path, 'r').extractall(path=self.tmp_dir_session)
self.n_sessions = len(self.stateNames())
# SET STATE
snames = self.stateNames()
if statename is None:
# last one
self.current_session = snames[-1]
elif statename in snames:
self.current_session = statename
else:
raise Exception(
"state '%s' not in saved states %s" %
(statename, snames))
else:
self.path = None
self.n_sessions = 0
self.current_session = None
def writeLog(self, write=True):
if not self.log_file:
return
so = self.streamOut.message
se = self.streamErr.message
w = self.log_file.write
if write:
try:
# ensure only connected once
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
so.connect(w)
se.connect(w)
else:
try:
so.disconnect(w)
se.disconnect(w)
except TypeError:
pass
def _enableGuiIcons(self):
# enable icons in all QMenuBars only for this program if generally
# disabled
if self.opts['enableGuiIcons']:
if os.name == 'posix': # linux
this_env = str(os.environ.get('DESKTOP_SESSION'))
relevant_env = (
'gnome',
'gnome-shell',
'ubuntustudio',
'xubuntu')
if this_env in relevant_env:
if 'false' in os.popen(
# if the menu-icons on the gnome-desktop are
# disabled
'gconftool-2 --get /desktop/gnome/interface/menus_have_icons').read():
print('enable menu-icons')
os.system(
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons True')
self._icons_enabled = True
def _setupLogFile(self):
lfile = self.tmp_dir_session.join('log.txt')
if lfile.exists():
self.log_file = open(lfile, 'a')
else:
self.log_file = open(lfile, 'w')
self.log_file.write('''
####################################
New run at %s
####################################
''' % strftime("%d.%m.%Y|%H:%M:%S", gmtime()))
def checkMaxSessions(self, nMax=None):
"""
check whether max. number of saved sessions is reached
if: remove the oldest session
"""
if nMax is None:
nMax = self.opts['maxSessions']
l = self.stateNames()
if len(l) > nMax:
for f in l[:len(l) - nMax]:
self.tmp_dir_session.remove(str(f))
def stateNames(self):
"""Returns:
list: the names of all saved sessions
"""
# if self.current_session:
s = self.tmp_dir_session
l = [x for x in s.listdir() if s.join(x).isdir()]
naturalSorting(l)
# else:
# l=[]
# bring autosave to first position:
if 'autoSave' in l:
l.remove('autoSave')
l.insert(0, 'autoSave')
return l
def restorePreviousState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i > 1:
self.current_session = s[i - 1]
self.restoreCurrentState()
def restoreNextState(self):
s = self.stateNames()
if s:
i = s.index(self.current_session)
if i < len(s) - 1:
self.current_session = s[i + 1]
self.restoreCurrentState()
def restoreStateName(self, name):
"""restore the state of given [name]"""
self.current_session = name
self.restoreCurrentState()
def renameState(self, oldStateName, newStateName):
s = self.tmp_dir_session.join(oldStateName)
s.rename(newStateName)
if self.current_session == oldStateName:
self.current_session = newStateName
print("==> State [%s] renamed to [%s]" % (oldStateName, newStateName))
def _recusiveReplacePlaceholderWithArray(self, state, arrays):
def recursive(state):
for key, val in list(state.items()):
if isinstance(val, dict):
recursive(val)
elif isinstance(val, str) and val.startswith('arr_'):
state[key] = arrays[val]
recursive(state)
def restoreCurrentState(self):
if self.current_session:
orig = self.tmp_dir_save_session
path = self.tmp_dir_save_session = self.tmp_dir_session.join(
self.current_session)
with open(path.join('state.pickle'), "rb") as f:
state = pickle.load(f)
p = path.join('arrays.npz')
if p.exists():
arrays = np.load(path.join('arrays.npz'))
self._recusiveReplacePlaceholderWithArray(state, arrays)
self.dialogs.restoreState(state['dialogs'])
self.opts.update(state['session'])
self.sigRestore.emit(state)
self.tmp_dir_save_session = orig
print(
"==> State [%s] restored from '%s'" %
(self.current_session, self.path))
def addSession(self):
self.current_session = self.n_sessions
self.n_sessions += 1
self.tmp_dir_save_session = self.tmp_dir_session.join(
str(self.n_sessions)).mkdir()
self.checkMaxSessions()
def quit(self):
print('exiting...')
# RESET ICONS
if self._icons_enabled:
print('disable menu-icons')
os.system( # restore the standard-setting for seeing icons in the menus
'gconftool-2 --type Boolean --set /desktop/gnome/interface/menus_have_icons False')
# WAIT FOR PROMT IF IN DEBUG MODE
if self.opts['debugMode']:
input("Press any key to end the session...")
# REMOVE TEMP FOLDERS
try:
self.tmp_dir_session.remove()
# self.tmp_dir_work.remove()
except OSError:
pass # in case the folders are used by another process
with open(self.APP_CONFIG_FILE, 'w') as f:
f.write(str(self.opts))
# CLOSE LOG FILE
if self.log_file:
self.writeLog(False)
self.log_file.close()
def _inspectArguments(self, args):
"""inspect the command-line-args and give them to appBase"""
if args:
self.exec_path = PathStr(args[0])
else:
self.exec_path = None
session_name = None
args = args[1:]
openSession = False
for arg in args:
if arg in ('-h', '--help'):
self._showHelp()
elif arg in ('-d', '--debug'):
print('RUNNGING IN DEBUG-MODE')
self.opts['debugMode'] = True
elif arg in ('-l', '--log'):
print('CREATE LOG')
self.opts['createLog'] = True
elif arg in ('-s', '--server'):
self.opts['server'] = True
elif arg in ('-o', '--open'):
openSession = True
elif openSession:
session_name = arg
else:
print("Argument '%s' not known." % arg)
return self._showHelp()
return session_name
def _showHelp(self):
sys.exit('''
%s-sessions can started with the following arguments:
[-h or --help] - show the help-page
[-d or --debug] - run in debugging-mode
[-l or --log] - create log file
[-n or --new] - start a new session, don'l load saved properties
[-exec [cmd]] - execute python code from this script/executable
''' % self.__class__.__name__)
def save(self):
"""save the current session
override, if session was saved earlier"""
if self.path:
self._saveState(self.path)
else:
self.saveAs()
def saveAs(self, filename=None):
if filename is None:
# ask for filename:
filename = self.dialogs.getSaveFileName(filter="*.%s" % self.FTYPE)
if filename:
self.path = filename
self._saveState(self.path)
if self._createdAutosaveFile:
self._createdAutosaveFile.remove()
print(
"removed automatically created '%s'" %
self._createdAutosaveFile)
self._createdAutosaveFile = None
def replace(self, path):
"""
replace current session with one given by file path
"""
self.setSessionPath(path)
self.restoreCurrentState()
def open(self):
"""open a session to define in a dialog in an extra window"""
filename = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
if filename:
self.new(filename)
def new(self, filename=None):
"""start a session an independent process"""
path = (self.exec_path,)
if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
# get the absolute path to the python-executable
p = find_executable("python")
path = (p, 'python') + path
else:
# if run in frozen env (.exe):
# first arg if execpath of the next session:
path += (self.exec_path,)
if filename:
path += ('-o', filename)
os.spawnl(os.P_NOWAIT, *path)
def registerMainWindow(self, win):
win.setWindowIcon(QtGui.QIcon(self.ICON))
self._mainWindow = win
win.show = self._showMainWindow
win.hide = self._hideMainWindow
if self.opts['server']:
server_ = Server(win)
win.hide()
else:
win.show()
@property
def tmp_dir_session(self):
# only create folder if needed
if self._tmp_dir_session is None:
# make temp-dir
# the directory where the content of the *pyz-file will be copied:
self._tmp_dir_session = PathStr(
tempfile.mkdtemp(
'%s_session' %
self.NAME))
return self._tmp_dir_session
def _showMainWindow(self):
try:
# restore autosave
del self._autosave
except AttributeError:
pass
self._mainWindow.__class__.show(self._mainWindow)
def _hideMainWindow(self):
# disable autosave when window is hidden
self._autosave = self.opts['autosave']
self.opts['autosave'] = False
self._mainWindow.__class__.hide(self._mainWindow)
def _saveState(self, path):
"""save current state and add a new state"""
self.addSession() # next session
self._save(str(self.n_sessions), path)
def _autoSave(self):
"""save state into 'autosave' """
a = 'autoSave'
path = self.path
if not path:
path = self.dir.join('%s.%s' % (a, self.FTYPE))
self._createdAutosaveFile = path
self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir()
self._save(a, path)
def blockingSave(self, path):
"""
saved session to file - returns after finish
only called by interactiveTutorial-save at the moment
"""
self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir()
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.saveThread.prepare('0', path, self.tmp_dir_session, state)
self.sigSave.emit(self)
self.saveThread.run()
def _save(self, stateName, path):
"""save into 'stateName' to pyz-path"""
print('saving...')
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.sigSave.emit(state)
self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)
self.saveThread.start()
self.current_session = stateName
r = self.opts['recent sessions']
try:
# is this session already exists: remove it
r.pop(r.index(path))
except ValueError:
pass
# add this session at the beginning
r.insert(0, path)
|
radjkarl/appBase
|
appbase/Session.py
|
_SaveThread._recusiveReplaceArrayWithPlaceholder
|
python
|
def _recusiveReplaceArrayWithPlaceholder(self, state):
"""
replace all numpy.array within the state dict
with a placeholder
this allows to save the arrays extra using numpy.save_compressed
"""
arrays = {}
def recursive(state):
for key, val in state.items():
if isinstance(val, dict):
recursive(val)
else:
if isinstance(val, np.ndarray):
name = 'arr_%i' % recursive.c
arrays[name] = val
state[key] = name
recursive.c += 1
recursive.c = 0
recursive(state)
return arrays
|
replace all numpy.ndarray values within the state dict
with a string placeholder
this allows the arrays to be saved separately using numpy.savez (or numpy.savez_compressed)
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L516-L536
| null |
class _SaveThread(QtCore.QThread):
    """Run the saving procedure in a thread to be non-blocking.

    Usage: call :meth:`prepare` with the target name/paths/state, then
    ``start()`` (inherited from QThread), which executes :meth:`run`
    in the background.
    """

    def prepare(self, stateName, path, dirpath, state):
        # Stash everything run() needs, since QThread.start() takes no args.
        self.stateName = stateName  # name of the state folder to write
        self.path = path            # destination *.pyz zip file
        self.dirpath = dirpath      # temp session directory to be zipped
        self._state = state         # state dict; may contain numpy arrays

    def _recusiveReplaceArrayWithPlaceholder(self, state):
        """
        Replace every numpy.ndarray nested in the state dict with a string
        placeholder ('arr_<i>') and return the extracted arrays.
        This lets the arrays be stored separately via numpy.savez while the
        remaining state stays cheaply picklable. `state` is modified in place.
        """
        arrays = {}

        def recursive(state):
            for key, val in state.items():
                if isinstance(val, dict):
                    recursive(val)
                else:
                    if isinstance(val, np.ndarray):
                        # counter lives as a function attribute so the
                        # nested closure can mutate it
                        name = 'arr_%i' % recursive.c
                        arrays[name] = val
                        state[key] = name
                        recursive.c += 1
        recursive.c = 0
        recursive(state)
        return arrays

    def run(self):
        # Pull numpy arrays out of the state so the pickle stays small.
        arrays = self._recusiveReplaceArrayWithPlaceholder(self._state)
        # save state
        p = self.dirpath.mkdir(self.stateName)
        with open(p.join('state.pickle'), "wb") as f:
            pickle.dump(self._state, f)
        # save arrays
        if len(arrays):
            np.savez(p.join('arrays.npz'), **arrays)
        del self._state
        # create zip file
        with ZipFile(self.path, 'w',
                     # allowZip64=True lets zipfile emit ZIP64 archives when
                     # the result exceeds 2 GB; it is not the stdlib default
                     # because the Unix InfoZIP tools lack the extension.
                     allowZip64=True) as zipFile:
            # copy a dir to the zip-file:
            basedir = self.dirpath
            for root, _, files in os.walk(self.dirpath):
                # Make archive names relative to the session directory.
                # NOTE(review): str.replace removes *any* occurrence of
                # basedir, not only a leading prefix -- confirm the temp
                # paths can never contain it twice.
                dirname = root.replace(basedir, '')
                for f in files:
                    zipFile.write(
                        os.path.join(
                            root, f), os.path.join(
                            dirname, f))
        print("|%s| ==> State [%s] saved to '%s'" % (
            strftime("%H:%M:%S", gmtime()),
            self.stateName, self.path))
|
radjkarl/appBase
|
setup.py
|
read
|
python
|
def read(*paths):
    """Join *paths* into one file path and return that file's contents.

    A missing file yields an empty string instead of raising, so optional
    files (e.g. CHANGES.rst) can be concatenated unconditionally.
    """
    target = os.path.join(*paths)
    if not os.path.exists(target):
        return ''
    with open(target, 'r') as handle:
        return handle.read()
|
Build a file path from *paths* and return the contents.
|
train
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/setup.py#L36-L42
| null |
# -*- coding: utf-8 -*-
"""
usage:
(sudo) python setup.py +
install ... local
register ... at http://pypi.python.org/pypi
sdist ... create *.tar to be uploaded to pyPI
sdist upload ... build the package and upload in to pyPI
"""
import os
import shutil
from setuptools import find_packages
from setuptools import setup as setup
import appbase as package
# a template for the python setup.py installer routine
#
# * take setup information from the packages __init__.py file
# * this way these informations, like...
# - __email__
# - __version__
# - __depencies__
# are still available after installation
#
# * exclude /tests*
# * create scripts from all files in /bin
# * create the long description from
# - /README.rst
# - /CHANGES.rst
# - /AUTHORS.rst
#
# * remove /build at the end
def read(*paths):
    """Build a file path from *paths* and return the contents."""
    # Path segments are joined directly; callers pass e.g. ('README.rst',).
    p = os.path.join(*paths)
    if os.path.exists(p):
        with open(p, 'r') as f:
            return f.read()
    # Missing (optional) files fall through to an empty string.
    return ''
setup(
name=package.__name__,
version=package.__version__,
author=package.__author__,
author_email=package.__email__,
url=package.__url__,
license=package.__license__,
install_requires=[
"numpy >= 1.7.1",
"qtpy",
"fancytools >= 0.2",
"fancywidgets >= 0.1"
],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries :: Python Modules',
],
description=package.__doc__,
packages=find_packages(exclude=['tests*']),
include_package_data=True,
# scripts = [] if not os.path.exists('bin') else [
# os.path.join('bin',x) for x in os.listdir('bin')],
long_description=(
read('README.rst') + '\n\n' +
read('CHANGES.rst') + '\n\n' +
read('AUTHORS.rst'))
)
# remove the build
# else old and notexistent files could come again in the installed pkg
mainPath = os.path.abspath(os.path.dirname(__file__))
bPath = os.path.join(mainPath, 'build')
if os.path.exists(bPath):
shutil.rmtree(bPath)
# if __name__ == '__main__':
# import appbase
# import sys
#
# setup(appbase)
#
#
# #LAUNCHER NEEDS SOME WORK - UNTIL THATS DONE: DONT RUN THE FOLLOWING
# INSTALL_LAUNCHER_STARTER = False
#
# if INSTALL_LAUNCHER_STARTER:
# if 'install' in sys.argv:
# while True:
# answer = raw_input('Do you want to a start menu entry for the appbase Launcher? [Y,N] ')
# if answer.lower() in ('y', 'n', ''):
# break
# print("Please answer with 'Y' or 'N'")
# if answer == 'Y':
# from fancytools.os import StartMenuEntry
# from appbase.Launcher import Launcher
# icon = os.path.join(os.curdir, 'media', 'launcher_logo.svg')
# StartMenuEntry('pyz_launcher', Launcher.__file__, os.path.abspath(icon)).create()
|
anti1869/sunhead
|
src/sunhead/conf.py
|
Settings.discover_config_path
|
python
|
def discover_config_path(self, config_filename: str) -> str:
    """Search for the config file in a number of common places.

    A value that is already an existing file path wins immediately;
    otherwise each directory in ``_common_places`` is probed in order.
    Returns ``None`` when nothing is found.
    """
    if config_filename:
        if os.path.isfile(config_filename):
            return config_filename
    for candidate_dir in _common_places:
        candidate = os.path.join(candidate_dir, config_filename)
        if os.path.isfile(candidate):
            return candidate
    return None
|
Search for config file in a number of places.
If there is no config file found, will return None.
:param config_filename: Config file name or custom path to filename with config.
:return: Path to the discovered config file or None.
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/conf.py#L120-L137
| null |
class Settings(object):
def __init__(self):
self._configured = False
self._configuring = False
self._envvar = DEFAULT_ENVIRONMENT_VARIABLE
def __getattribute__(self, item):
# Some sort of Lazy Settings implementation.
# All this stuff is to allow using custom settings envvar names.
# Settings will be configured upon first element fetch or manually, using ``configure`` method
passthrough = {"_configuring", "_configured", "configure"}
if item not in passthrough and not self._configuring and not self._configured:
self.configure()
return super().__getattribute__(item)
def configure(self, settings_module: str = None, custom_envvar: str = None, fallback_module: str = None) -> None:
# Disable __getattribute__ hook while this method is run.
self._configuring = True
if custom_envvar:
self._envvar = custom_envvar
global_config = import_module(GLOBAL_CONFIG_MODULE)
settings_module_name = settings_module or self.get_settings_module_name(fallback_module)
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_config):
if setting.isupper():
setattr(self, setting, getattr(global_config, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module_name
mod = import_module(self.SETTINGS_MODULE)
tuple_settings = (
# Add your tuple settings here
# "INSTALLED_APPS"
)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if (setting in tuple_settings and
not isinstance(setting_value, (list, tuple))):
raise ImproperlyConfigured(
"The %s setting must be a list or a tuple. Please fix your settings." % setting)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
self.update_from_config_file()
self.configure_logging()
# Disable __gettattr__ hook completely
self._configured = True
self._configuring = False
def get_settings_module_name(self, fallback_module):
name = os.environ.get(self._envvar, None) or fallback_module
if not name:
raise ImproperlyConfigured(
"Settings module not found. Got %s in %s var and there is no fallback provided." % (name, self._envvar)
)
return name
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
'cls': self.__class__.__name__,
'settings_module': self.SETTINGS_MODULE,
}
def update_from_config_file(self):
filename = getattr(self, "CONFIG_FILENAME", None)
if not filename:
return
config_path = self.discover_config_path(filename)
for setting, setting_value in self.gen_from_yaml_config(config_path):
# if setting == "LOGGING": # Special case, will think better solution later
# setting_value = self.get_overriden_dict_config(setting_value)
setattr(self, setting, setting_value)
def gen_from_yaml_config(self, config_path: str) -> Iterator:
"""
Convention is to uppercase first level keys.
:param config_path: Valid path to the yml config file.
:return: Config loaded from yml file
"""
if not config_path:
return {}
with open(config_path, 'r') as f:
yaml_config = yaml.load(f)
gen = map(lambda x: (x[0].upper(), x[1]), yaml_config.items())
return gen
def configure_logging(self):
logging = getattr(self, "LOGGING", None)
if not logging:
return
self.remove_unused_handlers(logging, {})
dictConfig(logging)
def remove_unused_handlers(self, dict_config, requested_handlers):
should_not_be_empty = (
("file", "filename"),
("sentry", "dsn"),
("syslog", "address"),
)
for handler, key in should_not_be_empty:
self.remove_handler_if_not_configured(dict_config, {}, handler, key)
def remove_handler_if_not_configured(self, dict_config, requested_handlers, handler_name, check_key) -> None:
"""
Remove ``handler_name`` from ``dict_config`` and ``requested_handlers`` if ``check_key`` is empty.
"""
try:
if not dict_config["handlers"][handler_name][check_key]:
dict_config["handlers"].pop(handler_name)
if handler_name in requested_handlers:
requested_handlers.remove(handler_name)
except KeyError:
# Ignore key errors
pass
|
anti1869/sunhead
|
src/sunhead/conf.py
|
Settings.gen_from_yaml_config
|
python
|
def gen_from_yaml_config(self, config_path: str) -> Iterator:
    """
    Yield ``(SETTING_NAME, value)`` pairs loaded from a YAML config file.

    Convention is to uppercase first level keys.

    :param config_path: Valid path to the yml config file; falsy values
        produce an empty result.
    :return: Iterator of ``(uppercased_key, value)`` pairs.
    """
    if not config_path:
        # An empty dict iterates as zero (key, value) pairs.
        return {}
    with open(config_path, 'r') as f:
        # safe_load instead of bare yaml.load: plain load() can construct
        # arbitrary Python objects from tagged YAML (code execution risk on
        # untrusted files) and has required an explicit Loader since
        # PyYAML 5.1. An empty file parses to None -> fall back to {}.
        yaml_config = yaml.safe_load(f) or {}
    gen = map(lambda x: (x[0].upper(), x[1]), yaml_config.items())
    return gen
|
Convention is to uppercase first level keys.
:param config_path: Valid path to the yml config file.
:return: Config loaded from yml file
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/conf.py#L139-L154
| null |
class Settings(object):
def __init__(self):
self._configured = False
self._configuring = False
self._envvar = DEFAULT_ENVIRONMENT_VARIABLE
def __getattribute__(self, item):
# Some sort of Lazy Settings implementation.
# All this stuff is to allow using custom settings envvar names.
# Settings will be configured upon first element fetch or manually, using ``configure`` method
passthrough = {"_configuring", "_configured", "configure"}
if item not in passthrough and not self._configuring and not self._configured:
self.configure()
return super().__getattribute__(item)
def configure(self, settings_module: str = None, custom_envvar: str = None, fallback_module: str = None) -> None:
# Disable __getattribute__ hook while this method is run.
self._configuring = True
if custom_envvar:
self._envvar = custom_envvar
global_config = import_module(GLOBAL_CONFIG_MODULE)
settings_module_name = settings_module or self.get_settings_module_name(fallback_module)
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_config):
if setting.isupper():
setattr(self, setting, getattr(global_config, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module_name
mod = import_module(self.SETTINGS_MODULE)
tuple_settings = (
# Add your tuple settings here
# "INSTALLED_APPS"
)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if (setting in tuple_settings and
not isinstance(setting_value, (list, tuple))):
raise ImproperlyConfigured(
"The %s setting must be a list or a tuple. Please fix your settings." % setting)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
self.update_from_config_file()
self.configure_logging()
# Disable __gettattr__ hook completely
self._configured = True
self._configuring = False
def get_settings_module_name(self, fallback_module):
name = os.environ.get(self._envvar, None) or fallback_module
if not name:
raise ImproperlyConfigured(
"Settings module not found. Got %s in %s var and there is no fallback provided." % (name, self._envvar)
)
return name
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
'cls': self.__class__.__name__,
'settings_module': self.SETTINGS_MODULE,
}
def update_from_config_file(self):
filename = getattr(self, "CONFIG_FILENAME", None)
if not filename:
return
config_path = self.discover_config_path(filename)
for setting, setting_value in self.gen_from_yaml_config(config_path):
# if setting == "LOGGING": # Special case, will think better solution later
# setting_value = self.get_overriden_dict_config(setting_value)
setattr(self, setting, setting_value)
def discover_config_path(self, config_filename: str) -> str:
"""
Search for config file in a number of places.
If there is no config file found, will return None.
:param config_filename: Config file name or custom path to filename with config.
:return: Path to the discovered config file or None.
"""
if config_filename and os.path.isfile(config_filename):
return config_filename
for place in _common_places:
config_path = os.path.join(place, config_filename)
if os.path.isfile(config_path):
return config_path
return
def configure_logging(self):
logging = getattr(self, "LOGGING", None)
if not logging:
return
self.remove_unused_handlers(logging, {})
dictConfig(logging)
def remove_unused_handlers(self, dict_config, requested_handlers):
should_not_be_empty = (
("file", "filename"),
("sentry", "dsn"),
("syslog", "address"),
)
for handler, key in should_not_be_empty:
self.remove_handler_if_not_configured(dict_config, {}, handler, key)
def remove_handler_if_not_configured(self, dict_config, requested_handlers, handler_name, check_key) -> None:
"""
Remove ``handler_name`` from ``dict_config`` and ``requested_handlers`` if ``check_key`` is empty.
"""
try:
if not dict_config["handlers"][handler_name][check_key]:
dict_config["handlers"].pop(handler_name)
if handler_name in requested_handlers:
requested_handlers.remove(handler_name)
except KeyError:
# Ignore key errors
pass
|
anti1869/sunhead
|
src/sunhead/conf.py
|
Settings.remove_handler_if_not_configured
|
python
|
def remove_handler_if_not_configured(self, dict_config, requested_handlers,
                                     handler_name, check_key) -> None:
    """
    Remove ``handler_name`` from ``dict_config`` and ``requested_handlers``
    when its ``check_key`` setting is empty.

    A missing handler or missing ``check_key`` leaves everything untouched
    (EAFP: the lookup's KeyError is swallowed deliberately).
    """
    try:
        configured_value = dict_config["handlers"][handler_name][check_key]
    except KeyError:
        # Handler (or its setting) is absent entirely -- nothing to prune.
        return
    if not configured_value:
        dict_config["handlers"].pop(handler_name)
        if handler_name in requested_handlers:
            requested_handlers.remove(handler_name)
|
Remove ``handler_name`` from ``dict_config`` and ``requested_handlers`` if ``check_key`` is empty.
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/conf.py#L173-L184
| null |
class Settings(object):
def __init__(self):
self._configured = False
self._configuring = False
self._envvar = DEFAULT_ENVIRONMENT_VARIABLE
def __getattribute__(self, item):
# Some sort of Lazy Settings implementation.
# All this stuff is to allow using custom settings envvar names.
# Settings will be configured upon first element fetch or manually, using ``configure`` method
passthrough = {"_configuring", "_configured", "configure"}
if item not in passthrough and not self._configuring and not self._configured:
self.configure()
return super().__getattribute__(item)
def configure(self, settings_module: str = None, custom_envvar: str = None, fallback_module: str = None) -> None:
# Disable __getattribute__ hook while this method is run.
self._configuring = True
if custom_envvar:
self._envvar = custom_envvar
global_config = import_module(GLOBAL_CONFIG_MODULE)
settings_module_name = settings_module or self.get_settings_module_name(fallback_module)
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_config):
if setting.isupper():
setattr(self, setting, getattr(global_config, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module_name
mod = import_module(self.SETTINGS_MODULE)
tuple_settings = (
# Add your tuple settings here
# "INSTALLED_APPS"
)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if (setting in tuple_settings and
not isinstance(setting_value, (list, tuple))):
raise ImproperlyConfigured(
"The %s setting must be a list or a tuple. Please fix your settings." % setting)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
self.update_from_config_file()
self.configure_logging()
# Disable __gettattr__ hook completely
self._configured = True
self._configuring = False
def get_settings_module_name(self, fallback_module):
name = os.environ.get(self._envvar, None) or fallback_module
if not name:
raise ImproperlyConfigured(
"Settings module not found. Got %s in %s var and there is no fallback provided." % (name, self._envvar)
)
return name
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
'cls': self.__class__.__name__,
'settings_module': self.SETTINGS_MODULE,
}
def update_from_config_file(self):
filename = getattr(self, "CONFIG_FILENAME", None)
if not filename:
return
config_path = self.discover_config_path(filename)
for setting, setting_value in self.gen_from_yaml_config(config_path):
# if setting == "LOGGING": # Special case, will think better solution later
# setting_value = self.get_overriden_dict_config(setting_value)
setattr(self, setting, setting_value)
def discover_config_path(self, config_filename: str) -> str:
    """
    Search for the config file in a number of well-known places.

    :param config_filename: Config file name or a custom path to the config file.
    :return: Path to the discovered config file, or None when nothing is found.
    """
    # An explicit, existing path wins outright.
    if config_filename and os.path.isfile(config_filename):
        return config_filename
    # Otherwise probe each of the common locations in order.
    for place in _common_places:
        candidate = os.path.join(place, config_filename)
        if os.path.isfile(candidate):
            return candidate
    return None
def gen_from_yaml_config(self, config_path: str) -> Iterator:
    """
    Load settings pairs from a YAML config file.

    Convention is to uppercase first level keys.

    :param config_path: Valid path to the yml config file, or a falsy value.
    :return: Iterable of ``(NAME, value)`` pairs loaded from the yml file
        (empty when *config_path* is falsy).
    """
    if not config_path:
        return {}
    with open(config_path, 'r') as f:
        # BUGFIX: yaml.load() without an explicit Loader is deprecated and can
        # run arbitrary constructors on untrusted input; safe_load is the
        # correct choice for plain config data.
        yaml_config = yaml.safe_load(f)
    gen = map(lambda x: (x[0].upper(), x[1]), yaml_config.items())
    return gen
def configure_logging(self):
    """Apply the ``LOGGING`` dictConfig, when one is defined in settings."""
    logging_config = getattr(self, "LOGGING", None)
    if not logging_config:
        return
    # Prune handlers whose required options are missing before applying.
    self.remove_unused_handlers(logging_config, {})
    dictConfig(logging_config)
def remove_unused_handlers(self, dict_config, requested_handlers):
    """
    Drop known handlers whose required config key is absent.

    NOTE(review): *requested_handlers* is accepted but a fresh empty dict is
    passed to ``remove_handler_if_not_configured`` instead — presumably
    intentional; confirm against that helper's signature.
    """
    required_options = (
        ("file", "filename"),
        ("sentry", "dsn"),
        ("syslog", "address"),
    )
    for handler_name, required_key in required_options:
        self.remove_handler_if_not_configured(dict_config, {}, handler_name, required_key)
|
anti1869/sunhead
|
src/sunhead/serializers/json.py
|
JSONSerializer.json_serial
|
python
|
def json_serial(cls, obj):
    """
    JSON serializer for objects not serializable by default json code.

    Supported: datetime (ISO format), Enum (its value), timedelta (str),
    set (list), UUID (str). Anything else raises TypeError.
    """
    if isinstance(obj, datetime):
        return obj.isoformat()
    if issubclass(obj.__class__, enum.Enum):
        return obj.value
    if isinstance(obj, timedelta):
        return str(obj)
    if isinstance(obj, set):
        return [item for item in obj]
    if isinstance(obj, uuid.UUID):
        return str(obj)
    # FIXME: if you want to add one more custom serializer, think twice about `singledispatch`
    raise TypeError("Type not serializable %s in %s" % (type(obj), obj))
|
JSON serializer for objects not serializable by default json code
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/serializers/json.py#L34-L57
| null |
class JSONSerializer(AbstractSerializer):
    """Serializer moving data to and from JSON text, with optional graceful mode."""

    # Fallback values returned in graceful mode when (de)serialization fails.
    _DEF_SERIALIZED_DEFAULT = "{}"
    _DEF_DESERIALIZED_DEFAULT = {}

    def __init__(self, graceful=False):
        """
        :param graceful: When True, failures are logged and the configured
            defaults are returned instead of raising ``SerializationError``.
        """
        super().__init__(graceful)
        self._graceful = graceful
        self._serialized_default = self._DEF_SERIALIZED_DEFAULT
        self._deserialized_default = self._DEF_DESERIALIZED_DEFAULT

    @property
    def graceful(self):
        # BUGFIX: was ``@classmethod`` stacked on ``@property`` — classmethod
        # objects have no ``.setter`` (so the setter below broke), the
        # classmethod+property chain was removed in Python 3.11, and the value
        # is per-instance state anyway.
        return self._graceful

    @graceful.setter
    def graceful(self, value):
        self._graceful = value

    def set_defaults(self, serialized, unserialized):
        """Override the fallback values used on graceful failures."""
        self._serialized_default = serialized
        self._deserialized_default = unserialized

    def serialize(self, data: Transferrable, **kwargs) -> Serialized:
        """Dump *data* to a JSON string; honors the ``graceful`` failure policy."""
        kwargs.setdefault("default", self.json_serial)
        try:
            serialized = json.dumps(data, **kwargs)
        except Exception:
            logger.error("Message serialization error", exc_info=True)
            if not self.graceful:
                raise SerializationError
            serialized = self._serialized_default
        return serialized

    def deserialize(self, msg: Serialized) -> Transferrable:
        """Parse JSON from *msg* (``str`` or UTF-8 ``bytes``)."""
        body_txt = msg.decode("utf-8") if hasattr(msg, "decode") else msg
        try:
            deserialized = json.loads(body_txt)
        except json.JSONDecodeError:
            logger.error("Error deserializing message body", exc_info=True)
            if not self.graceful:
                raise SerializationError
            deserialized = self._deserialized_default
        return deserialized
|
anti1869/sunhead
|
src/sunhead/decorators.py
|
cached_property
|
python
|
def cached_property():
    """
    Handy utility to build caching properties in your classes.

    Decorated code runs only once per instance; the result is stored on the
    instance in a private attribute named ``__<function name>``. Subsequent
    calls return the stored value.

    :return: Decorator for the wrapped method.
    """
    import functools

    # Private sentinel: ``None`` (or any value) may legitimately be cached.
    _missing = object()

    def _stored_value(f):
        storage_var_name = "__{}".format(f.__name__)

        @functools.wraps(f)  # BUGFIX: preserve the decoratee's name/docstring
        def _wrapper(self, *args, **kwargs):
            value_in_cache = getattr(self, storage_var_name, _missing)
            if value_in_cache is not _missing:
                return value_in_cache
            calculated_value = f(self, *args, **kwargs)
            setattr(self, storage_var_name, calculated_value)
            return calculated_value

        return _wrapper

    return _stored_value
|
Handy utility to build caching properties in your classes.
Decorated code will be run only once and then the result will be stored in a private
instance attribute named after the decorated function ("__<name>"). When called again,
the property returns the cached value.
The factory takes no arguments; the storage attribute name is derived automatically
from the decorated function's name.
:return: Decorator for the class property
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/decorators.py#L18-L40
| null |
"""
Miscellaneous decorators to make your life easier.
"""
import logging
default_logger = logging.getLogger(__name__)
DEPRECATION_MESSAGE = "WARNING: Using {name} is deprecated. It will be removed soon"
class Sentinel(object):
"""Use this instead of None"""
def deprecated(message=DEPRECATION_MESSAGE, logger=None):
"""
This decorator will simply print warning before running decoratee.
So, presumably, you want to use it with console-based commands.
:return: Decorator for the function.
"""
if logger is None:
logger = default_logger
def _deprecated(f):
def _wrapper(*args, **kwargs):
f_name = f.__name__
logger.warning(message.format(name=f_name))
result = f(*args, **kwargs)
return result
return _wrapper
return _deprecated
|
anti1869/sunhead
|
src/sunhead/decorators.py
|
deprecated
|
python
|
def deprecated(message=DEPRECATION_MESSAGE, logger=None):
    """
    Print a deprecation warning before running the decoratee.

    So, presumably, you want to use it with console-based commands.

    :param message: Warning template, formatted with ``name=<function name>``.
    :param logger: Logger used to emit the warning (module logger by default).
    :return: Decorator for the function.
    """
    if logger is None:
        logger = default_logger

    def _deprecated(f):
        import functools

        @functools.wraps(f)  # BUGFIX: preserve the decoratee's metadata
        def _wrapper(*args, **kwargs):
            logger.warning(message.format(name=f.__name__))
            return f(*args, **kwargs)

        return _wrapper

    return _deprecated
|
This decorator will simply print warning before running decoratee.
So, presumably, you want to use it with console-based commands.
:return: Decorator for the function.
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/decorators.py#L43-L59
| null |
"""
Miscellaneous decorators to make your life easier.
"""
import logging
default_logger = logging.getLogger(__name__)
DEPRECATION_MESSAGE = "WARNING: Using {name} is deprecated. It will be removed soon"
class Sentinel(object):
"""Use this instead of None"""
def cached_property():
"""
Handy utility to build caching properties in your classes.
Decorated code will be run only once and then result will be stored in private class property
with the given name. When called for the second time, property will return cached value.
:param storage_var_name: Name of the class property to store cached data.
:type storage_var_name: str
:return: Decorator for the class property
"""
def _stored_value(f):
storage_var_name = "__{}".format(f.__name__)
def _wrapper(self, *args, **kwargs):
value_in_cache = getattr(self, storage_var_name, Sentinel)
if value_in_cache is not Sentinel:
return value_in_cache
calculated_value = f(self, *args, **kwargs)
setattr(self, storage_var_name, calculated_value)
return calculated_value
return _wrapper
return _stored_value
|
anti1869/sunhead
|
src/sunhead/events/transports/amqp.py
|
AMQPClient.connect
|
python
|
async def connect(self):
    """Connect to RabbitMQ, open a channel, apply QoS and set up the exchange."""
    # Guard: already connected, or another connect() is in flight.
    if self.connected or self.is_connecting:
        return
    self._is_connecting = True
    try:
        logger.info("Connecting to RabbitMQ...")
        # aioamqp.connect returns a (transport, protocol) pair.
        self._transport, self._protocol = await aioamqp.connect(**self._connection_parameters)
        logger.info("Getting channel...")
        self._channel = await self._protocol.channel()
        if self._global_qos is not None:
            logger.info("Setting prefetch count on connection (%s)", self._global_qos)
            # Positional args presumably (prefetch_size=0, prefetch_count,
            # connection_global=1) — TODO confirm against aioamqp's basic_qos.
            await self._channel.basic_qos(0, self._global_qos, 1)
        logger.info("Connecting to exchange '%s (%s)'", self._exchange_name, self._exchange_type)
        await self._channel.exchange(self._exchange_name, self._exchange_type)
    except (aioamqp.AmqpClosedConnection, Exception):
        # NOTE(review): catching Exception makes the AmqpClosedConnection
        # entry redundant — every error lands here.
        logger.error("Error initializing RabbitMQ connection", exc_info=True)
        self._is_connecting = False
        raise exceptions.StreamConnectionError
    self._is_connecting = False
|
Create new asynchronous connection to the RabbitMQ instance.
This will connect, declare exchange and bind itself to the configured queue.
After that, client is ready to publish or consume messages.
:return: Does not return anything.
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/events/transports/amqp.py#L87-L119
| null |
class AMQPClient(AbstractTransport):
"""
Handy implementation of the asynchronous AMQP client.
Useful for building both publishers and consumers.
"""
DEFAULT_EXCHANGE_NAME = "default_exchange"
DEFAULT_EXCHANGE_TYPE = "topic"
def __init__(
self,
connection_parameters: dict,
exchange_name: str = DEFAULT_EXCHANGE_NAME,
exchange_type: str = DEFAULT_EXCHANGE_TYPE,
global_qos: Optional[int] = None,
**kwargs):
"""
There must be at least these members of the connection_parameters dict::
"connection_parameters": {
"login": "",
"password": "",
"host": "",
"port": "",
"virtualhost": "",
},
:param connection_parameters: Dict with connection parameters. See above for its format.
:return: EventsQueueClient instance.
"""
# Can not pass empty password when connecting. Must remove the field completely.
if not connection_parameters.get("password", ""):
connection_parameters.pop("password", None)
self._connection_parameters = connection_parameters or {}
self._transport = None
self._protocol = None
self._channel = None
self._exchange_name = exchange_name
self._exchange_type = exchange_type
self._global_qos = global_qos
self._serializer = self._get_serializer()
self._is_connecting = False
self._connection_guid = str(uuid4())
self._known_queues = {}
self._routing = {}
def _get_serializer(self):
# TODO: Make serializer configurable here
return JSONSerializer()
@property
def connected(self):
return self._channel is not None and self._channel.is_open
@property
def is_connecting(self) -> bool:
return self._is_connecting
async def close(self):
self._protocol.stop()
await self._channel.close()
async def publish(self, data: Transferrable, topic: AnyStr) -> None:
if not self.connected:
logger.warning("Attempted to send message while not connected")
return
body = self._serializer.serialize(data)
await self._channel.publish(
body,
exchange_name=self._exchange_name,
routing_key=topic
)
# Uncomment for debugging
# logger.debug("Published message to AMQP exchange=%s, topic=%s", self._exchange_name, topic)
async def consume_queue(self, subscriber: AbstractSubscriber) -> None:
"""
Subscribe to the queue consuming.
:param subscriber:
:return:
"""
queue_name = subscriber.name
topics = subscriber.requested_topics
if queue_name in self._known_queues:
raise exceptions.ConsumerError("Queue '%s' already being consumed" % queue_name)
await self._declare_queue(queue_name)
# TODO: There is a lot of room to improvement here. Figure out routing done the right way
for key in topics:
self._routing.setdefault(key, set())
if subscriber in self._routing[key]:
logger.warning("Subscriber '%s' already receiving routing_key '%s'", subscriber, key)
break
await self._bind_key_to_queue(key, queue_name)
self._routing[key].add(subscriber)
logger.info("Consuming queue '%s'", queue_name)
await asyncio.wait_for(
self._channel.basic_consume(callback=self._on_message, queue_name=queue_name),
timeout=10
)
self._add_to_known_queue(queue_name)
async def _declare_queue(self, queue_name: AnyStr) -> None:
logger.info("Declaring queue...")
queue_declaration = await self._channel.queue_declare(queue_name)
queue_name = queue_declaration.get("queue")
logger.info("Declared queue '%s'", queue_name)
async def _bind_key_to_queue(self, routing_key: AnyStr, queue_name: AnyStr) -> None:
"""
Bind to queue with specified routing key.
:param routing_key: Routing key to bind with.
:param queue_name: Name of the queue
:return: Does not return anything
"""
logger.info("Binding key='%s'", routing_key)
result = await self._channel.queue_bind(
exchange_name=self._exchange_name,
queue_name=queue_name,
routing_key=routing_key,
)
return result
async def _on_message(self, channel, body, envelope, properties) -> None:
"""
Fires up when message is received by this consumer.
:param channel: Channel, through which message is received
:param body: Body of the message (serialized).
:param envelope: Envelope object with message meta
:type envelope: aioamqp.Envelope
:param properties: Properties of the message
:return: Coroutine object with result of message handling operation
"""
subscribers = self._get_subscribers(envelope.routing_key)
if not subscribers:
logger.debug("No route for message with key '%s'", envelope.routing_key)
return
body = self._serializer.deserialize(body)
for subscriber in subscribers:
# Check later if ensure_future can be applied here
await subscriber.on_message(body, envelope.routing_key)
await self._channel.basic_client_ack(envelope.delivery_tag)
def _get_subscribers(self, incoming_routing_key: AnyStr) -> Sequence[AbstractSubscriber]:
for key, subscribers in self._routing.items():
if fnmatch(incoming_routing_key, key):
return subscribers
return tuple()
def _add_to_known_queue(self, queue_name: AnyStr) -> None:
self._known_queues[queue_name] = {
"bound_keys": set(),
}
|
anti1869/sunhead
|
src/sunhead/events/transports/amqp.py
|
AMQPClient.consume_queue
|
python
|
async def consume_queue(self, subscriber: AbstractSubscriber) -> None:
    """
    Declare the subscriber's queue, bind its topics, and start consuming.

    :param subscriber: Provides the queue name and the requested topics.
    :raises exceptions.ConsumerError: When the queue is already being consumed.
    """
    queue_name = subscriber.name
    topics = subscriber.requested_topics
    if queue_name in self._known_queues:
        raise exceptions.ConsumerError("Queue '%s' already being consumed" % queue_name)
    await self._declare_queue(queue_name)
    # TODO: There is a lot of room to improvement here. Figure out routing done the right way
    for key in topics:
        self._routing.setdefault(key, set())
        if subscriber in self._routing[key]:
            logger.warning("Subscriber '%s' already receiving routing_key '%s'", subscriber, key)
            # NOTE(review): this `break` aborts binding of all REMAINING keys,
            # not just this duplicate — `continue` looks like the intent; confirm.
            break
        await self._bind_key_to_queue(key, queue_name)
        self._routing[key].add(subscriber)
    logger.info("Consuming queue '%s'", queue_name)
    # Give the broker up to 10s to acknowledge the consume request.
    await asyncio.wait_for(
        self._channel.basic_consume(callback=self._on_message, queue_name=queue_name),
        timeout=10
    )
    self._add_to_known_queue(queue_name)
|
Subscribe to the queue consuming.
:param subscriber:
:return:
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/events/transports/amqp.py#L140-L171
| null |
class AMQPClient(AbstractTransport):
"""
Handy implementation of the asynchronous AMQP client.
Useful for building both publishers and consumers.
"""
DEFAULT_EXCHANGE_NAME = "default_exchange"
DEFAULT_EXCHANGE_TYPE = "topic"
def __init__(
self,
connection_parameters: dict,
exchange_name: str = DEFAULT_EXCHANGE_NAME,
exchange_type: str = DEFAULT_EXCHANGE_TYPE,
global_qos: Optional[int] = None,
**kwargs):
"""
There must be at least these members of the connection_parameters dict::
"connection_parameters": {
"login": "",
"password": "",
"host": "",
"port": "",
"virtualhost": "",
},
:param connection_parameters: Dict with connection parameters. See above for its format.
:return: EventsQueueClient instance.
"""
# Can not pass empty password when connecting. Must remove the field completely.
if not connection_parameters.get("password", ""):
connection_parameters.pop("password", None)
self._connection_parameters = connection_parameters or {}
self._transport = None
self._protocol = None
self._channel = None
self._exchange_name = exchange_name
self._exchange_type = exchange_type
self._global_qos = global_qos
self._serializer = self._get_serializer()
self._is_connecting = False
self._connection_guid = str(uuid4())
self._known_queues = {}
self._routing = {}
def _get_serializer(self):
# TODO: Make serializer configurable here
return JSONSerializer()
@property
def connected(self):
return self._channel is not None and self._channel.is_open
@property
def is_connecting(self) -> bool:
return self._is_connecting
async def connect(self):
"""
Create new asynchronous connection to the RabbitMQ instance.
This will connect, declare exchange and bind itself to the configured queue.
After that, client is ready to publish or consume messages.
:return: Does not return anything.
"""
if self.connected or self.is_connecting:
return
self._is_connecting = True
try:
logger.info("Connecting to RabbitMQ...")
self._transport, self._protocol = await aioamqp.connect(**self._connection_parameters)
logger.info("Getting channel...")
self._channel = await self._protocol.channel()
if self._global_qos is not None:
logger.info("Setting prefetch count on connection (%s)", self._global_qos)
await self._channel.basic_qos(0, self._global_qos, 1)
logger.info("Connecting to exchange '%s (%s)'", self._exchange_name, self._exchange_type)
await self._channel.exchange(self._exchange_name, self._exchange_type)
except (aioamqp.AmqpClosedConnection, Exception):
logger.error("Error initializing RabbitMQ connection", exc_info=True)
self._is_connecting = False
raise exceptions.StreamConnectionError
self._is_connecting = False
async def close(self):
self._protocol.stop()
await self._channel.close()
async def publish(self, data: Transferrable, topic: AnyStr) -> None:
if not self.connected:
logger.warning("Attempted to send message while not connected")
return
body = self._serializer.serialize(data)
await self._channel.publish(
body,
exchange_name=self._exchange_name,
routing_key=topic
)
# Uncomment for debugging
# logger.debug("Published message to AMQP exchange=%s, topic=%s", self._exchange_name, topic)
async def _declare_queue(self, queue_name: AnyStr) -> None:
logger.info("Declaring queue...")
queue_declaration = await self._channel.queue_declare(queue_name)
queue_name = queue_declaration.get("queue")
logger.info("Declared queue '%s'", queue_name)
async def _bind_key_to_queue(self, routing_key: AnyStr, queue_name: AnyStr) -> None:
"""
Bind to queue with specified routing key.
:param routing_key: Routing key to bind with.
:param queue_name: Name of the queue
:return: Does not return anything
"""
logger.info("Binding key='%s'", routing_key)
result = await self._channel.queue_bind(
exchange_name=self._exchange_name,
queue_name=queue_name,
routing_key=routing_key,
)
return result
async def _on_message(self, channel, body, envelope, properties) -> None:
"""
Fires up when message is received by this consumer.
:param channel: Channel, through which message is received
:param body: Body of the message (serialized).
:param envelope: Envelope object with message meta
:type envelope: aioamqp.Envelope
:param properties: Properties of the message
:return: Coroutine object with result of message handling operation
"""
subscribers = self._get_subscribers(envelope.routing_key)
if not subscribers:
logger.debug("No route for message with key '%s'", envelope.routing_key)
return
body = self._serializer.deserialize(body)
for subscriber in subscribers:
# Check later if ensure_future can be applied here
await subscriber.on_message(body, envelope.routing_key)
await self._channel.basic_client_ack(envelope.delivery_tag)
def _get_subscribers(self, incoming_routing_key: AnyStr) -> Sequence[AbstractSubscriber]:
for key, subscribers in self._routing.items():
if fnmatch(incoming_routing_key, key):
return subscribers
return tuple()
def _add_to_known_queue(self, queue_name: AnyStr) -> None:
self._known_queues[queue_name] = {
"bound_keys": set(),
}
|
anti1869/sunhead
|
src/sunhead/events/transports/amqp.py
|
AMQPClient._bind_key_to_queue
|
python
|
async def _bind_key_to_queue(self, routing_key: AnyStr, queue_name: AnyStr) -> None:
    """
    Bind the queue to the exchange with the specified routing key.

    :param routing_key: Routing key to bind with.
    :param queue_name: Name of the queue.
    :return: Result of the broker's ``queue_bind`` call — despite the ``None``
        annotation; presumably callers ignore it, confirm before relying on it.
    """
    logger.info("Binding key='%s'", routing_key)
    result = await self._channel.queue_bind(
        exchange_name=self._exchange_name,
        queue_name=queue_name,
        routing_key=routing_key,
    )
    return result
|
Bind to queue with specified routing key.
:param routing_key: Routing key to bind with.
:param queue_name: Name of the queue
:return: Does not return anything
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/events/transports/amqp.py#L179-L194
| null |
class AMQPClient(AbstractTransport):
"""
Handy implementation of the asynchronous AMQP client.
Useful for building both publishers and consumers.
"""
DEFAULT_EXCHANGE_NAME = "default_exchange"
DEFAULT_EXCHANGE_TYPE = "topic"
def __init__(
self,
connection_parameters: dict,
exchange_name: str = DEFAULT_EXCHANGE_NAME,
exchange_type: str = DEFAULT_EXCHANGE_TYPE,
global_qos: Optional[int] = None,
**kwargs):
"""
There must be at least these members of the connection_parameters dict::
"connection_parameters": {
"login": "",
"password": "",
"host": "",
"port": "",
"virtualhost": "",
},
:param connection_parameters: Dict with connection parameters. See above for its format.
:return: EventsQueueClient instance.
"""
# Can not pass empty password when connecting. Must remove the field completely.
if not connection_parameters.get("password", ""):
connection_parameters.pop("password", None)
self._connection_parameters = connection_parameters or {}
self._transport = None
self._protocol = None
self._channel = None
self._exchange_name = exchange_name
self._exchange_type = exchange_type
self._global_qos = global_qos
self._serializer = self._get_serializer()
self._is_connecting = False
self._connection_guid = str(uuid4())
self._known_queues = {}
self._routing = {}
def _get_serializer(self):
# TODO: Make serializer configurable here
return JSONSerializer()
@property
def connected(self):
return self._channel is not None and self._channel.is_open
@property
def is_connecting(self) -> bool:
return self._is_connecting
async def connect(self):
"""
Create new asynchronous connection to the RabbitMQ instance.
This will connect, declare exchange and bind itself to the configured queue.
After that, client is ready to publish or consume messages.
:return: Does not return anything.
"""
if self.connected or self.is_connecting:
return
self._is_connecting = True
try:
logger.info("Connecting to RabbitMQ...")
self._transport, self._protocol = await aioamqp.connect(**self._connection_parameters)
logger.info("Getting channel...")
self._channel = await self._protocol.channel()
if self._global_qos is not None:
logger.info("Setting prefetch count on connection (%s)", self._global_qos)
await self._channel.basic_qos(0, self._global_qos, 1)
logger.info("Connecting to exchange '%s (%s)'", self._exchange_name, self._exchange_type)
await self._channel.exchange(self._exchange_name, self._exchange_type)
except (aioamqp.AmqpClosedConnection, Exception):
logger.error("Error initializing RabbitMQ connection", exc_info=True)
self._is_connecting = False
raise exceptions.StreamConnectionError
self._is_connecting = False
async def close(self):
self._protocol.stop()
await self._channel.close()
async def publish(self, data: Transferrable, topic: AnyStr) -> None:
if not self.connected:
logger.warning("Attempted to send message while not connected")
return
body = self._serializer.serialize(data)
await self._channel.publish(
body,
exchange_name=self._exchange_name,
routing_key=topic
)
# Uncomment for debugging
# logger.debug("Published message to AMQP exchange=%s, topic=%s", self._exchange_name, topic)
async def consume_queue(self, subscriber: AbstractSubscriber) -> None:
"""
Subscribe to the queue consuming.
:param subscriber:
:return:
"""
queue_name = subscriber.name
topics = subscriber.requested_topics
if queue_name in self._known_queues:
raise exceptions.ConsumerError("Queue '%s' already being consumed" % queue_name)
await self._declare_queue(queue_name)
# TODO: There is a lot of room to improvement here. Figure out routing done the right way
for key in topics:
self._routing.setdefault(key, set())
if subscriber in self._routing[key]:
logger.warning("Subscriber '%s' already receiving routing_key '%s'", subscriber, key)
break
await self._bind_key_to_queue(key, queue_name)
self._routing[key].add(subscriber)
logger.info("Consuming queue '%s'", queue_name)
await asyncio.wait_for(
self._channel.basic_consume(callback=self._on_message, queue_name=queue_name),
timeout=10
)
self._add_to_known_queue(queue_name)
async def _declare_queue(self, queue_name: AnyStr) -> None:
logger.info("Declaring queue...")
queue_declaration = await self._channel.queue_declare(queue_name)
queue_name = queue_declaration.get("queue")
logger.info("Declared queue '%s'", queue_name)
async def _on_message(self, channel, body, envelope, properties) -> None:
"""
Fires up when message is received by this consumer.
:param channel: Channel, through which message is received
:param body: Body of the message (serialized).
:param envelope: Envelope object with message meta
:type envelope: aioamqp.Envelope
:param properties: Properties of the message
:return: Coroutine object with result of message handling operation
"""
subscribers = self._get_subscribers(envelope.routing_key)
if not subscribers:
logger.debug("No route for message with key '%s'", envelope.routing_key)
return
body = self._serializer.deserialize(body)
for subscriber in subscribers:
# Check later if ensure_future can be applied here
await subscriber.on_message(body, envelope.routing_key)
await self._channel.basic_client_ack(envelope.delivery_tag)
def _get_subscribers(self, incoming_routing_key: AnyStr) -> Sequence[AbstractSubscriber]:
for key, subscribers in self._routing.items():
if fnmatch(incoming_routing_key, key):
return subscribers
return tuple()
def _add_to_known_queue(self, queue_name: AnyStr) -> None:
self._known_queues[queue_name] = {
"bound_keys": set(),
}
|
anti1869/sunhead
|
src/sunhead/events/transports/amqp.py
|
AMQPClient._on_message
|
python
|
async def _on_message(self, channel, body, envelope, properties) -> None:
    """
    Handle a delivered message: route to matching subscribers, then ack.

    :param channel: Channel through which the message was received.
    :param body: Serialized message body.
    :param envelope: Message meta (carries ``routing_key`` and ``delivery_tag``).
    :param properties: Message properties (unused here).
    """
    subscribers = self._get_subscribers(envelope.routing_key)
    if not subscribers:
        # No subscriber matches this routing key; return without acking.
        logger.debug("No route for message with key '%s'", envelope.routing_key)
        return
    body = self._serializer.deserialize(body)
    for subscriber in subscribers:
        # Check later if ensure_future can be applied here
        await subscriber.on_message(body, envelope.routing_key)
    # Ack only after every subscriber has processed the message.
    await self._channel.basic_client_ack(envelope.delivery_tag)
|
Fires up when message is received by this consumer.
:param channel: Channel, through which message is received
:param body: Body of the message (serialized).
:param envelope: Envelope object with message meta
:type envelope: aioamqp.Envelope
:param properties: Properties of the message
:return: Coroutine object with result of message handling operation
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/events/transports/amqp.py#L196-L219
| null |
class AMQPClient(AbstractTransport):
"""
Handy implementation of the asynchronous AMQP client.
Useful for building both publishers and consumers.
"""
DEFAULT_EXCHANGE_NAME = "default_exchange"
DEFAULT_EXCHANGE_TYPE = "topic"
def __init__(
self,
connection_parameters: dict,
exchange_name: str = DEFAULT_EXCHANGE_NAME,
exchange_type: str = DEFAULT_EXCHANGE_TYPE,
global_qos: Optional[int] = None,
**kwargs):
"""
There must be at least these members of the connection_parameters dict::
"connection_parameters": {
"login": "",
"password": "",
"host": "",
"port": "",
"virtualhost": "",
},
:param connection_parameters: Dict with connection parameters. See above for its format.
:return: EventsQueueClient instance.
"""
# Can not pass empty password when connecting. Must remove the field completely.
if not connection_parameters.get("password", ""):
connection_parameters.pop("password", None)
self._connection_parameters = connection_parameters or {}
self._transport = None
self._protocol = None
self._channel = None
self._exchange_name = exchange_name
self._exchange_type = exchange_type
self._global_qos = global_qos
self._serializer = self._get_serializer()
self._is_connecting = False
self._connection_guid = str(uuid4())
self._known_queues = {}
self._routing = {}
def _get_serializer(self):
# TODO: Make serializer configurable here
return JSONSerializer()
@property
def connected(self):
return self._channel is not None and self._channel.is_open
@property
def is_connecting(self) -> bool:
return self._is_connecting
async def connect(self):
"""
Create new asynchronous connection to the RabbitMQ instance.
This will connect, declare exchange and bind itself to the configured queue.
After that, client is ready to publish or consume messages.
:return: Does not return anything.
"""
if self.connected or self.is_connecting:
return
self._is_connecting = True
try:
logger.info("Connecting to RabbitMQ...")
self._transport, self._protocol = await aioamqp.connect(**self._connection_parameters)
logger.info("Getting channel...")
self._channel = await self._protocol.channel()
if self._global_qos is not None:
logger.info("Setting prefetch count on connection (%s)", self._global_qos)
await self._channel.basic_qos(0, self._global_qos, 1)
logger.info("Connecting to exchange '%s (%s)'", self._exchange_name, self._exchange_type)
await self._channel.exchange(self._exchange_name, self._exchange_type)
except (aioamqp.AmqpClosedConnection, Exception):
logger.error("Error initializing RabbitMQ connection", exc_info=True)
self._is_connecting = False
raise exceptions.StreamConnectionError
self._is_connecting = False
async def close(self):
self._protocol.stop()
await self._channel.close()
async def publish(self, data: Transferrable, topic: AnyStr) -> None:
if not self.connected:
logger.warning("Attempted to send message while not connected")
return
body = self._serializer.serialize(data)
await self._channel.publish(
body,
exchange_name=self._exchange_name,
routing_key=topic
)
# Uncomment for debugging
# logger.debug("Published message to AMQP exchange=%s, topic=%s", self._exchange_name, topic)
async def consume_queue(self, subscriber: AbstractSubscriber) -> None:
"""
Subscribe to the queue consuming.
:param subscriber:
:return:
"""
queue_name = subscriber.name
topics = subscriber.requested_topics
if queue_name in self._known_queues:
raise exceptions.ConsumerError("Queue '%s' already being consumed" % queue_name)
await self._declare_queue(queue_name)
# TODO: There is a lot of room to improvement here. Figure out routing done the right way
for key in topics:
self._routing.setdefault(key, set())
if subscriber in self._routing[key]:
logger.warning("Subscriber '%s' already receiving routing_key '%s'", subscriber, key)
break
await self._bind_key_to_queue(key, queue_name)
self._routing[key].add(subscriber)
logger.info("Consuming queue '%s'", queue_name)
await asyncio.wait_for(
self._channel.basic_consume(callback=self._on_message, queue_name=queue_name),
timeout=10
)
self._add_to_known_queue(queue_name)
async def _declare_queue(self, queue_name: AnyStr) -> None:
logger.info("Declaring queue...")
queue_declaration = await self._channel.queue_declare(queue_name)
queue_name = queue_declaration.get("queue")
logger.info("Declared queue '%s'", queue_name)
async def _bind_key_to_queue(self, routing_key: AnyStr, queue_name: AnyStr) -> None:
"""
Bind to queue with specified routing key.
:param routing_key: Routing key to bind with.
:param queue_name: Name of the queue
:return: Does not return anything
"""
logger.info("Binding key='%s'", routing_key)
result = await self._channel.queue_bind(
exchange_name=self._exchange_name,
queue_name=queue_name,
routing_key=routing_key,
)
return result
def _get_subscribers(self, incoming_routing_key: AnyStr) -> Sequence[AbstractSubscriber]:
for key, subscribers in self._routing.items():
if fnmatch(incoming_routing_key, key):
return subscribers
return tuple()
def _add_to_known_queue(self, queue_name: AnyStr) -> None:
self._known_queues[queue_name] = {
"bound_keys": set(),
}
|
anti1869/sunhead
|
src/sunhead/metrics/factory.py
|
Metrics._disable_prometheus_process_collector
|
python
|
def _disable_prometheus_process_collector(self) -> None:
    """
    Unregister prometheus_client's default process collector.

    There is a bug in SDC's Docker implementation, due to which the default
    process collector will fail; see
    https://github.com/prometheus/client_python/issues/80
    """
    logger.info("Removing prometheus process collector")
    try:
        core.REGISTRY.unregister(PROCESS_COLLECTOR)
    except KeyError:
        # Already unregistered — nothing to do.
        logger.debug("PROCESS_COLLECTOR already removed from prometheus")
|
There is a bug in SDC' Docker implementation and intolerable prometheus_client code, due to which
its process_collector will fail.
See https://github.com/prometheus/client_python/issues/80
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/metrics/factory.py#L63-L74
| null |
class Metrics(object):
    """
    Registry for application metrics backed by prometheus_client.

    Holds counters, gauges, summaries and histograms keyed by name, and can
    render all of them as a text snapshot (prometheus exposition format).
    """

    # Not sure it's needed
    DEFAULT_APP_PREFIX = "sunhead"
    SNAPSHOT_PROMETHEUS = "prometheus"

    def __init__(self):
        self._app_prefix = self.DEFAULT_APP_PREFIX
        # One dict per metric kind, keyed by metric name.
        self._data = {
            "counters": {},
            "gauges": {},
            "summaries": {},
            "histograms": {},
        }
        self._process_collector = None
        # Use our own process collector (and drop prometheus' built-in one)
        # unless process metrics are disabled or the prometheus one is forced.
        if not getattr(settings, "DISABLE_PROCESS_METRICS", False) \
                and not getattr(settings, "USE_PROMETHEUS_PROCESS_METRICS", False):
            self._process_collector = ProcessCollector()
            self._process_collector.set_name_formatter(self.prefix)
            self._disable_prometheus_process_collector()

    @property
    def counters(self) -> Dict:
        """Registered Counter metrics, keyed by name."""
        return self._data["counters"]

    @property
    def gauges(self) -> Dict:
        """Registered Gauge metrics, keyed by name."""
        return self._data["gauges"]

    @property
    def summaries(self) -> Dict:
        """Registered Summary metrics, keyed by name."""
        return self._data["summaries"]

    @property
    def histograms(self) -> Dict:
        """Registered Histogram metrics, keyed by name."""
        return self._data["histograms"]

    @property
    def all_metrics(self) -> Iterable:
        """Iterate over every registered metric of every kind."""
        all_metrics = chain(
            self.counters.values(), self.gauges.values(), self.summaries.values(), self.histograms.values()
        )
        return all_metrics

    def add_counter(self, name: str, *args) -> None:
        """Register a new Counter; raises DuplicateMetricException on name clash."""
        if name in self.counters:
            raise DuplicateMetricException("Counter %s already exist" % name)
        self.counters[name] = (Counter(name, *args))

    def add_gauge(self, name: str, *args) -> None:
        """Register a new Gauge; raises DuplicateMetricException on name clash."""
        if name in self.gauges:
            raise DuplicateMetricException("Gauge %s already exist" % name)
        self.gauges[name] = (Gauge(name, *args))

    def add_summary(self, name: str, *args) -> None:
        """Register a new Summary; raises DuplicateMetricException on name clash."""
        if name in self.summaries:
            raise DuplicateMetricException("Summary %s already exist" % name)
        self.summaries[name] = (Summary(name, *args))

    def add_histogram(self, name: str, *args, **kwargs) -> None:
        """Register a new Histogram; raises DuplicateMetricException on name clash."""
        if name in self.histograms:
            raise DuplicateMetricException("Histogram %s already exist" % name)
        self.histograms[name] = (Histogram(name, *args, **kwargs))

    def text_snapshot(self, output_format: str = SNAPSHOT_PROMETHEUS) -> str:
        """
        Render all metrics as text in ``output_format``.

        Dispatches to a ``_get_<format>_snapshot`` method; raises
        IncorrectMetricsSnapshotFormatException for unknown formats.
        """
        fn_name = "_get_{}_snapshot".format(output_format)
        if not hasattr(self, fn_name):
            raise IncorrectMetricsSnapshotFormatException("No such snapshot format: %s" % output_format)
        result = getattr(self, fn_name)()
        return result

    def _get_prometheus_snapshot(self) -> str:
        # Actually, this will produce all registered metrics, from all Metrics instances,
        # due to the ``core.REGISTRY`` nature.
        # Will fix it sometimes later.
        snapshot = generate_latest(core.REGISTRY).decode()
        if self._process_collector is not None:
            snapshot += self._process_collector.text_snapshot()
        return snapshot

    @property
    def app_name_prefix(self) -> str:
        """Prefix prepended to metric names by :meth:`prefix`."""
        return self._app_prefix

    @app_name_prefix.setter
    def app_name_prefix(self, value: str) -> None:
        self._app_prefix = value

    def prefix(self, name: str):
        """
        Prefix metrics name with configured app prefix. Use as shortcut.

        :param name: Metric name (or any string, really).
        :return: ``"<app_prefix>_<name>"``.
        """
        return "{}_{}".format(self.app_name_prefix, name)
|
anti1869/sunhead
|
src/sunhead/events/stream.py
|
init_stream_from_settings
|
python
|
async def init_stream_from_settings(cfg: dict) -> Stream:
    """Create, connect and register the Stream selected by ``cfg['active_stream']``.

    :param cfg: Settings dict with a ``streams`` mapping of init kwargs and an
        ``active_stream`` key naming the one to instantiate.
    :return: The connected :class:`Stream` instance (also pushed to storage).
    """
    active_name = cfg["active_stream"]
    init_kwargs = cfg["streams"][active_name]
    stream = Stream(**init_kwargs)
    await stream.connect()
    _stream_storage.push(active_name, stream)
    return stream
|
Shortcut to create Stream from configured settings.
Will definitely fail if there is no meaningful configuration provided. Example of such is::
{
"streams": {
"rabbitmq": {
"transport": "sunhead.events.transports.amqp.AMQPClient",
"connection_parameters": {
"login": "guest",
"password": "",
"host": "localhost",
"port": 5672,
"virtualhost": "video",
},
"exchange_name": "video_bus",
"exchange_type": "topic",
"global_qos": None,
},
"kafka": {},
},
"active_stream": "rabbitmq",
}
:return: Instantiated Stream object.
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/events/stream.py#L124-L157
|
[
"async def connect(self):\n if self.connected or self._transport.is_connecting:\n return\n\n try:\n await self._transport.connect()\n except StreamConnectionError:\n logger.error(\"Can't initialize Stream connection\", exc_info=True)\n finally:\n self._reconnecter.start()\n",
"def push(self, key: str, value: Stream):\n if not len(self):\n self[self.DEFAULT_KEY] = value\n self[key] = value\n"
] |
"""
Stream is a flow of messages in one particular bus. It could be RabbitMQ exchange, for example.
You can subscribe to messages from the Stream, organize distributed dequeuing or publish data there.
"""
import asyncio
from importlib import import_module
import logging
from typing import Sequence, AnyStr
from sunhead.events.abc import AbstractSubscriber, AbstractTransport, SingleConnectionMeta
from sunhead.events.exceptions import StreamConnectionError
from sunhead.periodical import crontab
from sunhead.events.types import Transferrable
logger = logging.getLogger(__name__)
DEFAULT_TRANSPORT = "brandt.events.transports.amqp.AMQPClient"
__all__ = ("Stream", "get_stream", "init_stream_from_settings")
class Stream(object):
    """
    Flow of messages in one particular bus (e.g. a RabbitMQ exchange).

    Wraps a pluggable transport, keeps the connection alive with a periodic
    reconnect job, and exposes publish / dequeue operations.
    """

    # How often (seconds) the reconnect cron checks the connection.
    CONNECTION_CHECK_SECS = 20

    def __init__(self, transport=DEFAULT_TRANSPORT, **transport_init_kwargs):
        self._transport_name = transport
        self._transport_class = self._get_transport_class(self._transport_name)
        self._transport = self._init_transport(self._transport_class, transport_init_kwargs)
        # Periodic connection watchdog; started lazily from connect().
        self._reconnecter = crontab(
            "* * * * * */{}".format(self.CONNECTION_CHECK_SECS), func=self._reconnect, start=False)
        self._reconnect_attempts = 0

    def _get_transport_class(self, transport_name) -> type:
        """Import and return the transport class named by its dotted path."""
        module_name, class_name = transport_name.rsplit(".", 1)
        try:
            transport_module = import_module(module_name)
        except ImportError:
            logger.error("Can't import transport with name `%s`", module_name)
            raise
        try:
            transport_class = getattr(transport_module, class_name)
        except AttributeError:
            logger.error("Can't get transport class `%s` from `%s`", class_name, module_name)
            raise
        return transport_class

    def _init_transport(self, transport_class, transport_init_kwargs) -> AbstractTransport:
        """Instantiate the transport with the kwargs the Stream was given."""
        return transport_class(**transport_init_kwargs)

    async def connect(self):
        """Connect the transport (no-op when connected/connecting); start the reconnecter."""
        if self.connected or self._transport.is_connecting:
            return
        try:
            await self._transport.connect()
        except StreamConnectionError:
            logger.error("Can't initialize Stream connection", exc_info=True)
        finally:
            # Start the watchdog even if the first attempt failed, so it retries.
            self._reconnecter.start()

    async def _reconnect(self):
        """Cron callback: try to re-establish the connection when it is down."""
        logger.debug("Checking Stream connection is alive")
        if self.connected or self._transport.is_connecting:
            return
        logger.info("Trying to reconnect Events Stream")
        self._reconnect_attempts += 1
        try:
            await self.connect()
        except StreamConnectionError:
            logger.info("Unsuccessfull attempt to reconnect #%s", self._reconnect_attempts)
        else:
            self._reconnect_attempts = 0

    @property
    def connected(self) -> bool:
        """Whether the underlying transport reports an established connection."""
        return self._transport.connected

    async def publish(self, data: Transferrable, topics: Sequence[AnyStr]) -> None:
        """Publish ``data`` to each topic in ``topics`` sequentially."""
        for topic in topics:
            # asyncio.ensure_future(self._transport.publish(data, topic))
            # Here's the deal. If multiple ``publish`` occurs immediately in a cycle,
            # ensure_future will only happen after all this cycle completes, or there
            # will be possibility window. So maybe better to use await here?
            await self._transport.publish(data, topic)

    async def subscribe(self, subscriber: AbstractSubscriber, topics: Sequence[AnyStr]) -> None:
        """Not implemented yet — use :meth:`dequeue` for queue consumption."""
        raise NotImplementedError

    async def dequeue(self, subscriber: AbstractSubscriber) -> None:
        """Start consuming the subscriber's queue via the transport."""
        await self._transport.consume_queue(subscriber)

    async def close(self):
        """Close the transport connection."""
        logger.info("Closing Stream")
        await self._transport.close()
class StreamStorage(dict):
    """Mapping of stream name -> Stream; the first stream pushed becomes the default."""

    DEFAULT_KEY = "default"

    def get_default(self) -> Stream:
        """Return the stream stored under the default key, or ``None``."""
        return self.get(self.DEFAULT_KEY, None)

    def push(self, key: str, value: Stream):
        """Store ``value`` under ``key``; the very first push also sets the default."""
        if not self:
            self[self.DEFAULT_KEY] = value
        self[key] = value
_stream_storage = StreamStorage()
def get_stream(name: str = None) -> Stream:
    """Fetch a registered stream by name, or the default stream when no name is given."""
    if name:
        return _stream_storage.get(name)
    return _stream_storage.get_default()
|
anti1869/sunhead
|
src/sunhead/workers/http/ext/runtime.py
|
RuntimeStatsView.get
|
python
|
async def get(self):
context_data = self.get_context_data()
context_data.update(getattr(self.request.app, "stats", {}))
response = self.json_response(context_data)
return response
|
Printing runtime statistics in JSON
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/workers/http/ext/runtime.py#L56-L63
| null |
class RuntimeStatsView(JSONView):
    """View exposing runtime statistics (currently the package version) as JSON."""

    def get_context_data(self):
        """Base context for the stats response."""
        # TODO: Add more useful stuff here
        context_data = {
            "pkg_version": getattr(settings, "PKG_VERSION", None),
        }
        return context_data
|
anti1869/sunhead
|
src/sunhead/cli/banners.py
|
print_banner
|
python
|
def print_banner(filename: str, template: str = DEFAULT_BANNER_TEMPLATE) -> None:
    """
    Print text file to output.

    :param filename: Which file to print.
    :param template: Format string which specifies banner arrangement.
    :return: Does not return anything
    """
    if not os.path.isfile(filename):
        logger.warning("Can't find logo banner at %s", filename)
        return
    with open(filename, "r") as banner_file:
        banner_text = banner_file.read()
    print(template.format(banner_text))
|
Print text file to output.
:param filename: Which file to print.
:param template: Format string which specified banner arrangement.
:return: Does not return anything
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/cli/banners.py#L15-L31
| null |
"""
Cozy utility to print ASCII banners and other stuff for your CLI app.
"""
import logging
import os
logger = logging.getLogger(__name__)
DEFAULT_BANNER_TEMPLATE = "\n\n{}\n"
|
anti1869/sunhead
|
src/sunhead/utils.py
|
get_class_by_path
|
python
|
def get_class_by_path(class_path: str, is_module: Optional[bool] = False) -> type:
    """
    Get class by its name within a package structure.

    :param class_path: E.g. brandt.some.module.ClassName
    :param is_module: Whether the last item is a module rather than a class name
    :return: Class (or module) ready to be used.
    """
    if is_module:
        try:
            return importlib.import_module(class_path)
        except ImportError:
            logger.warning("Can't import backend with name `%s`", class_path)
            raise
    module_name, class_name = class_path.rsplit('.', 1)
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        logger.error("Can't import backend with name `%s`", module_name)
        raise
    try:
        return getattr(module, class_name)
    except AttributeError:
        logger.error("Can't get backend class `%s` from `%s`", class_name, module_name)
        raise
|
Get class by its name within a package structure.
:param class_path: E.g. brandt.some.module.ClassName
:param is_module: Whether last item is module rather than class name
:return: Class ready to be instantiated.
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/utils.py#L26-L58
| null |
"""
Helper utilities that everyone needs.
"""
import asyncio
from collections import namedtuple, OrderedDict
from datetime import datetime
from enum import Enum
import importlib
import logging
import pkgutil
import re
from typing import Sequence, Tuple, Dict, Optional, Any
from dateutil import tz
from sunhead.conf import settings
logger = logging.getLogger(__name__)
ModuleDescription = namedtuple("ModuleDescription", "name path is_package")
def get_submodule_list(package_path: str) -> Tuple[ModuleDescription, ...]:
    """Get list of submodules for some package by its path. E.g ``pkg.subpackage``"""
    package = importlib.import_module(package_path)
    descriptions = []
    for _importer, module_name, is_pkg in pkgutil.iter_modules(package.__path__):
        descriptions.append(
            ModuleDescription(
                name=module_name,
                path="{}.{}".format(package_path, module_name),
                is_package=is_pkg,
            )
        )
    return tuple(descriptions)
def camel_to_underscore(kls: type) -> str:
    """Convert a class' CamelCase name to snake_case."""
    chunks = re.findall('[A-Z][^A-Z]*', kls.__name__)
    return '_'.join(chunk.lower() for chunk in chunks)
# LibraryObjectDefinition = namedtuple('NamedObjectDefinition', 'name kls')
# TODO: Activate when will implement recursive submodule list
# def get_classes_from_libraries(object_libraries, base_class) -> Set[LibraryObjectDefinition]:
# definitions = set()
# for path in object_libraries:
# modules = get_submodule_list(path, recursive=True)
# for module in map(lambda x: get_backend_class(x.path, is_module=True), modules):
# block_classes = get_subclasses_from_module(module, base_class)
# for kls in block_classes:
# definitions.add(
# LibraryObjectDefinition(camel_to_underscore(kls), kls)
# )
# return definitions
async def parallel_results(future_map: Sequence[Tuple]) -> Dict:
    """
    Run parallel execution of futures and return mapping of their results to the provided keys.

    Just a neat shortcut around ``asyncio.gather()``

    :param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content', get_content()) )
    :return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'}
    """
    ordered = OrderedDict(future_map)
    gathered = await asyncio.gather(*ordered.values())
    return dict(zip(ordered.keys(), gathered))
def positive_int(integer_string: str, strict: bool = False, cutoff: Optional[int] = None) -> int:
    """
    Cast a string to a non-negative integer.

    :param integer_string: String to convert (anything ``int()`` accepts).
    :param strict: When True, zero is rejected as well.
    :param cutoff: Optional upper bound the result is clamped to.
    :raises ValueError: If the value is negative, or zero with ``strict=True``.
    :return: The parsed (possibly clamped) integer.
    """
    ret = int(integer_string)
    if ret < 0 or (ret == 0 and strict):
        # Include the offending input in the error instead of a bare ValueError().
        raise ValueError("Expected a positive integer, got %r" % integer_string)
    if cutoff:
        ret = min(ret, cutoff)
    return ret
def get_configured_tz_now() -> datetime:
    """Return the current time, timezone-aware, in the zone named by ``settings.TZ``.

    Falls back to 'Europe/Moscow' when the setting is absent.
    """
    tzname = getattr(settings, 'TZ', 'Europe/Moscow')
    tzinfo = tz.gettz(tzname)
    now = datetime.now(tzinfo)
    return now
def choices_from_enum(source: Enum) -> Tuple[Tuple[Any, str], ...]:
    """
    Makes tuple to use in Django's Fields ``choices`` attribute.

    Enum members names will be titles for the choices.

    :param source: Enum to process.
    :return: Tuple to put into ``choices``
    """
    choices = []
    for member in source:
        choices.append((member.value, member.name.title()))
    return tuple(choices)
|
anti1869/sunhead
|
src/sunhead/utils.py
|
get_submodule_list
|
python
|
def get_submodule_list(package_path: str) -> Tuple[ModuleDescription, ...]:
pkg = importlib.import_module(package_path)
subs = (
ModuleDescription(
name=modname,
path="{}.{}".format(package_path, modname), is_package=ispkg
)
for importer, modname, ispkg in pkgutil.iter_modules(pkg.__path__)
)
result = tuple(subs)
return result
|
Get list of submodules for some package by its path. E.g ``pkg.subpackage``
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/utils.py#L61-L73
| null |
"""
Helper utilities that everyone needs.
"""
import asyncio
from collections import namedtuple, OrderedDict
from datetime import datetime
from enum import Enum
import importlib
import logging
import pkgutil
import re
from typing import Sequence, Tuple, Dict, Optional, Any
from dateutil import tz
from sunhead.conf import settings
logger = logging.getLogger(__name__)
ModuleDescription = namedtuple("ModuleDescription", "name path is_package")
def get_class_by_path(class_path: str, is_module: Optional[bool] = False) -> type:
"""
Get class by its name within a package structure.
:param class_path: E.g. brandt.some.module.ClassName
:param is_module: Whether last item is module rather than class name
:return: Class ready to be instantiated.
"""
if is_module:
try:
backend_module = importlib.import_module(class_path)
except ImportError:
logger.warning("Can't import backend with name `%s`", class_path)
raise
else:
return backend_module
module_name, class_name = class_path.rsplit('.', 1)
try:
backend_module = importlib.import_module(module_name)
except ImportError:
logger.error("Can't import backend with name `%s`", module_name)
raise
try:
backend_class = getattr(backend_module, class_name)
except AttributeError:
logger.error("Can't get backend class `%s` from `%s`", class_name, module_name)
raise
return backend_class
def camel_to_underscore(kls: type) -> str:
parts = re.findall('[A-Z][^A-Z]*', kls.__name__)
name = '_'.join(part.lower() for part in parts)
return name
# LibraryObjectDefinition = namedtuple('NamedObjectDefinition', 'name kls')
# TODO: Activate when will implement recursive submodule list
# def get_classes_from_libraries(object_libraries, base_class) -> Set[LibraryObjectDefinition]:
# definitions = set()
# for path in object_libraries:
# modules = get_submodule_list(path, recursive=True)
# for module in map(lambda x: get_backend_class(x.path, is_module=True), modules):
# block_classes = get_subclasses_from_module(module, base_class)
# for kls in block_classes:
# definitions.add(
# LibraryObjectDefinition(camel_to_underscore(kls), kls)
# )
# return definitions
async def parallel_results(future_map: Sequence[Tuple]) -> Dict:
"""
Run parallel execution of futures and return mapping of their results to the provided keys.
Just a neat shortcut around ``asyncio.gather()``
:param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content, get_content()) )
:return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'}
"""
ctx_methods = OrderedDict(future_map)
fs = list(ctx_methods.values())
results = await asyncio.gather(*fs)
results = {
key: results[idx] for idx, key in enumerate(ctx_methods.keys())
}
return results
def positive_int(integer_string: str, strict: bool = False, cutoff: Optional[int] = None) -> int:
"""
Cast a string to a strictly positive integer.
"""
ret = int(integer_string)
if ret < 0 or (ret == 0 and strict):
raise ValueError()
if cutoff:
ret = min(ret, cutoff)
return ret
def get_configured_tz_now() -> datetime:
tzname = getattr(settings, 'TZ', 'Europe/Moscow')
tzinfo = tz.gettz(tzname)
now = datetime.now(tzinfo)
return now
def choices_from_enum(source: Enum) -> Tuple[Tuple[Any, str], ...]:
"""
Makes tuple to use in Django's Fields ``choices`` attribute.
Enum members names will be titles for the choices.
:param source: Enum to process.
:return: Tuple to put into ``choices``
"""
result = tuple((s.value, s.name.title()) for s in source)
return result
|
anti1869/sunhead
|
src/sunhead/utils.py
|
parallel_results
|
python
|
async def parallel_results(future_map: Sequence[Tuple]) -> Dict:
ctx_methods = OrderedDict(future_map)
fs = list(ctx_methods.values())
results = await asyncio.gather(*fs)
results = {
key: results[idx] for idx, key in enumerate(ctx_methods.keys())
}
return results
|
Run parallel execution of futures and return mapping of their results to the provided keys.
Just a neat shortcut around ``asyncio.gather()``
:param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content, get_content()) )
:return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'}
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/utils.py#L99-L113
| null |
"""
Helper utilities that everyone needs.
"""
import asyncio
from collections import namedtuple, OrderedDict
from datetime import datetime
from enum import Enum
import importlib
import logging
import pkgutil
import re
from typing import Sequence, Tuple, Dict, Optional, Any
from dateutil import tz
from sunhead.conf import settings
logger = logging.getLogger(__name__)
ModuleDescription = namedtuple("ModuleDescription", "name path is_package")
def get_class_by_path(class_path: str, is_module: Optional[bool] = False) -> type:
"""
Get class by its name within a package structure.
:param class_path: E.g. brandt.some.module.ClassName
:param is_module: Whether last item is module rather than class name
:return: Class ready to be instantiated.
"""
if is_module:
try:
backend_module = importlib.import_module(class_path)
except ImportError:
logger.warning("Can't import backend with name `%s`", class_path)
raise
else:
return backend_module
module_name, class_name = class_path.rsplit('.', 1)
try:
backend_module = importlib.import_module(module_name)
except ImportError:
logger.error("Can't import backend with name `%s`", module_name)
raise
try:
backend_class = getattr(backend_module, class_name)
except AttributeError:
logger.error("Can't get backend class `%s` from `%s`", class_name, module_name)
raise
return backend_class
def get_submodule_list(package_path: str) -> Tuple[ModuleDescription, ...]:
"""Get list of submodules for some package by its path. E.g ``pkg.subpackage``"""
pkg = importlib.import_module(package_path)
subs = (
ModuleDescription(
name=modname,
path="{}.{}".format(package_path, modname), is_package=ispkg
)
for importer, modname, ispkg in pkgutil.iter_modules(pkg.__path__)
)
result = tuple(subs)
return result
def camel_to_underscore(kls: type) -> str:
parts = re.findall('[A-Z][^A-Z]*', kls.__name__)
name = '_'.join(part.lower() for part in parts)
return name
# LibraryObjectDefinition = namedtuple('NamedObjectDefinition', 'name kls')
# TODO: Activate when will implement recursive submodule list
# def get_classes_from_libraries(object_libraries, base_class) -> Set[LibraryObjectDefinition]:
# definitions = set()
# for path in object_libraries:
# modules = get_submodule_list(path, recursive=True)
# for module in map(lambda x: get_backend_class(x.path, is_module=True), modules):
# block_classes = get_subclasses_from_module(module, base_class)
# for kls in block_classes:
# definitions.add(
# LibraryObjectDefinition(camel_to_underscore(kls), kls)
# )
# return definitions
def positive_int(integer_string: str, strict: bool = False, cutoff: Optional[int] = None) -> int:
"""
Cast a string to a strictly positive integer.
"""
ret = int(integer_string)
if ret < 0 or (ret == 0 and strict):
raise ValueError()
if cutoff:
ret = min(ret, cutoff)
return ret
def get_configured_tz_now() -> datetime:
tzname = getattr(settings, 'TZ', 'Europe/Moscow')
tzinfo = tz.gettz(tzname)
now = datetime.now(tzinfo)
return now
def choices_from_enum(source: Enum) -> Tuple[Tuple[Any, str], ...]:
"""
Makes tuple to use in Django's Fields ``choices`` attribute.
Enum members names will be titles for the choices.
:param source: Enum to process.
:return: Tuple to put into ``choices``
"""
result = tuple((s.value, s.name.title()) for s in source)
return result
|
anti1869/sunhead
|
src/sunhead/utils.py
|
positive_int
|
python
|
def positive_int(integer_string: str, strict: bool = False, cutoff: Optional[int] = None) -> int:
ret = int(integer_string)
if ret < 0 or (ret == 0 and strict):
raise ValueError()
if cutoff:
ret = min(ret, cutoff)
return ret
|
Cast a string to a strictly positive integer.
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/utils.py#L116-L125
| null |
"""
Helper utilities that everyone needs.
"""
import asyncio
from collections import namedtuple, OrderedDict
from datetime import datetime
from enum import Enum
import importlib
import logging
import pkgutil
import re
from typing import Sequence, Tuple, Dict, Optional, Any
from dateutil import tz
from sunhead.conf import settings
logger = logging.getLogger(__name__)
ModuleDescription = namedtuple("ModuleDescription", "name path is_package")
def get_class_by_path(class_path: str, is_module: Optional[bool] = False) -> type:
"""
Get class by its name within a package structure.
:param class_path: E.g. brandt.some.module.ClassName
:param is_module: Whether last item is module rather than class name
:return: Class ready to be instantiated.
"""
if is_module:
try:
backend_module = importlib.import_module(class_path)
except ImportError:
logger.warning("Can't import backend with name `%s`", class_path)
raise
else:
return backend_module
module_name, class_name = class_path.rsplit('.', 1)
try:
backend_module = importlib.import_module(module_name)
except ImportError:
logger.error("Can't import backend with name `%s`", module_name)
raise
try:
backend_class = getattr(backend_module, class_name)
except AttributeError:
logger.error("Can't get backend class `%s` from `%s`", class_name, module_name)
raise
return backend_class
def get_submodule_list(package_path: str) -> Tuple[ModuleDescription, ...]:
"""Get list of submodules for some package by its path. E.g ``pkg.subpackage``"""
pkg = importlib.import_module(package_path)
subs = (
ModuleDescription(
name=modname,
path="{}.{}".format(package_path, modname), is_package=ispkg
)
for importer, modname, ispkg in pkgutil.iter_modules(pkg.__path__)
)
result = tuple(subs)
return result
def camel_to_underscore(kls: type) -> str:
parts = re.findall('[A-Z][^A-Z]*', kls.__name__)
name = '_'.join(part.lower() for part in parts)
return name
# LibraryObjectDefinition = namedtuple('NamedObjectDefinition', 'name kls')
# TODO: Activate when will implement recursive submodule list
# def get_classes_from_libraries(object_libraries, base_class) -> Set[LibraryObjectDefinition]:
# definitions = set()
# for path in object_libraries:
# modules = get_submodule_list(path, recursive=True)
# for module in map(lambda x: get_backend_class(x.path, is_module=True), modules):
# block_classes = get_subclasses_from_module(module, base_class)
# for kls in block_classes:
# definitions.add(
# LibraryObjectDefinition(camel_to_underscore(kls), kls)
# )
# return definitions
async def parallel_results(future_map: Sequence[Tuple]) -> Dict:
"""
Run parallel execution of futures and return mapping of their results to the provided keys.
Just a neat shortcut around ``asyncio.gather()``
:param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content, get_content()) )
:return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'}
"""
ctx_methods = OrderedDict(future_map)
fs = list(ctx_methods.values())
results = await asyncio.gather(*fs)
results = {
key: results[idx] for idx, key in enumerate(ctx_methods.keys())
}
return results
def get_configured_tz_now() -> datetime:
tzname = getattr(settings, 'TZ', 'Europe/Moscow')
tzinfo = tz.gettz(tzname)
now = datetime.now(tzinfo)
return now
def choices_from_enum(source: Enum) -> Tuple[Tuple[Any, str], ...]:
"""
Makes tuple to use in Django's Fields ``choices`` attribute.
Enum members names will be titles for the choices.
:param source: Enum to process.
:return: Tuple to put into ``choices``
"""
result = tuple((s.value, s.name.title()) for s in source)
return result
|
anti1869/sunhead
|
src/sunhead/utils.py
|
choices_from_enum
|
python
|
def choices_from_enum(source: Enum) -> Tuple[Tuple[Any, str], ...]:
result = tuple((s.value, s.name.title()) for s in source)
return result
|
Makes tuple to use in Django's Fields ``choices`` attribute.
Enum members names will be titles for the choices.
:param source: Enum to process.
:return: Tuple to put into ``choices``
|
train
|
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/utils.py#L135-L144
| null |
"""
Helper utilities that everyone needs.
"""
import asyncio
from collections import namedtuple, OrderedDict
from datetime import datetime
from enum import Enum
import importlib
import logging
import pkgutil
import re
from typing import Sequence, Tuple, Dict, Optional, Any
from dateutil import tz
from sunhead.conf import settings
logger = logging.getLogger(__name__)
ModuleDescription = namedtuple("ModuleDescription", "name path is_package")
def get_class_by_path(class_path: str, is_module: Optional[bool] = False) -> type:
"""
Get class by its name within a package structure.
:param class_path: E.g. brandt.some.module.ClassName
:param is_module: Whether last item is module rather than class name
:return: Class ready to be instantiated.
"""
if is_module:
try:
backend_module = importlib.import_module(class_path)
except ImportError:
logger.warning("Can't import backend with name `%s`", class_path)
raise
else:
return backend_module
module_name, class_name = class_path.rsplit('.', 1)
try:
backend_module = importlib.import_module(module_name)
except ImportError:
logger.error("Can't import backend with name `%s`", module_name)
raise
try:
backend_class = getattr(backend_module, class_name)
except AttributeError:
logger.error("Can't get backend class `%s` from `%s`", class_name, module_name)
raise
return backend_class
def get_submodule_list(package_path: str) -> Tuple[ModuleDescription, ...]:
"""Get list of submodules for some package by its path. E.g ``pkg.subpackage``"""
pkg = importlib.import_module(package_path)
subs = (
ModuleDescription(
name=modname,
path="{}.{}".format(package_path, modname), is_package=ispkg
)
for importer, modname, ispkg in pkgutil.iter_modules(pkg.__path__)
)
result = tuple(subs)
return result
def camel_to_underscore(kls: type) -> str:
parts = re.findall('[A-Z][^A-Z]*', kls.__name__)
name = '_'.join(part.lower() for part in parts)
return name
# LibraryObjectDefinition = namedtuple('NamedObjectDefinition', 'name kls')
# TODO: Activate when will implement recursive submodule list
# def get_classes_from_libraries(object_libraries, base_class) -> Set[LibraryObjectDefinition]:
# definitions = set()
# for path in object_libraries:
# modules = get_submodule_list(path, recursive=True)
# for module in map(lambda x: get_backend_class(x.path, is_module=True), modules):
# block_classes = get_subclasses_from_module(module, base_class)
# for kls in block_classes:
# definitions.add(
# LibraryObjectDefinition(camel_to_underscore(kls), kls)
# )
# return definitions
async def parallel_results(future_map: Sequence[Tuple]) -> Dict:
"""
Run parallel execution of futures and return mapping of their results to the provided keys.
Just a neat shortcut around ``asyncio.gather()``
:param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content, get_content()) )
:return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'}
"""
ctx_methods = OrderedDict(future_map)
fs = list(ctx_methods.values())
results = await asyncio.gather(*fs)
results = {
key: results[idx] for idx, key in enumerate(ctx_methods.keys())
}
return results
def positive_int(integer_string: str, strict: bool = False, cutoff: Optional[int] = None) -> int:
"""
Cast a string to a strictly positive integer.
"""
ret = int(integer_string)
if ret < 0 or (ret == 0 and strict):
raise ValueError()
if cutoff:
ret = min(ret, cutoff)
return ret
def get_configured_tz_now() -> datetime:
tzname = getattr(settings, 'TZ', 'Europe/Moscow')
tzinfo = tz.gettz(tzname)
now = datetime.now(tzinfo)
return now
|
svasilev94/GraphLibrary
|
graphlibrary/dijkstra.py
|
dijkstra
|
python
|
def dijkstra(G, start, weight='weight'):
    """
    Compute shortest path lengths from ``start`` to every reachable vertex
    of a weighted graph.

    return -> ({vertex: distance from start, }, {vertex: predecessor, })
    """
    if start not in G.vertices:
        raise GraphInsertError("Vertex %s doesn't exist." % (start,))
    distances = {start: 0}
    predecessors = {}
    remaining = set(G.vertices.keys())
    while remaining:
        # Pick the unvisited vertex with the smallest known distance.
        reached = [v for v in remaining if v in distances]
        if not reached:
            break  # the rest of the graph is unreachable from start
        closest = min(reached, key=distances.__getitem__)
        remaining.remove(closest)
        base = distances[closest]
        for neighbor in G.vertices[closest]:
            candidate = base + G.edges[(closest, neighbor)][weight]
            if neighbor not in distances or candidate < distances[neighbor]:
                distances[neighbor] = candidate
                predecessors[neighbor] = closest
    return distances, predecessors
|
Compute shortest path length between start
and all other reachable nodes for a weight graph.
return -> ({vertex: weight from start, }, {vertex: predecessor, })
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/dijkstra.py#L6-L32
| null |
from graphlibrary import digraph
from graphlibrary import graph
from graphlibrary.exceptions import *
def dijkstra(G, start, weight='weight'):
"""
Compute shortest path length between satrt
and all other reachable nodes for a weight graph.
return -> ({vertex: weight form start, }, {vertex: predeseccor, })
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
visited = {start: 0}
path = {}
vertices = set(G.vertices.keys())
while vertices:
min_vertex = None
for vertex in vertices:
if vertex in visited:
if min_vertex is None or visited[vertex] < visited[min_vertex]:
min_vertex = vertex
if min_vertex is None:
break
vertices.remove(min_vertex)
current_weight = visited[min_vertex]
for edge in G.vertices[min_vertex]:
edge_weight = current_weight + G.edges[(min_vertex, edge)][weight]
if edge not in visited or edge_weight < visited[edge]:
visited[edge] = edge_weight
path[edge] = min_vertex
return visited, path
def dijkstra_single_path_length(G, start, end):
"""
Compute shortest path length between satrt
and end for a weight graph. return -> (length, [path])
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
if end not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (end,))
dijkstra_data = dijkstra(G, start)
length = dijkstra_data[0][end]
path = [end]
current = end
while current is not start:
for vertex in dijkstra_data[1]:
if vertex is current:
path.append(dijkstra_data[1][vertex])
current = dijkstra_data[1][vertex]
break
return length, path
|
svasilev94/GraphLibrary
|
graphlibrary/dijkstra.py
|
dijkstra_single_path_length
|
python
|
def dijkstra_single_path_length(G, start, end):
"""
Compute shortest path length between satrt
and end for a weight graph. return -> (length, [path])
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
if end not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (end,))
dijkstra_data = dijkstra(G, start)
length = dijkstra_data[0][end]
path = [end]
current = end
while current is not start:
for vertex in dijkstra_data[1]:
if vertex is current:
path.append(dijkstra_data[1][vertex])
current = dijkstra_data[1][vertex]
break
return length, path
|
Compute shortest path length between satrt
and end for a weight graph. return -> (length, [path])
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/dijkstra.py#L35-L54
|
[
"def dijkstra(G, start, weight='weight'):\n \"\"\"\n Compute shortest path length between satrt\n and all other reachable nodes for a weight graph.\n return -> ({vertex: weight form start, }, {vertex: predeseccor, })\n \"\"\"\n if start not in G.vertices:\n raise GraphInsertError(\"Vertex %s doesn't exist.\" % (start,))\n visited = {start: 0}\n path = {}\n vertices = set(G.vertices.keys())\n while vertices:\n min_vertex = None\n for vertex in vertices:\n if vertex in visited:\n if min_vertex is None or visited[vertex] < visited[min_vertex]:\n min_vertex = vertex\n if min_vertex is None:\n break\n vertices.remove(min_vertex)\n current_weight = visited[min_vertex]\n for edge in G.vertices[min_vertex]:\n edge_weight = current_weight + G.edges[(min_vertex, edge)][weight]\n if edge not in visited or edge_weight < visited[edge]:\n visited[edge] = edge_weight\n path[edge] = min_vertex\n return visited, path\n"
] |
from graphlibrary import digraph
from graphlibrary import graph
from graphlibrary.exceptions import *
def dijkstra(G, start, weight='weight'):
"""
Compute shortest path length between satrt
and all other reachable nodes for a weight graph.
return -> ({vertex: weight form start, }, {vertex: predeseccor, })
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
visited = {start: 0}
path = {}
vertices = set(G.vertices.keys())
while vertices:
min_vertex = None
for vertex in vertices:
if vertex in visited:
if min_vertex is None or visited[vertex] < visited[min_vertex]:
min_vertex = vertex
if min_vertex is None:
break
vertices.remove(min_vertex)
current_weight = visited[min_vertex]
for edge in G.vertices[min_vertex]:
edge_weight = current_weight + G.edges[(min_vertex, edge)][weight]
if edge not in visited or edge_weight < visited[edge]:
visited[edge] = edge_weight
path[edge] = min_vertex
return visited, path
def dijkstra_single_path_length(G, start, end):
"""
Compute shortest path length between satrt
and end for a weight graph. return -> (length, [path])
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
if end not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (end,))
dijkstra_data = dijkstra(G, start)
length = dijkstra_data[0][end]
path = [end]
current = end
while current is not start:
for vertex in dijkstra_data[1]:
if vertex is current:
path.append(dijkstra_data[1][vertex])
current = dijkstra_data[1][vertex]
break
return length, path
|
svasilev94/GraphLibrary
|
graphlibrary/digraph.py
|
DiGraph.add_vertex
|
python
|
def add_vertex(self, vertex, **attr):
"""
Add vertex and update vertex attributes
"""
self.vertices[vertex] = []
if attr:
self.nodes[vertex] = attr
self.pred[vertex] = []
self.succ[vertex] = []
|
Add vertex and update vertex attributes
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/digraph.py#L25-L33
| null |
class DiGraph(Graph):
"""
Class for directed graphs
"""
def __init__(self):
"""
Initialize directed graphs
vertices dictionary -> {u: [neigbours], }
edges dictionary -> {(u, v): {data: 'info', }, }
vertex attributes dictionary -> {u: {data: 'info', }, }
vertex predecessors dictionary -> {u: [predecessors], }
vertex successors dictionary -> {u: [successors], }
"""
self.vertices = {}
self.edges = {}
self.nodes = {}
self.pred = {}
self.succ = {}
def add_vertex(self, vertex, **attr):
"""
Add vertex and update vertex attributes
"""
self.vertices[vertex] = []
if attr:
self.nodes[vertex] = attr
self.pred[vertex] = []
self.succ[vertex] = []
def add_edge(self, u, v, **attr):
"""
Add an edge from u to v and update edge attributes
"""
if u not in self.vertices:
self.vertices[u] = []
self.pred[u] = []
self.succ[u] = []
if v not in self.vertices:
self.vertices[v] = []
self.pred[v] = []
self.succ[v] = []
vertex = (u, v)
self.edges[vertex] = {}
self.edges[vertex].update(attr)
self.vertices[u].append(v)
self.pred[v].append(u)
self.succ[u].append(v)
def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
self.succ.pop(vertex)
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
if vertex in self.nodes:
self.nodes.pop(vertex)
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex)
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element)
for element in edges:
del self.edges[element]
for element in self.pred:
if vertex in self.pred[element]:
self.pred[element].remove(vertex)
for element in self.succ:
if vertex in self.succ[element]:
self.succ[element].remove(vertex)
def remove_edge(self, u, v):
"""
Remove the edge from u to v
"""
try:
self.edges.pop((u, v))
except KeyError:
raise GraphInsertError("Edge %s-%s doesn't exist." % (u, v))
self.vertices[u].remove(v)
self.pred[v].remove(u)
self.succ[u].remove(v)
def is_directed(self):
"""
Return True if graph is directed, False otherwise
"""
return True
def has_successor(self, u, v):
"""
Check if vertex u has successor v
"""
if u not in self.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (u,))
return (u in self.succ and v in self.succ[u])
def has_predecessor(self, u, v):
"""
Check if vertex u has predecessor v
"""
if u not in self.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (u,))
return(u in self.pred and v in self.pred[u])
def in_degree(self, vertex):
"""
Return the in-degree of a vertex
"""
try:
return len(self.pred[vertex])
except:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
def out_degree(self, vertex):
"""
Return the out-degree of a vertex
"""
try:
return len(self.succ[vertex])
except:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
|
svasilev94/GraphLibrary
|
graphlibrary/digraph.py
|
DiGraph.add_edge
|
python
|
def add_edge(self, u, v, **attr):
"""
Add an edge from u to v and update edge attributes
"""
if u not in self.vertices:
self.vertices[u] = []
self.pred[u] = []
self.succ[u] = []
if v not in self.vertices:
self.vertices[v] = []
self.pred[v] = []
self.succ[v] = []
vertex = (u, v)
self.edges[vertex] = {}
self.edges[vertex].update(attr)
self.vertices[u].append(v)
self.pred[v].append(u)
self.succ[u].append(v)
|
Add an edge from u to v and update edge attributes
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/digraph.py#L35-L52
| null |
class DiGraph(Graph):
"""
Class for directed graphs
"""
def __init__(self):
"""
Initialize directed graphs
vertices dictionary -> {u: [neigbours], }
edges dictionary -> {(u, v): {data: 'info', }, }
vertex attributes dictionary -> {u: {data: 'info', }, }
vertex predecessors dictionary -> {u: [predecessors], }
vertex successors dictionary -> {u: [successors], }
"""
self.vertices = {}
self.edges = {}
self.nodes = {}
self.pred = {}
self.succ = {}
def add_vertex(self, vertex, **attr):
"""
Add vertex and update vertex attributes
"""
self.vertices[vertex] = []
if attr:
self.nodes[vertex] = attr
self.pred[vertex] = []
self.succ[vertex] = []
def add_edge(self, u, v, **attr):
"""
Add an edge from u to v and update edge attributes
"""
if u not in self.vertices:
self.vertices[u] = []
self.pred[u] = []
self.succ[u] = []
if v not in self.vertices:
self.vertices[v] = []
self.pred[v] = []
self.succ[v] = []
vertex = (u, v)
self.edges[vertex] = {}
self.edges[vertex].update(attr)
self.vertices[u].append(v)
self.pred[v].append(u)
self.succ[u].append(v)
def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
self.succ.pop(vertex)
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
if vertex in self.nodes:
self.nodes.pop(vertex)
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex)
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element)
for element in edges:
del self.edges[element]
for element in self.pred:
if vertex in self.pred[element]:
self.pred[element].remove(vertex)
for element in self.succ:
if vertex in self.succ[element]:
self.succ[element].remove(vertex)
def remove_edge(self, u, v):
"""
Remove the edge from u to v
"""
try:
self.edges.pop((u, v))
except KeyError:
raise GraphInsertError("Edge %s-%s doesn't exist." % (u, v))
self.vertices[u].remove(v)
self.pred[v].remove(u)
self.succ[u].remove(v)
def is_directed(self):
"""
Return True if graph is directed, False otherwise
"""
return True
def has_successor(self, u, v):
"""
Check if vertex u has successor v
"""
if u not in self.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (u,))
return (u in self.succ and v in self.succ[u])
def has_predecessor(self, u, v):
"""
Check if vertex u has predecessor v
"""
if u not in self.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (u,))
return(u in self.pred and v in self.pred[u])
def in_degree(self, vertex):
"""
Return the in-degree of a vertex
"""
try:
return len(self.pred[vertex])
except:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
def out_degree(self, vertex):
"""
Return the out-degree of a vertex
"""
try:
return len(self.succ[vertex])
except:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
|
svasilev94/GraphLibrary
|
graphlibrary/digraph.py
|
DiGraph.remove_vertex
|
python
|
def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
self.succ.pop(vertex)
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
if vertex in self.nodes:
self.nodes.pop(vertex)
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex)
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element)
for element in edges:
del self.edges[element]
for element in self.pred:
if vertex in self.pred[element]:
self.pred[element].remove(vertex)
for element in self.succ:
if vertex in self.succ[element]:
self.succ[element].remove(vertex)
|
Remove vertex from G
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/digraph.py#L54-L79
| null |
class DiGraph(Graph):
"""
Class for directed graphs
"""
def __init__(self):
"""
Initialize directed graphs
vertices dictionary -> {u: [neigbours], }
edges dictionary -> {(u, v): {data: 'info', }, }
vertex attributes dictionary -> {u: {data: 'info', }, }
vertex predecessors dictionary -> {u: [predecessors], }
vertex successors dictionary -> {u: [successors], }
"""
self.vertices = {}
self.edges = {}
self.nodes = {}
self.pred = {}
self.succ = {}
def add_vertex(self, vertex, **attr):
"""
Add vertex and update vertex attributes
"""
self.vertices[vertex] = []
if attr:
self.nodes[vertex] = attr
self.pred[vertex] = []
self.succ[vertex] = []
def add_edge(self, u, v, **attr):
"""
Add an edge from u to v and update edge attributes
"""
if u not in self.vertices:
self.vertices[u] = []
self.pred[u] = []
self.succ[u] = []
if v not in self.vertices:
self.vertices[v] = []
self.pred[v] = []
self.succ[v] = []
vertex = (u, v)
self.edges[vertex] = {}
self.edges[vertex].update(attr)
self.vertices[u].append(v)
self.pred[v].append(u)
self.succ[u].append(v)
def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
self.succ.pop(vertex)
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
if vertex in self.nodes:
self.nodes.pop(vertex)
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex)
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element)
for element in edges:
del self.edges[element]
for element in self.pred:
if vertex in self.pred[element]:
self.pred[element].remove(vertex)
for element in self.succ:
if vertex in self.succ[element]:
self.succ[element].remove(vertex)
def remove_edge(self, u, v):
"""
Remove the edge from u to v
"""
try:
self.edges.pop((u, v))
except KeyError:
raise GraphInsertError("Edge %s-%s doesn't exist." % (u, v))
self.vertices[u].remove(v)
self.pred[v].remove(u)
self.succ[u].remove(v)
def is_directed(self):
"""
Return True if graph is directed, False otherwise
"""
return True
def has_successor(self, u, v):
"""
Check if vertex u has successor v
"""
if u not in self.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (u,))
return (u in self.succ and v in self.succ[u])
def has_predecessor(self, u, v):
"""
Check if vertex u has predecessor v
"""
if u not in self.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (u,))
return(u in self.pred and v in self.pred[u])
def in_degree(self, vertex):
"""
Return the in-degree of a vertex
"""
try:
return len(self.pred[vertex])
except:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
def out_degree(self, vertex):
"""
Return the out-degree of a vertex
"""
try:
return len(self.succ[vertex])
except:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
|
svasilev94/GraphLibrary
|
graphlibrary/digraph.py
|
DiGraph.has_successor
|
python
|
def has_successor(self, u, v):
"""
Check if vertex u has successor v
"""
if u not in self.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (u,))
return (u in self.succ and v in self.succ[u])
|
Check if vertex u has successor v
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/digraph.py#L99-L105
| null |
class DiGraph(Graph):
"""
Class for directed graphs
"""
def __init__(self):
"""
Initialize directed graphs
vertices dictionary -> {u: [neigbours], }
edges dictionary -> {(u, v): {data: 'info', }, }
vertex attributes dictionary -> {u: {data: 'info', }, }
vertex predecessors dictionary -> {u: [predecessors], }
vertex successors dictionary -> {u: [successors], }
"""
self.vertices = {}
self.edges = {}
self.nodes = {}
self.pred = {}
self.succ = {}
def add_vertex(self, vertex, **attr):
"""
Add vertex and update vertex attributes
"""
self.vertices[vertex] = []
if attr:
self.nodes[vertex] = attr
self.pred[vertex] = []
self.succ[vertex] = []
def add_edge(self, u, v, **attr):
"""
Add an edge from u to v and update edge attributes
"""
if u not in self.vertices:
self.vertices[u] = []
self.pred[u] = []
self.succ[u] = []
if v not in self.vertices:
self.vertices[v] = []
self.pred[v] = []
self.succ[v] = []
vertex = (u, v)
self.edges[vertex] = {}
self.edges[vertex].update(attr)
self.vertices[u].append(v)
self.pred[v].append(u)
self.succ[u].append(v)
def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
self.succ.pop(vertex)
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
if vertex in self.nodes:
self.nodes.pop(vertex)
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex)
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element)
for element in edges:
del self.edges[element]
for element in self.pred:
if vertex in self.pred[element]:
self.pred[element].remove(vertex)
for element in self.succ:
if vertex in self.succ[element]:
self.succ[element].remove(vertex)
def remove_edge(self, u, v):
"""
Remove the edge from u to v
"""
try:
self.edges.pop((u, v))
except KeyError:
raise GraphInsertError("Edge %s-%s doesn't exist." % (u, v))
self.vertices[u].remove(v)
self.pred[v].remove(u)
self.succ[u].remove(v)
def is_directed(self):
"""
Return True if graph is directed, False otherwise
"""
return True
def has_successor(self, u, v):
"""
Check if vertex u has successor v
"""
if u not in self.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (u,))
return (u in self.succ and v in self.succ[u])
def has_predecessor(self, u, v):
"""
Check if vertex u has predecessor v
"""
if u not in self.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (u,))
return(u in self.pred and v in self.pred[u])
def in_degree(self, vertex):
"""
Return the in-degree of a vertex
"""
try:
return len(self.pred[vertex])
except:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
def out_degree(self, vertex):
"""
Return the out-degree of a vertex
"""
try:
return len(self.succ[vertex])
except:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
|
svasilev94/GraphLibrary
|
graphlibrary/digraph.py
|
DiGraph.has_predecessor
|
python
|
def has_predecessor(self, u, v):
"""
Check if vertex u has predecessor v
"""
if u not in self.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (u,))
return(u in self.pred and v in self.pred[u])
|
Check if vertex u has predecessor v
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/digraph.py#L107-L113
| null |
class DiGraph(Graph):
"""
Class for directed graphs
"""
def __init__(self):
"""
Initialize directed graphs
vertices dictionary -> {u: [neigbours], }
edges dictionary -> {(u, v): {data: 'info', }, }
vertex attributes dictionary -> {u: {data: 'info', }, }
vertex predecessors dictionary -> {u: [predecessors], }
vertex successors dictionary -> {u: [successors], }
"""
self.vertices = {}
self.edges = {}
self.nodes = {}
self.pred = {}
self.succ = {}
def add_vertex(self, vertex, **attr):
"""
Add vertex and update vertex attributes
"""
self.vertices[vertex] = []
if attr:
self.nodes[vertex] = attr
self.pred[vertex] = []
self.succ[vertex] = []
def add_edge(self, u, v, **attr):
"""
Add an edge from u to v and update edge attributes
"""
if u not in self.vertices:
self.vertices[u] = []
self.pred[u] = []
self.succ[u] = []
if v not in self.vertices:
self.vertices[v] = []
self.pred[v] = []
self.succ[v] = []
vertex = (u, v)
self.edges[vertex] = {}
self.edges[vertex].update(attr)
self.vertices[u].append(v)
self.pred[v].append(u)
self.succ[u].append(v)
def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
self.succ.pop(vertex)
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
if vertex in self.nodes:
self.nodes.pop(vertex)
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex)
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element)
for element in edges:
del self.edges[element]
for element in self.pred:
if vertex in self.pred[element]:
self.pred[element].remove(vertex)
for element in self.succ:
if vertex in self.succ[element]:
self.succ[element].remove(vertex)
def remove_edge(self, u, v):
"""
Remove the edge from u to v
"""
try:
self.edges.pop((u, v))
except KeyError:
raise GraphInsertError("Edge %s-%s doesn't exist." % (u, v))
self.vertices[u].remove(v)
self.pred[v].remove(u)
self.succ[u].remove(v)
def is_directed(self):
"""
Return True if graph is directed, False otherwise
"""
return True
def has_successor(self, u, v):
"""
Check if vertex u has successor v
"""
if u not in self.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (u,))
return (u in self.succ and v in self.succ[u])
def has_predecessor(self, u, v):
"""
Check if vertex u has predecessor v
"""
if u not in self.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (u,))
return(u in self.pred and v in self.pred[u])
def in_degree(self, vertex):
"""
Return the in-degree of a vertex
"""
try:
return len(self.pred[vertex])
except:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
def out_degree(self, vertex):
"""
Return the out-degree of a vertex
"""
try:
return len(self.succ[vertex])
except:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
|
svasilev94/GraphLibrary
|
graphlibrary/paths.py
|
find_all_paths
|
python
|
def find_all_paths(G, start, end, path=[]):
"""
Find all paths between vertices start and end in graph.
"""
path = path + [start]
if start == end:
return [path]
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
if end not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (end,))
paths = []
for vertex in G.vertices[start]:
if vertex not in path:
newpaths = find_all_paths(G, vertex, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths
|
Find all paths between vertices start and end in graph.
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/paths.py#L6-L23
|
[
"def find_all_paths(G, start, end, path=[]):\n \"\"\"\n Find all paths between vertices start and end in graph.\n \"\"\"\n path = path + [start]\n if start == end:\n return [path]\n if start not in G.vertices:\n raise GraphInsertError(\"Vertex %s doesn't exist.\" % (start,))\n if end not in G.vertices:\n raise GraphInsertError(\"Vertex %s doesn't exist.\" % (end,))\n paths = []\n for vertex in G.vertices[start]:\n if vertex not in path:\n newpaths = find_all_paths(G, vertex, end, path)\n for newpath in newpaths:\n paths.append(newpath)\n return paths\n"
] |
from graphlibrary import digraph
from graphlibrary import graph
from graphlibrary.exceptions import *
def find_all_paths(G, start, end, path=[]):
"""
Find all paths between vertices start and end in graph.
"""
path = path + [start]
if start == end:
return [path]
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
if end not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (end,))
paths = []
for vertex in G.vertices[start]:
if vertex not in path:
newpaths = find_all_paths(G, vertex, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths
def find_shortest_path(G, start, end):
"""
Find shortest path between vertices start and end in undirectetd graph.
"""
if start == end:
return []
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
if end not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (end,))
paths = find_all_paths(G, start, end)
shortest = list(G.vertices.keys())
for path in paths:
if len(path) < len(shortest):
shortest = path
return shortest
|
svasilev94/GraphLibrary
|
graphlibrary/first_search.py
|
BFS
|
python
|
def BFS(G, start):
"""
Algorithm for breadth-first searching the vertices of a graph.
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
color = {}
pred = {}
dist = {}
queue = Queue()
queue.put(start)
for vertex in G.vertices:
color[vertex] = 'white'
pred[vertex] = None
dist[vertex] = 0
while queue.qsize() > 0:
current = queue.get()
for neighbor in G.vertices[current]:
if color[neighbor] == 'white':
color[neighbor] = 'grey'
pred[neighbor] = current
dist[neighbor] = dist[current] + 1
queue.put(neighbor)
color[current] = 'black'
return pred
|
Algorithm for breadth-first searching the vertices of a graph.
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/first_search.py#L8-L32
| null |
from queue import Queue
from graphlibrary import digraph
from graphlibrary import graph
from graphlibrary.exceptions import *
def BFS(G, start):
"""
Algorithm for breadth-first searching the vertices of a graph.
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
color = {}
pred = {}
dist = {}
queue = Queue()
queue.put(start)
for vertex in G.vertices:
color[vertex] = 'white'
pred[vertex] = None
dist[vertex] = 0
while queue.qsize() > 0:
current = queue.get()
for neighbor in G.vertices[current]:
if color[neighbor] == 'white':
color[neighbor] = 'grey'
pred[neighbor] = current
dist[neighbor] = dist[current] + 1
queue.put(neighbor)
color[current] = 'black'
return pred
def BFS_Tree(G, start):
"""
Return an oriented tree constructed from bfs starting at 'start'.
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
pred = BFS(G, start)
T = digraph.DiGraph()
queue = Queue()
queue.put(start)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T
def DFS(G):
"""
Algorithm for depth-first searching the vertices of a graph.
"""
if not G.vertices:
raise GraphInsertError("This graph have no vertices.")
color = {}
pred = {}
reach = {}
finish = {}
def DFSvisit(G, current, time):
color[current] = 'grey'
time += 1
reach[current] = time
for vertex in G.vertices[current]:
if color[vertex] == 'white':
pred[vertex] = current
time = DFSvisit(G, vertex, time)
color[current] = 'black'
time += 1
finish[current] = time
return time
for vertex in G.vertices:
color[vertex] = 'white'
pred[vertex] = None
reach[vertex] = 0
finish[vertex] = 0
time = 0
for vertex in G.vertices:
if color[vertex] == 'white':
time = DFSvisit(G, vertex, time)
# Dictionary for vertex data after DFS
# -> vertex_data = {vertex: (predecessor, reach, finish), }
vertex_data = {}
for vertex in G.vertices:
vertex_data[vertex] = (pred[vertex], reach[vertex], finish[vertex])
return vertex_data
def DFS_Tree(G):
"""
Return an oriented tree constructed from dfs.
"""
if not G.vertices:
raise GraphInsertError("This graph have no vertices.")
pred = {}
T = digraph.DiGraph()
vertex_data = DFS(G)
for vertex in vertex_data:
pred[vertex] = vertex_data[vertex][0]
queue = Queue()
for vertex in pred:
if pred[vertex] == None:
queue.put(vertex)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T
|
svasilev94/GraphLibrary
|
graphlibrary/first_search.py
|
BFS_Tree
|
python
|
def BFS_Tree(G, start):
"""
Return an oriented tree constructed from bfs starting at 'start'.
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
pred = BFS(G, start)
T = digraph.DiGraph()
queue = Queue()
queue.put(start)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T
|
Return an oriented tree constructed from bfs starting at 'start'.
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/first_search.py#L35-L51
|
[
"def BFS(G, start):\n \"\"\"\n Algorithm for breadth-first searching the vertices of a graph.\n \"\"\"\n if start not in G.vertices:\n raise GraphInsertError(\"Vertex %s doesn't exist.\" % (start,))\n color = {}\n pred = {}\n dist = {}\n queue = Queue()\n queue.put(start)\n for vertex in G.vertices:\n color[vertex] = 'white'\n pred[vertex] = None\n dist[vertex] = 0\n while queue.qsize() > 0:\n current = queue.get()\n for neighbor in G.vertices[current]:\n if color[neighbor] == 'white':\n color[neighbor] = 'grey'\n pred[neighbor] = current\n dist[neighbor] = dist[current] + 1\n queue.put(neighbor)\n color[current] = 'black'\n return pred\n",
"def add_edge(self, u, v, **attr):\n \"\"\"\n Add an edge from u to v and update edge attributes\n \"\"\"\n if u not in self.vertices:\n self.vertices[u] = []\n self.pred[u] = []\n self.succ[u] = []\n if v not in self.vertices:\n self.vertices[v] = []\n self.pred[v] = []\n self.succ[v] = []\n vertex = (u, v)\n self.edges[vertex] = {}\n self.edges[vertex].update(attr)\n self.vertices[u].append(v)\n self.pred[v].append(u)\n self.succ[u].append(v)\n"
] |
from queue import Queue
from graphlibrary import digraph
from graphlibrary import graph
from graphlibrary.exceptions import *
def BFS(G, start):
"""
Algorithm for breadth-first searching the vertices of a graph.
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
color = {}
pred = {}
dist = {}
queue = Queue()
queue.put(start)
for vertex in G.vertices:
color[vertex] = 'white'
pred[vertex] = None
dist[vertex] = 0
while queue.qsize() > 0:
current = queue.get()
for neighbor in G.vertices[current]:
if color[neighbor] == 'white':
color[neighbor] = 'grey'
pred[neighbor] = current
dist[neighbor] = dist[current] + 1
queue.put(neighbor)
color[current] = 'black'
return pred
def BFS_Tree(G, start):
"""
Return an oriented tree constructed from bfs starting at 'start'.
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
pred = BFS(G, start)
T = digraph.DiGraph()
queue = Queue()
queue.put(start)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T
def DFS(G):
"""
Algorithm for depth-first searching the vertices of a graph.
"""
if not G.vertices:
raise GraphInsertError("This graph have no vertices.")
color = {}
pred = {}
reach = {}
finish = {}
def DFSvisit(G, current, time):
color[current] = 'grey'
time += 1
reach[current] = time
for vertex in G.vertices[current]:
if color[vertex] == 'white':
pred[vertex] = current
time = DFSvisit(G, vertex, time)
color[current] = 'black'
time += 1
finish[current] = time
return time
for vertex in G.vertices:
color[vertex] = 'white'
pred[vertex] = None
reach[vertex] = 0
finish[vertex] = 0
time = 0
for vertex in G.vertices:
if color[vertex] == 'white':
time = DFSvisit(G, vertex, time)
# Dictionary for vertex data after DFS
# -> vertex_data = {vertex: (predecessor, reach, finish), }
vertex_data = {}
for vertex in G.vertices:
vertex_data[vertex] = (pred[vertex], reach[vertex], finish[vertex])
return vertex_data
def DFS_Tree(G):
"""
Return an oriented tree constructed from dfs.
"""
if not G.vertices:
raise GraphInsertError("This graph have no vertices.")
pred = {}
T = digraph.DiGraph()
vertex_data = DFS(G)
for vertex in vertex_data:
pred[vertex] = vertex_data[vertex][0]
queue = Queue()
for vertex in pred:
if pred[vertex] == None:
queue.put(vertex)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T
|
svasilev94/GraphLibrary
|
graphlibrary/first_search.py
|
DFS
|
python
|
def DFS(G):
"""
Algorithm for depth-first searching the vertices of a graph.
"""
if not G.vertices:
raise GraphInsertError("This graph have no vertices.")
color = {}
pred = {}
reach = {}
finish = {}
def DFSvisit(G, current, time):
color[current] = 'grey'
time += 1
reach[current] = time
for vertex in G.vertices[current]:
if color[vertex] == 'white':
pred[vertex] = current
time = DFSvisit(G, vertex, time)
color[current] = 'black'
time += 1
finish[current] = time
return time
for vertex in G.vertices:
color[vertex] = 'white'
pred[vertex] = None
reach[vertex] = 0
finish[vertex] = 0
time = 0
for vertex in G.vertices:
if color[vertex] == 'white':
time = DFSvisit(G, vertex, time)
# Dictionary for vertex data after DFS
# -> vertex_data = {vertex: (predecessor, reach, finish), }
vertex_data = {}
for vertex in G.vertices:
vertex_data[vertex] = (pred[vertex], reach[vertex], finish[vertex])
return vertex_data
|
Algorithm for depth-first searching the vertices of a graph.
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/first_search.py#L54-L92
|
[
"def DFSvisit(G, current, time):\n color[current] = 'grey'\n time += 1\n reach[current] = time\n for vertex in G.vertices[current]:\n if color[vertex] == 'white':\n pred[vertex] = current\n time = DFSvisit(G, vertex, time)\n color[current] = 'black'\n time += 1\n finish[current] = time\n return time\n"
] |
from queue import Queue
from graphlibrary import digraph
from graphlibrary import graph
from graphlibrary.exceptions import *
def BFS(G, start):
"""
Algorithm for breadth-first searching the vertices of a graph.
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
color = {}
pred = {}
dist = {}
queue = Queue()
queue.put(start)
for vertex in G.vertices:
color[vertex] = 'white'
pred[vertex] = None
dist[vertex] = 0
while queue.qsize() > 0:
current = queue.get()
for neighbor in G.vertices[current]:
if color[neighbor] == 'white':
color[neighbor] = 'grey'
pred[neighbor] = current
dist[neighbor] = dist[current] + 1
queue.put(neighbor)
color[current] = 'black'
return pred
def BFS_Tree(G, start):
"""
Return an oriented tree constructed from bfs starting at 'start'.
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
pred = BFS(G, start)
T = digraph.DiGraph()
queue = Queue()
queue.put(start)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T
def DFS(G):
"""
Algorithm for depth-first searching the vertices of a graph.
"""
if not G.vertices:
raise GraphInsertError("This graph have no vertices.")
color = {}
pred = {}
reach = {}
finish = {}
def DFSvisit(G, current, time):
color[current] = 'grey'
time += 1
reach[current] = time
for vertex in G.vertices[current]:
if color[vertex] == 'white':
pred[vertex] = current
time = DFSvisit(G, vertex, time)
color[current] = 'black'
time += 1
finish[current] = time
return time
for vertex in G.vertices:
color[vertex] = 'white'
pred[vertex] = None
reach[vertex] = 0
finish[vertex] = 0
time = 0
for vertex in G.vertices:
if color[vertex] == 'white':
time = DFSvisit(G, vertex, time)
# Dictionary for vertex data after DFS
# -> vertex_data = {vertex: (predecessor, reach, finish), }
vertex_data = {}
for vertex in G.vertices:
vertex_data[vertex] = (pred[vertex], reach[vertex], finish[vertex])
return vertex_data
def DFS_Tree(G):
"""
Return an oriented tree constructed from dfs.
"""
if not G.vertices:
raise GraphInsertError("This graph have no vertices.")
pred = {}
T = digraph.DiGraph()
vertex_data = DFS(G)
for vertex in vertex_data:
pred[vertex] = vertex_data[vertex][0]
queue = Queue()
for vertex in pred:
if pred[vertex] == None:
queue.put(vertex)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T
|
svasilev94/GraphLibrary
|
graphlibrary/first_search.py
|
DFS_Tree
|
python
|
def DFS_Tree(G):
"""
Return an oriented tree constructed from dfs.
"""
if not G.vertices:
raise GraphInsertError("This graph have no vertices.")
pred = {}
T = digraph.DiGraph()
vertex_data = DFS(G)
for vertex in vertex_data:
pred[vertex] = vertex_data[vertex][0]
queue = Queue()
for vertex in pred:
if pred[vertex] == None:
queue.put(vertex)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T
|
Return an oriented tree constructed from dfs.
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/first_search.py#L95-L116
|
[
"def DFS(G):\n \"\"\"\n Algorithm for depth-first searching the vertices of a graph.\n \"\"\"\n if not G.vertices:\n raise GraphInsertError(\"This graph have no vertices.\")\n color = {}\n pred = {}\n reach = {}\n finish = {}\n\n def DFSvisit(G, current, time):\n color[current] = 'grey'\n time += 1\n reach[current] = time\n for vertex in G.vertices[current]:\n if color[vertex] == 'white':\n pred[vertex] = current\n time = DFSvisit(G, vertex, time)\n color[current] = 'black'\n time += 1\n finish[current] = time\n return time\n\n for vertex in G.vertices:\n color[vertex] = 'white'\n pred[vertex] = None\n reach[vertex] = 0\n finish[vertex] = 0\n time = 0\n for vertex in G.vertices:\n if color[vertex] == 'white':\n time = DFSvisit(G, vertex, time)\n # Dictionary for vertex data after DFS\n # -> vertex_data = {vertex: (predecessor, reach, finish), }\n vertex_data = {}\n for vertex in G.vertices:\n vertex_data[vertex] = (pred[vertex], reach[vertex], finish[vertex])\n return vertex_data\n",
"def add_edge(self, u, v, **attr):\n \"\"\"\n Add an edge from u to v and update edge attributes\n \"\"\"\n if u not in self.vertices:\n self.vertices[u] = []\n self.pred[u] = []\n self.succ[u] = []\n if v not in self.vertices:\n self.vertices[v] = []\n self.pred[v] = []\n self.succ[v] = []\n vertex = (u, v)\n self.edges[vertex] = {}\n self.edges[vertex].update(attr)\n self.vertices[u].append(v)\n self.pred[v].append(u)\n self.succ[u].append(v)\n"
] |
from queue import Queue
from graphlibrary import digraph
from graphlibrary import graph
from graphlibrary.exceptions import *
def BFS(G, start):
"""
Algorithm for breadth-first searching the vertices of a graph.
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
color = {}
pred = {}
dist = {}
queue = Queue()
queue.put(start)
for vertex in G.vertices:
color[vertex] = 'white'
pred[vertex] = None
dist[vertex] = 0
while queue.qsize() > 0:
current = queue.get()
for neighbor in G.vertices[current]:
if color[neighbor] == 'white':
color[neighbor] = 'grey'
pred[neighbor] = current
dist[neighbor] = dist[current] + 1
queue.put(neighbor)
color[current] = 'black'
return pred
def BFS_Tree(G, start):
"""
Return an oriented tree constructed from bfs starting at 'start'.
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
pred = BFS(G, start)
T = digraph.DiGraph()
queue = Queue()
queue.put(start)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T
def DFS(G):
"""
Algorithm for depth-first searching the vertices of a graph.
"""
if not G.vertices:
raise GraphInsertError("This graph have no vertices.")
color = {}
pred = {}
reach = {}
finish = {}
def DFSvisit(G, current, time):
color[current] = 'grey'
time += 1
reach[current] = time
for vertex in G.vertices[current]:
if color[vertex] == 'white':
pred[vertex] = current
time = DFSvisit(G, vertex, time)
color[current] = 'black'
time += 1
finish[current] = time
return time
for vertex in G.vertices:
color[vertex] = 'white'
pred[vertex] = None
reach[vertex] = 0
finish[vertex] = 0
time = 0
for vertex in G.vertices:
if color[vertex] == 'white':
time = DFSvisit(G, vertex, time)
# Dictionary for vertex data after DFS
# -> vertex_data = {vertex: (predecessor, reach, finish), }
vertex_data = {}
for vertex in G.vertices:
vertex_data[vertex] = (pred[vertex], reach[vertex], finish[vertex])
return vertex_data
def DFS_Tree(G):
"""
Return an oriented tree constructed from dfs.
"""
if not G.vertices:
raise GraphInsertError("This graph have no vertices.")
pred = {}
T = digraph.DiGraph()
vertex_data = DFS(G)
for vertex in vertex_data:
pred[vertex] = vertex_data[vertex][0]
queue = Queue()
for vertex in pred:
if pred[vertex] == None:
queue.put(vertex)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T
|
svasilev94/GraphLibrary
|
graphlibrary/prim.py
|
connected_components
|
python
|
def connected_components(G):
"""
Check if G is connected and return list of sets. Every
set contains all vertices in one connected component.
"""
result = []
vertices = set(G.vertices)
while vertices:
n = vertices.pop()
group = {n}
queue = Queue()
queue.put(n)
while not queue.empty():
n = queue.get()
neighbors = set(G.vertices[n])
neighbors.difference_update(group)
vertices.difference_update(neighbors)
group.update(neighbors)
for element in neighbors:
queue.put(element)
result.append(group)
return result
|
Check if G is connected and return list of sets. Every
set contains all vertices in one connected component.
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/prim.py#L8-L29
| null |
from queue import Queue
from graphlibrary import digraph
from graphlibrary import graph
from graphlibrary.exceptions import *
def connected_components(G):
"""
Check if G is connected and return list of sets. Every
set contains all vertices in one connected component.
"""
result = []
vertices = set(G.vertices)
while vertices:
n = vertices.pop()
group = {n}
queue = Queue()
queue.put(n)
while not queue.empty():
n = queue.get()
neighbors = set(G.vertices[n])
neighbors.difference_update(group)
vertices.difference_update(neighbors)
group.update(neighbors)
for element in neighbors:
queue.put(element)
result.append(group)
return result
def popmin(pqueue, lowest):
lowest = 1000
keylowest = None
for element in pqueue:
if pqueue[element] < lowest:
lowest = pqueue[element]
keylowest = element
del pqueue[keylowest]
return keylowest
def prim(G, start, weight='weight'):
"""
Algorithm for finding a minimum spanning
tree for a weighted undirected graph.
"""
if len(connected_components(G)) != 1:
raise GraphInsertError("Prim algorithm work with connected graph only")
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
pred = {}
key = {}
pqueue = {}
lowest = 0
for edge in G.edges:
if G.edges[edge][weight] > lowest:
lowest = G.edges[edge][weight]
for vertex in G.vertices:
pred[vertex] = None
key[vertex] = 2 * lowest
key[start] = 0
for vertex in G.vertices:
pqueue[vertex] = key[vertex]
while pqueue:
current = popmin(pqueue, lowest)
for neighbor in G.vertices[current]:
if (neighbor in pqueue and
G.edges[(current, neighbor)][weight] < key[neighbor]):
pred[neighbor] = current
key[neighbor] = G.edges[(current, neighbor)][weight]
pqueue[neighbor] = G.edges[(current, neighbor)][weight]
return pred
def prim_MST(G, start):
"""
Return an oriented tree constructed from prim algorithm starting at 'start'
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
pred = prim(G, start)
T = digraph.DiGraph()
queue = Queue()
queue.put(start)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T
|
svasilev94/GraphLibrary
|
graphlibrary/prim.py
|
prim
|
python
|
def prim(G, start, weight='weight'):
"""
Algorithm for finding a minimum spanning
tree for a weighted undirected graph.
"""
if len(connected_components(G)) != 1:
raise GraphInsertError("Prim algorithm work with connected graph only")
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
pred = {}
key = {}
pqueue = {}
lowest = 0
for edge in G.edges:
if G.edges[edge][weight] > lowest:
lowest = G.edges[edge][weight]
for vertex in G.vertices:
pred[vertex] = None
key[vertex] = 2 * lowest
key[start] = 0
for vertex in G.vertices:
pqueue[vertex] = key[vertex]
while pqueue:
current = popmin(pqueue, lowest)
for neighbor in G.vertices[current]:
if (neighbor in pqueue and
G.edges[(current, neighbor)][weight] < key[neighbor]):
pred[neighbor] = current
key[neighbor] = G.edges[(current, neighbor)][weight]
pqueue[neighbor] = G.edges[(current, neighbor)][weight]
return pred
|
Algorithm for finding a minimum spanning
tree for a weighted undirected graph.
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/prim.py#L43-L73
|
[
"def connected_components(G):\n \"\"\"\n Check if G is connected and return list of sets. Every\n set contains all vertices in one connected component.\n \"\"\"\n result = []\n vertices = set(G.vertices)\n while vertices:\n n = vertices.pop()\n group = {n}\n queue = Queue()\n queue.put(n)\n while not queue.empty():\n n = queue.get()\n neighbors = set(G.vertices[n])\n neighbors.difference_update(group)\n vertices.difference_update(neighbors)\n group.update(neighbors)\n for element in neighbors:\n queue.put(element)\n result.append(group)\n return result\n",
"def popmin(pqueue, lowest):\n lowest = 1000\n keylowest = None\n for element in pqueue:\n if pqueue[element] < lowest:\n lowest = pqueue[element]\n keylowest = element\n del pqueue[keylowest]\n return keylowest\n"
] |
from queue import Queue
from graphlibrary import digraph
from graphlibrary import graph
from graphlibrary.exceptions import *
def connected_components(G):
"""
Check if G is connected and return list of sets. Every
set contains all vertices in one connected component.
"""
result = []
vertices = set(G.vertices)
while vertices:
n = vertices.pop()
group = {n}
queue = Queue()
queue.put(n)
while not queue.empty():
n = queue.get()
neighbors = set(G.vertices[n])
neighbors.difference_update(group)
vertices.difference_update(neighbors)
group.update(neighbors)
for element in neighbors:
queue.put(element)
result.append(group)
return result
def popmin(pqueue, lowest):
lowest = 1000
keylowest = None
for element in pqueue:
if pqueue[element] < lowest:
lowest = pqueue[element]
keylowest = element
del pqueue[keylowest]
return keylowest
def prim(G, start, weight='weight'):
"""
Algorithm for finding a minimum spanning
tree for a weighted undirected graph.
"""
if len(connected_components(G)) != 1:
raise GraphInsertError("Prim algorithm work with connected graph only")
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
pred = {}
key = {}
pqueue = {}
lowest = 0
for edge in G.edges:
if G.edges[edge][weight] > lowest:
lowest = G.edges[edge][weight]
for vertex in G.vertices:
pred[vertex] = None
key[vertex] = 2 * lowest
key[start] = 0
for vertex in G.vertices:
pqueue[vertex] = key[vertex]
while pqueue:
current = popmin(pqueue, lowest)
for neighbor in G.vertices[current]:
if (neighbor in pqueue and
G.edges[(current, neighbor)][weight] < key[neighbor]):
pred[neighbor] = current
key[neighbor] = G.edges[(current, neighbor)][weight]
pqueue[neighbor] = G.edges[(current, neighbor)][weight]
return pred
def prim_MST(G, start):
"""
Return an oriented tree constructed from prim algorithm starting at 'start'
"""
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
pred = prim(G, start)
T = digraph.DiGraph()
queue = Queue()
queue.put(start)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T
|
svasilev94/GraphLibrary
|
graphlibrary/graph.py
|
Graph.add_vertex
|
python
|
def add_vertex(self, vertex, **attr):
"""
Add vertex and update vertex attributes
"""
self.vertices[vertex] = []
if attr:
self.nodes[vertex] = attr
|
Add vertex and update vertex attributes
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/graph.py#L49-L55
| null |
class Graph:
"""
Class for undirected graphs
"""
vertices = dict
edges = dict
def __init__(self):
"""
Initialize undirected graph
vertices dictionary -> {u: [neigbours], }
edges dictionary -> {(u;v): {data: 'info', }, }
vertex attributes dictionary -> {u: {data: 'info', }, }
"""
self.vertices = {}
self.edges = {}
self.nodes = {}
def __iter__(self):
"""
Iterate over the vertices -> for n in G
"""
return iter(self.vertices)
def __contains__(self, vertex):
"""
Check if vertex is in G.vertices -> vertex in G
"""
try:
return vertex in self.vertices
except:
return False
def __len__(self):
"""
Return the number of vertices -> len(G)
"""
return len(self.vertices)
def __getitem__(self, vertex):
"""
Return a dict of neighbors of vertex -> G[vertex]
"""
return self.vertices[vertex]
def add_vertex(self, vertex, **attr):
"""
Add vertex and update vertex attributes
"""
self.vertices[vertex] = []
if attr:
self.nodes[vertex] = attr
def add_edge(self, u, v, **attr):
"""
Add an edge between vertices u and v and update edge attributes
"""
if u not in self.vertices:
self.vertices[u] = []
if v not in self.vertices:
self.vertices[v] = []
vertex = (u, v)
self.edges[vertex] = {}
if attr:
self.edges[vertex].update(attr)
self.vertices[u].append(v)
self.vertices[v].append(u)
def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
if vertex in self.nodes:
self.nodes.pop(vertex)
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex)
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element)
for element in edges:
del self.edges[element]
def remove_edge(self, u, v):
"""
Remove the edge between vertices u and v
"""
try:
self.edges.pop((u, v))
except KeyError:
raise GraphInsertError("Edge %s-%s doesn't exist." % (u, v))
self.vertices[u].remove(v)
self.vertices[v].remove(u)
def is_edge(self, u, v):
"""
Check if edge between u and v exist
"""
try:
return (u, v) in self.edges
except:
return False
def degree(self, vertex):
"""
Return the degree of a vertex
"""
try:
return len(self.vertices[vertex])
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
def is_directed(self):
"""
Return True if graph is directed, False otherwise
"""
return False
|
svasilev94/GraphLibrary
|
graphlibrary/graph.py
|
Graph.add_edge
|
python
|
def add_edge(self, u, v, **attr):
"""
Add an edge between vertices u and v and update edge attributes
"""
if u not in self.vertices:
self.vertices[u] = []
if v not in self.vertices:
self.vertices[v] = []
vertex = (u, v)
self.edges[vertex] = {}
if attr:
self.edges[vertex].update(attr)
self.vertices[u].append(v)
self.vertices[v].append(u)
|
Add an edge between vertices u and v and update edge attributes
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/graph.py#L57-L70
| null |
class Graph:
"""
Class for undirected graphs
"""
vertices = dict
edges = dict
def __init__(self):
"""
Initialize undirected graph
vertices dictionary -> {u: [neigbours], }
edges dictionary -> {(u;v): {data: 'info', }, }
vertex attributes dictionary -> {u: {data: 'info', }, }
"""
self.vertices = {}
self.edges = {}
self.nodes = {}
def __iter__(self):
"""
Iterate over the vertices -> for n in G
"""
return iter(self.vertices)
def __contains__(self, vertex):
"""
Check if vertex is in G.vertices -> vertex in G
"""
try:
return vertex in self.vertices
except:
return False
def __len__(self):
"""
Return the number of vertices -> len(G)
"""
return len(self.vertices)
def __getitem__(self, vertex):
"""
Return a dict of neighbors of vertex -> G[vertex]
"""
return self.vertices[vertex]
def add_vertex(self, vertex, **attr):
"""
Add vertex and update vertex attributes
"""
self.vertices[vertex] = []
if attr:
self.nodes[vertex] = attr
def add_edge(self, u, v, **attr):
"""
Add an edge between vertices u and v and update edge attributes
"""
if u not in self.vertices:
self.vertices[u] = []
if v not in self.vertices:
self.vertices[v] = []
vertex = (u, v)
self.edges[vertex] = {}
if attr:
self.edges[vertex].update(attr)
self.vertices[u].append(v)
self.vertices[v].append(u)
def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
if vertex in self.nodes:
self.nodes.pop(vertex)
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex)
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element)
for element in edges:
del self.edges[element]
def remove_edge(self, u, v):
"""
Remove the edge between vertices u and v
"""
try:
self.edges.pop((u, v))
except KeyError:
raise GraphInsertError("Edge %s-%s doesn't exist." % (u, v))
self.vertices[u].remove(v)
self.vertices[v].remove(u)
def is_edge(self, u, v):
"""
Check if edge between u and v exist
"""
try:
return (u, v) in self.edges
except:
return False
def degree(self, vertex):
"""
Return the degree of a vertex
"""
try:
return len(self.vertices[vertex])
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
def is_directed(self):
"""
Return True if graph is directed, False otherwise
"""
return False
|
svasilev94/GraphLibrary
|
graphlibrary/graph.py
|
Graph.remove_vertex
|
python
|
def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
if vertex in self.nodes:
self.nodes.pop(vertex)
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex)
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element)
for element in edges:
del self.edges[element]
|
Remove vertex from G
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/graph.py#L72-L90
| null |
class Graph:
"""
Class for undirected graphs
"""
vertices = dict
edges = dict
def __init__(self):
"""
Initialize undirected graph
vertices dictionary -> {u: [neigbours], }
edges dictionary -> {(u;v): {data: 'info', }, }
vertex attributes dictionary -> {u: {data: 'info', }, }
"""
self.vertices = {}
self.edges = {}
self.nodes = {}
def __iter__(self):
"""
Iterate over the vertices -> for n in G
"""
return iter(self.vertices)
def __contains__(self, vertex):
"""
Check if vertex is in G.vertices -> vertex in G
"""
try:
return vertex in self.vertices
except:
return False
def __len__(self):
"""
Return the number of vertices -> len(G)
"""
return len(self.vertices)
def __getitem__(self, vertex):
"""
Return a dict of neighbors of vertex -> G[vertex]
"""
return self.vertices[vertex]
def add_vertex(self, vertex, **attr):
"""
Add vertex and update vertex attributes
"""
self.vertices[vertex] = []
if attr:
self.nodes[vertex] = attr
def add_edge(self, u, v, **attr):
"""
Add an edge between vertices u and v and update edge attributes
"""
if u not in self.vertices:
self.vertices[u] = []
if v not in self.vertices:
self.vertices[v] = []
vertex = (u, v)
self.edges[vertex] = {}
if attr:
self.edges[vertex].update(attr)
self.vertices[u].append(v)
self.vertices[v].append(u)
def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
if vertex in self.nodes:
self.nodes.pop(vertex)
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex)
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element)
for element in edges:
del self.edges[element]
def remove_edge(self, u, v):
"""
Remove the edge between vertices u and v
"""
try:
self.edges.pop((u, v))
except KeyError:
raise GraphInsertError("Edge %s-%s doesn't exist." % (u, v))
self.vertices[u].remove(v)
self.vertices[v].remove(u)
def is_edge(self, u, v):
"""
Check if edge between u and v exist
"""
try:
return (u, v) in self.edges
except:
return False
def degree(self, vertex):
"""
Return the degree of a vertex
"""
try:
return len(self.vertices[vertex])
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
def is_directed(self):
"""
Return True if graph is directed, False otherwise
"""
return False
|
svasilev94/GraphLibrary
|
graphlibrary/graph.py
|
Graph.remove_edge
|
python
|
def remove_edge(self, u, v):
"""
Remove the edge between vertices u and v
"""
try:
self.edges.pop((u, v))
except KeyError:
raise GraphInsertError("Edge %s-%s doesn't exist." % (u, v))
self.vertices[u].remove(v)
self.vertices[v].remove(u)
|
Remove the edge between vertices u and v
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/graph.py#L92-L101
| null |
class Graph:
"""
Class for undirected graphs
"""
vertices = dict
edges = dict
def __init__(self):
"""
Initialize undirected graph
vertices dictionary -> {u: [neigbours], }
edges dictionary -> {(u;v): {data: 'info', }, }
vertex attributes dictionary -> {u: {data: 'info', }, }
"""
self.vertices = {}
self.edges = {}
self.nodes = {}
def __iter__(self):
"""
Iterate over the vertices -> for n in G
"""
return iter(self.vertices)
def __contains__(self, vertex):
"""
Check if vertex is in G.vertices -> vertex in G
"""
try:
return vertex in self.vertices
except:
return False
def __len__(self):
"""
Return the number of vertices -> len(G)
"""
return len(self.vertices)
def __getitem__(self, vertex):
"""
Return a dict of neighbors of vertex -> G[vertex]
"""
return self.vertices[vertex]
def add_vertex(self, vertex, **attr):
"""
Add vertex and update vertex attributes
"""
self.vertices[vertex] = []
if attr:
self.nodes[vertex] = attr
def add_edge(self, u, v, **attr):
"""
Add an edge between vertices u and v and update edge attributes
"""
if u not in self.vertices:
self.vertices[u] = []
if v not in self.vertices:
self.vertices[v] = []
vertex = (u, v)
self.edges[vertex] = {}
if attr:
self.edges[vertex].update(attr)
self.vertices[u].append(v)
self.vertices[v].append(u)
def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
if vertex in self.nodes:
self.nodes.pop(vertex)
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex)
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element)
for element in edges:
del self.edges[element]
def remove_edge(self, u, v):
"""
Remove the edge between vertices u and v
"""
try:
self.edges.pop((u, v))
except KeyError:
raise GraphInsertError("Edge %s-%s doesn't exist." % (u, v))
self.vertices[u].remove(v)
self.vertices[v].remove(u)
def is_edge(self, u, v):
"""
Check if edge between u and v exist
"""
try:
return (u, v) in self.edges
except:
return False
def degree(self, vertex):
"""
Return the degree of a vertex
"""
try:
return len(self.vertices[vertex])
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
def is_directed(self):
"""
Return True if graph is directed, False otherwise
"""
return False
|
svasilev94/GraphLibrary
|
graphlibrary/graph.py
|
Graph.degree
|
python
|
def degree(self, vertex):
"""
Return the degree of a vertex
"""
try:
return len(self.vertices[vertex])
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
|
Return the degree of a vertex
|
train
|
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/graph.py#L112-L119
| null |
class Graph:
"""
Class for undirected graphs
"""
vertices = dict
edges = dict
def __init__(self):
"""
Initialize undirected graph
vertices dictionary -> {u: [neigbours], }
edges dictionary -> {(u;v): {data: 'info', }, }
vertex attributes dictionary -> {u: {data: 'info', }, }
"""
self.vertices = {}
self.edges = {}
self.nodes = {}
def __iter__(self):
"""
Iterate over the vertices -> for n in G
"""
return iter(self.vertices)
def __contains__(self, vertex):
"""
Check if vertex is in G.vertices -> vertex in G
"""
try:
return vertex in self.vertices
except:
return False
def __len__(self):
"""
Return the number of vertices -> len(G)
"""
return len(self.vertices)
def __getitem__(self, vertex):
"""
Return a dict of neighbors of vertex -> G[vertex]
"""
return self.vertices[vertex]
def add_vertex(self, vertex, **attr):
"""
Add vertex and update vertex attributes
"""
self.vertices[vertex] = []
if attr:
self.nodes[vertex] = attr
def add_edge(self, u, v, **attr):
"""
Add an edge between vertices u and v and update edge attributes
"""
if u not in self.vertices:
self.vertices[u] = []
if v not in self.vertices:
self.vertices[v] = []
vertex = (u, v)
self.edges[vertex] = {}
if attr:
self.edges[vertex].update(attr)
self.vertices[u].append(v)
self.vertices[v].append(u)
def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
if vertex in self.nodes:
self.nodes.pop(vertex)
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex)
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element)
for element in edges:
del self.edges[element]
def remove_edge(self, u, v):
"""
Remove the edge between vertices u and v
"""
try:
self.edges.pop((u, v))
except KeyError:
raise GraphInsertError("Edge %s-%s doesn't exist." % (u, v))
self.vertices[u].remove(v)
self.vertices[v].remove(u)
def is_edge(self, u, v):
"""
Check if edge between u and v exist
"""
try:
return (u, v) in self.edges
except:
return False
def degree(self, vertex):
"""
Return the degree of a vertex
"""
try:
return len(self.vertices[vertex])
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
def is_directed(self):
"""
Return True if graph is directed, False otherwise
"""
return False
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/gp_xfac.py
|
gp_xfac
|
python
|
def gp_xfac():
# prepare data
inDir, outDir = getWorkDirs()
data = OrderedDict()
# TODO: "really" reproduce plot using spectral data
for file in os.listdir(inDir):
info = os.path.splitext(file)[0].split('_')
key = ' '.join(info[:2] + [':',
' - '.join([
str(float(s)/1e3) for s in info[-1][:7].split('-')
]) + ' GeV'
])
file_url = os.path.join(inDir, file)
data[key] = np.loadtxt(open(file_url, 'rb')).reshape((-1,5))
data[key][:, 0] *= shift.get(key, 1)
logging.debug(data) # shown if --log flag given on command line
# generate plot
nSets = len(data)
make_plot(
data = data.values(),
properties = [ getOpts(i) for i in xrange(nSets) ],
titles = data.keys(), # use data keys as legend titles
name = os.path.join(outDir, 'xfac'),
key = [ 'top center', 'maxcols 2', 'width -7', 'font ",20"' ],
ylabel = 'LMR Enhancement Factor',
xlabel = '{/Symbol \326}s_{NN} (GeV)',
yr = [0.5, 6.5], size = '8.5in,8in',
rmargin = 0.99, tmargin = 0.98, bmargin = 0.14,
xlog = True, gpcalls = [
'format x "%g"',
'xtics (20,"" 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
'boxwidth 0.015 absolute'
],
labels = { 'STAR Preliminary': [0.5, 0.5, False] },
lines = { 'x=1': 'lc 0 lw 4 lt 2' }
)
return 'done'
|
example using QM12 enhancement factors
- uses `gpcalls` kwarg to reset xtics
- numpy.loadtxt needs reshaping for input files w/ only one datapoint
- according poster presentations see QM12_ & NSD_ review
.. _QM12: http://indico.cern.ch/getFile.py/access?contribId=268&sessionId=10&resId=0&materialId=slides&confId=181055
.. _NSD: http://rnc.lbl.gov/~xdong/RNC/DirectorReview2012/posters/Huck.pdf
.. image:: pics/xfac.png
:width: 450 px
:ivar key: translates filename into legend/key label
:ivar shift: slightly shift selected data points
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/gp_xfac.py#L12-L63
|
[
"def getWorkDirs():\n \"\"\"get input/output dirs (same input/output layout as for package)\"\"\"\n # get caller module\n caller_fullurl = inspect.stack()[1][1]\n caller_relurl = os.path.relpath(caller_fullurl)\n caller_modurl = os.path.splitext(caller_relurl)[0]\n # split caller_url & append 'Dir' to package name\n dirs = caller_modurl.split('/')\n dirs[0] = 'data' # TODO de-hardcode\n # get, check and create outdir\n outDir = os.path.join(*(['output'] + dirs[1:]))\n if not os.path.exists(outDir): os.makedirs(outDir)\n # get and check indir\n dirs.append('input')\n inDir = os.path.join(*dirs)\n if not os.path.exists(inDir):\n logging.critical('create input dir %s to continue!' % inDir)\n sys.exit(1)\n return inDir, outDir\n"
] |
import logging, argparse, os, sys
import numpy as np
from collections import OrderedDict
from ..ccsgp.ccsgp import make_plot
from .utils import getWorkDirs
from ..ccsgp.utils import getOpts
shift = {
'STAR Au+Au : 0.3 - 0.75 GeV': 1.06, 'STAR Au+Au : 0.2 - 0.6 GeV': 1.12
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
#parser.add_argument("initial", help="country initial = input subdir with txt files")
parser.add_argument("--log", help="show log output", action="store_true")
args = parser.parse_args()
loglevel = 'DEBUG' if args.log else 'WARNING'
logging.basicConfig(
format='%(message)s', level=getattr(logging, loglevel)
)
print gp_xfac()
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/gp_ptspec.py
|
gp_ptspec
|
python
|
def gp_ptspec():
fenergies = ['19', '27', '39', '62', ]# '200']
nen = len(fenergies)
mee_keys = ['pi0', 'LMR', 'omega', 'phi', 'IMR', 'jpsi']
#mee_keys = ['LMR', ]
mee_dict = OrderedDict((k,'') for k in mee_keys)
yscale = { '200': '300', '62': '5000', '39': '50', '27': '0.3', '19': '0.001' }
inDir, outDir = getWorkDirs()
data, data_avpt, dpt_dict = {}, {}, {}
yvals, yvalsPt = [], []
scale = {
'19': 1.3410566491548412, '200': 1.0, '39': 1.2719203877292842,
'27': 1.350873678084769, '62': 1.2664666321635087
}
lmr_label = None
for filename in os.listdir(inDir):
# import data
file_url = os.path.join(inDir, filename)
filebase = os.path.splitext(filename)[0] # unique
energy, mee_name, mee_range, data_type = splitFileName(filebase)
if mee_name == 'LMR':
mee_range_split = map(float, mee_range.split('-'))
lmr_label = 'LMR: %g < M_{ee} < %g GeV/c^{2}' % (
mee_range_split[0], mee_range_split[1]
)
if energy == '200': continue
if mee_name not in mee_keys: continue
mee_dict[mee_name] = mee_range
data[filebase] = np.loadtxt(open(file_url, 'rb'))
if data_type == 'data':
#print data[filebase]
data[filebase] = data[filebase][:-1] # skip mT<0.4 point
if energy == '200': data[filebase][:,(1,3,4)] /= 0.5
# calculate average pT first
mask = (data[filebase][:,0] > 0.4) & (data[filebase][:,0] < 2.2)
avpt_data = data[filebase][mask]
pTs = avpt_data[:,0]
wghts = avpt_data[:,1]
probs = unp.uarray(avpt_data[:,1], avpt_data[:,3]) # dN/pT
probs /= umath.fsum(probs) # probabilities
avpt = umath.fsum(pTs*probs)
logging.info(('%s: {} %g' % (
filebase, np.average(pTs, weights = wghts)
)).format(avpt)) # TODO: syst. uncertainties
# save datapoint for average pT and append to yvalsPt for yaxis range
dp = [ float(getEnergy4Key(energy)), avpt.nominal_value, 0., avpt.std_dev, 0. ]
avpt_key = mee_name
if data_type == 'cocktail': avpt_key += '_c'
if data_type == 'medium': avpt_key += '_m'
if data_type == 'mediumMedOnly': avpt_key += '_mMed'
if data_type == 'mediumQgpOnly': avpt_key += '_mQgp'
if avpt_key in data_avpt: data_avpt[avpt_key].append(dp)
else: data_avpt[avpt_key] = [ dp ]
yvalsPt.append(avpt.nominal_value)
# now adjust data for panel plot and append to yvals
if data_type != 'data':
data[filebase][:,(1,3,4)] /= scale[energy]
data[filebase][:,(1,3,4)] *= float(yscale[energy])
if data_type == 'cocktail' or fnmatch(data_type, '*medium*'):
data[filebase][:,2:] = 0.
yvals += [v for v in data[filebase][:,1] if v > 0]
# prepare dict for panel plot
dpt_dict_key = getSubplotTitle(mee_name, mee_range)
if dpt_dict_key not in dpt_dict:
ndsets = nen*2
# TODO: currently only 19/39/62 medium avail. w/ med/qgp/tot for each
# July14: all energies available; TODO: fix dsidx
if mee_name == 'LMR': ndsets += 4*3
dpt_dict[dpt_dict_key] = [ [None]*ndsets, [None]*ndsets, [None]*ndsets ]
enidx = fenergies.index(energy)
dsidx = enidx
if fnmatch(data_type, '*medium*'):
# 19: 0-2, 27: 3-5, 39: 6-8, 62: 9-11
dsidx = (energy=='19')*0 + (energy=='27')*3 + (energy=='39')*6 + (energy=='62')*9
dsidx += (data_type=='mediumQgpOnly')*0 + (data_type=='mediumMedOnly')*1
dsidx += (data_type=='medium')*2
else:
dsidx += int(mee_name == 'LMR') * 4 * 3 # number of medium calc avail.
dsidx += int(data_type == 'data') * len(fenergies)
dpt_dict[dpt_dict_key][0][dsidx] = data[filebase] # data
if data_type == 'data': # properties
dpt_dict[dpt_dict_key][1][dsidx] = 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[enidx]
elif data_type == 'medium':
dpt_dict[dpt_dict_key][1][dsidx] = 'with lines lt 1 lw 5 lc %s' % default_colors[enidx]
else:
dpt_dict[dpt_dict_key][1][dsidx] = 'with lines lt %d lw 5 lc %s' % (
2+(data_type=='mediumMedOnly')+(data_type=='mediumQgpOnly')*2, default_colors[enidx]
)
dpt_dict[dpt_dict_key][2][dsidx] = ' '.join([ # legend titles
getEnergy4Key(energy), 'GeV', '{/Symbol \264} %g' % (
Decimal(yscale[energy])#.as_tuple().exponent
)
]) if data_type == 'data' else ''
# use mass range in dict key to sort dpt_dict with increasing mass
plot_key_order = dpt_dict.keys()
plot_key_order.sort(key=lambda x: float(x.split(':')[1].split('-')[0]))
# sort data_avpt by energy and apply x-shift for better visibility
for k in data_avpt: data_avpt[k].sort(key=lambda x: x[0])
energies = [ dp[0] for dp in data_avpt[mee_keys[0]] ]
energies.append(215.) # TODO: think of better upper limit
linsp = {}
for start,stop in zip(energies[:-1],energies[1:]):
linsp[start] = np.linspace(start, stop, num = 4*len(mee_keys))
for k in data_avpt:
key = k.split('_')[0]
for i in xrange(len(data_avpt[k])):
data_avpt[k][i][0] = linsp[energies[i]][mee_keys.index(key)]
# make panel plot
yMin, yMax = 0.5*min(yvals), 3*max(yvals)
make_panel(
dpt_dict = OrderedDict((k,dpt_dict[k]) for k in plot_key_order),
name = os.path.join(outDir, 'ptspec'),
ylabel = '1/N@_{mb}^{evt} d^{2}N@_{ee}^{acc.}/dp_{T}dM_{ee} (c^3/GeV^2)',
xlabel = 'dielectron transverse momentum, p_{T} (GeV/c)',
ylog = True, xr = [0, 2.2], yr = [1e-9, 1e4],
#lmargin = 0.12, bmargin = 0.10, tmargin = 1., rmargin = 1.,
key = ['bottom left', 'samplen 0.5', 'width -2', 'opaque'],
arrow_bar = 0.002, layout = '3x2', size = '8in,8in'
)
#make plot for LMR spectra only
#lmr_key = getSubplotTitle('LMR', '0.4-0.76')
#if energy == '200':
# lmr_key = getSubplotTitle('LMR', '0.3-0.76')
#pseudo_point = np.array([[-1,0,0,0,0]])
#model_titles = ['Cocktail + Model', 'Cocktail', 'in-Medium', 'QGP']
#model_props = [
# 'with lines lc %s lw 5 lt %d' % (default_colors[-2], i+1)
# for i in xrange(len(model_titles))
#]
#make_plot(
# data = dpt_dict[lmr_key][0] + [ pseudo_point ] * len(model_titles),
# properties = dpt_dict[lmr_key][1] + model_props,
# titles = dpt_dict[lmr_key][2] + model_titles,
# name = os.path.join(outDir, 'ptspecLMR'),
# ylabel = '1/N@_{mb}^{evt} d^{2}N@_{ee}^{acc.}/dp_{T}dM_{ee} (c^3/GeV^2)',
# xlabel = 'dielectron transverse momentum, p_{T} (GeV/c)',
# ylog = True, xr = [0, 2.0], yr = [1e-8, 100],
# lmargin = 0.15, bmargin = 0.08, rmargin = 0.98, tmargin = 0.84,
# key = ['maxrows 4', 'samplen 0.7', 'width -2', 'at graph 1.,1.2'],
# arrow_bar = 0.005, size = '10in,13in',
# labels = {
# 'stat. errors only': [0.7,0.95,False], lmr_label: [0.05,0.03,False],
# 'STAR Preliminary': [0.05,0.07,False],
# }
#)
# make mean pt plot
#yMinPt, yMaxPt = 0.95*min(yvalsPt), 1.05*max(yvalsPt)
#make_plot(
# data = [ # cocktail
# np.array(data_avpt[k+'_c']) for k in mee_keys
# ] + [ # medium
# np.array(data_avpt['LMR_m'])
# ] + [ # data
# np.array(data_avpt[k]) for k in mee_keys
# ],
# properties = [
# 'with lines lt 1 lw 4 lc %s' % default_colors[i if i < 5 else i+1]
# for i in xrange(len(mee_keys))
# ] + [
# 'with lines lt 2 lw 4 lc %s' % default_colors[mee_keys.index('LMR')]
# ] + [
# 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[i if i < 5 else i+1]
# for i in xrange(len(mee_keys))
# ],
# titles = [ getMeeLabel(k) for k in mee_keys ] + ['']*(len(mee_keys)+1),
# name = os.path.join(outDir, 'meanPt'),
# xlabel = '{/Symbol \326}s_{NN} (GeV)',
# ylabel = '{/Symbol \341}p_{T}{/Symbol \361} in STAR Acceptance (GeV/c)',
# xlog = True, xr = [17,220], yr = [yMinPt, yMaxPt], size = '11in,9in',
# key = [ 'maxrows 1', 'at graph 1, 1.1' ],
# lmargin = 0.11, bmargin = 0.11, tmargin = 1., rmargin = 1.,
# gpcalls = [
# 'format x "%g"',
# 'xtics (20,"" 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
# ]
#)
## make mean pt plot for LMR only
#make_plot(
# data = [
# np.array(data_avpt['LMR_c']),
# np.array(data_avpt['LMR_m']),
# np.array(data_avpt['LMR'])
# ],
# properties = [
# 'with lines lt 2 lw 4 lc %s' % default_colors[0],
# 'with lines lt 1 lw 4 lc %s' % default_colors[0],
# 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[0]
# ],
# titles = [
# 'cocktail', 'HMBT', getMeeLabel('data')
# ],
# name = os.path.join(outDir, 'meanPtLMR'),
# xlabel = '{/Symbol \326}s_{NN} (GeV)',
# ylabel = 'LMR {/Symbol \341}p_{T}{/Symbol \361} in STAR Acceptance (GeV/c)',
# lmargin = 0.17, bmargin = 0.15, tmargin = 0.95, xlog = True, xr = [17,80],
# yr = [0.65,1.05], #yr = [yMinPt, yMaxPt],
# key = [ 'bottom right' ],
# gpcalls = [
# 'format x "%g"',
# 'xtics (20, 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
# ],
# labels = {
# 'stat. errors only': [0.7,0.95,False], lmr_label: [0.05,0.07,False],
# '0.4 < p_{T} < 2.2 GeV/c': [0.05,0.14,False]
# }
#)
return 'done'
|
example for a 2D-panel plot etc.
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/gp_ptspec.py#L32-L239
|
[
"def getWorkDirs():\n \"\"\"get input/output dirs (same input/output layout as for package)\"\"\"\n # get caller module\n caller_fullurl = inspect.stack()[1][1]\n caller_relurl = os.path.relpath(caller_fullurl)\n caller_modurl = os.path.splitext(caller_relurl)[0]\n # split caller_url & append 'Dir' to package name\n dirs = caller_modurl.split('/')\n dirs[0] = 'data' # TODO de-hardcode\n # get, check and create outdir\n outDir = os.path.join(*(['output'] + dirs[1:]))\n if not os.path.exists(outDir): os.makedirs(outDir)\n # get and check indir\n dirs.append('input')\n inDir = os.path.join(*dirs)\n if not os.path.exists(inDir):\n logging.critical('create input dir %s to continue!' % inDir)\n sys.exit(1)\n return inDir, outDir\n",
"def getEnergy4Key(energy):\n if energy == '19': return '19.6'\n if energy == '62': return '62.4'\n return energy\n",
"def splitFileName(fn):\n # energy, mee_name, mee_range, data_type\n split_arr = fn.split('_')\n return (\n re.compile('\\d+').search(split_arr[0]).group(),\n split_arr[1], split_arr[2],\n re.compile('(?i)[a-z]+').search(split_arr[0]).group()\n )\n",
"def getSubplotTitle(mn, mr):\n return ' '.join([getMeeLabel(mn), ':', mr, ' GeV/c^{2}'])\n"
] |
import logging, argparse, os, sys, re
import numpy as np
from collections import OrderedDict
from .utils import getWorkDirs, getEnergy4Key
from ..ccsgp.ccsgp import make_panel, make_plot
from ..ccsgp.utils import getOpts
from ..ccsgp.config import default_colors
from decimal import Decimal
import uncertainties.umath as umath
import uncertainties.unumpy as unp
from fnmatch import fnmatch
def getMeeLabel(s):
  """Translate a particle key into its gnuplot-enhanced Symbol-font label.

  Unknown keys are passed through unchanged (e.g. 'LMR', 'IMR', 'data').
  """
  # lookup table instead of an if-chain; the octal escapes select glyphs
  # from gnuplot's Symbol font at render time
  symbols = {
    'pi0': '{/Symbol \160}^0',
    'omega': '{/Symbol \167}',
    'phi': '{/Symbol \152}',
    'jpsi': 'J/{/Symbol \171}',
  }
  return symbols.get(s, s)
def splitFileName(fn):
  """Decompose a filename stem into (energy, mee_name, mee_range, data_type).

  The stem is expected to look like '<data_type><energy>_<mee_name>_<mee_range>',
  e.g. 'data19_LMR_0.4-0.76' -> ('19', 'LMR', '0.4-0.76', 'data').
  """
  parts = fn.split('_')
  stem = parts[0]  # fused '<data_type><energy>' token
  return (
    re.search(r'\d+', stem).group(),       # energy: first digit run
    parts[1], parts[2],                    # mee_name, mee_range
    re.search('(?i)[a-z]+', stem).group()  # data_type: first letter run
  )
def getSubplotTitle(mn, mr):
  """Build a panel title: '<particle label> : <mass range>  GeV/c^{2}'."""
  # equivalent to ' '.join([...]); note the double space before 'GeV'
  return '%s : %s  GeV/c^{2}' % (getMeeLabel(mn), mr)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--log", help="show log output", action="store_true")
args = parser.parse_args()
loglevel = 'DEBUG' if args.log else 'WARNING'
logging.basicConfig(
format='%(message)s', level=getattr(logging, loglevel)
)
print gp_ptspec()
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/gp_panel.py
|
gp_panel
|
python
|
def gp_panel(version, skip):
scale = { # QM14 (19 GeV skip later, factor here only informational)
'19': 1.0340571932983775, '200': 1.0, '39': 0.7776679085382481,
'27': 0.6412140408244136, '62': 0.9174700031778402
} if version == 'QM14' else {
'19': 1.3410566491548412, '200': 1.1051002240771077,
'39': 1.2719203877292842, '27': 1.350873678084769,
'62': 1.2664666321635087
}
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version)
data = {}
vacRhoTitle = '{/Symbol \162}/{/Symbol \167} VacSF+FB'
for infile in os.listdir(inDir):
if infile == "cocktail_contribs": continue
if infile == 'mediumDmOnly200.dat': continue
energy = re.compile('\d+').search(infile).group()
if skip is not None and energy == skip: continue
data_type = re.sub('%s\.dat' % energy, '', infile)
file_url = os.path.join(inDir, infile)
data_import = np.loadtxt(open(file_url, 'rb'))
if data_type != 'data' and (
(version == 'QM14' and energy != '19') or version == 'LatestPatrickJieYi'
):
data_import[:,(1,3,4)] /= scale[energy]
if data_type == 'cocktail': data_import[:,2:] = 0.
elif fnmatch(data_type, '*medium*') or data_type == 'vacRho':
data_import = data_import[data_import[:,0] < 0.9] \
if energy == '200' and data_type == '+medium' \
else data_import
data_import[:,(2,3)] = 0.
key = getEnergy4Key(energy)
if key not in data: data[key] = {}
data_type_mod = data_type
if data_type_mod == 'cocktail': data_type_mod = 'Cocktail'
elif data_type_mod == 'mediumMedOnly': data_type_mod = 'HMBT'
elif data_type_mod == 'mediumQgpOnly': data_type_mod = 'QGP'
elif data_type_mod == '+medium': data_type_mod = 'Cock. + HMBT + QGP'
elif data_type_mod == 'vacRho': data_type_mod = vacRhoTitle
data[key][data_type_mod] = data_import
plot_order = ['Cocktail', vacRhoTitle, 'QGP', 'HMBT', 'Cock. + HMBT + QGP', 'data']
plot_opts = {
vacRhoTitle: 'with lines lt 2 lw 5 lc %s' % default_colors[6],
'QGP': 'with lines lt 2 lw 5 lc %s' % default_colors[1],
'HMBT': 'with lines lt 2 lw 5 lc %s' % default_colors[2],
'Cock. + HMBT + QGP': 'with filledcurves lt 1 lw 5 pt 0 lc %s' % default_colors[16],
'Cocktail': 'with lines lc %s lw 5 lt 1' % default_colors[8],
'data': 'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[0]
}
panel2D_versions = (version == 'LatestPatrickJieYi' or version == 'QM14')
make_panel(
dpt_dict = OrderedDict(
(' '.join([k, 'GeV %s' % (
'{/=18 PRL 113 022301}'
if k == '200' and (
version == 'QM12Latest200' or version == 'QM14' or
version == 'LatestPatrickJieYi'
) else '{/=18 STAR Preliminary}'
)]), [
[ data[k][dt] for dt in plot_order if dt in data[k] ],
[ plot_opts[dt] for dt in plot_order if dt in data[k] ],
[ dt for dt in plot_order if dt in data[k] ]
]) for k in sorted(data, key=float)
),
name = os.path.join(
outDir, 'panel%s%s' % (version, 'No'+skip if skip is not None else '')
),
ylabel = '1/N@_{mb}^{evt} dN@_{ee}^{acc.}/dM_{ee} [ (GeV/c^2)^{-1} ]',
xlabel = 'invariant dielectron mass, M_{ee} (GeV/c^{2})',
ylog = True, xr = [0.05, 1.1], yr = [1e-4, 0.5],
#lmargin = 0.12 if panel2D_versions else 0.1,
#bmargin = 0.11 if panel2D_versions else 0.15,
arrow_length = 0.4, arrow_bar = 0.002,
gpcalls = [
'mxtics 2',
'object 50 rectangle back fc rgb "#C6E2FF" from 0.4,1e-4 to 0.75,2e-2'
],
layout = '3x2' if panel2D_versions else ('%dx1' % len(data)),
key = ['width -2', 'at graph 0.95,0.85'],
key_subplot_id = 5 if version != 'QM12' else 0,
size = '8in,8in' if version != 'QM12' else '5in,8.5in'
)
return 'done'
|
example for a panel plot using QM12 data (see gp_xfac)
.. image:: pics/panelQM12.png
:width: 700 px
:param version: plot version / input subdir name
:type version: str
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/gp_panel.py#L10-L100
|
[
"def getWorkDirs():\n \"\"\"get input/output dirs (same input/output layout as for package)\"\"\"\n # get caller module\n caller_fullurl = inspect.stack()[1][1]\n caller_relurl = os.path.relpath(caller_fullurl)\n caller_modurl = os.path.splitext(caller_relurl)[0]\n # split caller_url & append 'Dir' to package name\n dirs = caller_modurl.split('/')\n dirs[0] = 'data' # TODO de-hardcode\n # get, check and create outdir\n outDir = os.path.join(*(['output'] + dirs[1:]))\n if not os.path.exists(outDir): os.makedirs(outDir)\n # get and check indir\n dirs.append('input')\n inDir = os.path.join(*dirs)\n if not os.path.exists(inDir):\n logging.critical('create input dir %s to continue!' % inDir)\n sys.exit(1)\n return inDir, outDir\n",
"def getEnergy4Key(energy):\n if energy == '19': return '19.6'\n if energy == '62': return '62.4'\n return energy\n"
] |
import logging, argparse, os, sys, re
import numpy as np
from collections import OrderedDict
from .utils import getWorkDirs, getEnergy4Key
from ..ccsgp.ccsgp import make_panel
from ..ccsgp.utils import getOpts
from ..ccsgp.config import default_colors
from fnmatch import fnmatch
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("version", help="version = subdir name of input dir")
parser.add_argument("--skip", help="skip an energy", metavar="energy")
parser.add_argument("--log", help="show log output", action="store_true")
args = parser.parse_args()
loglevel = 'DEBUG' if args.log else 'WARNING'
logging.basicConfig(
format='%(message)s', level=getattr(logging, loglevel)
)
print gp_panel(args.version, args.skip)
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/gp_ccX.py
|
gp_ccX
|
python
|
def gp_ccX():
inDir, outDir = getWorkDirs()
data, alldata = OrderedDict(), None
for infile in os.listdir(inDir):
# get key and import data
key = os.path.splitext(infile)[0].replace('_', '/')
data_import = np.loadtxt(open(os.path.join(inDir, infile), 'rb'))
# convert to log10 vs log10 plot, z=log10(y) => dz=0.434*dy/y
data_import[:,3] = 0.434*(data_import[:,3]/data_import[:,1])
data_import[:,(0,1)] = np.log10(data_import[:,(0,1)])
data_import[:,2] = 0
# fill dictionary
data[key] = data_import
alldata = data[key] if alldata is None else np.vstack((alldata, data[key]))
# fit linear part first
lindata = alldata[alldata[:,0]>2.5]
m = (lindata[-1,1]-lindata[0,1])/(lindata[-1,0]-lindata[0,0])
t = lindata[0,1] - m * lindata[0,0]
popt1, pcov = curve_fit(
linear, lindata[:,0], lindata[:,1], p0=[m, t],
sigma=lindata[:,3], absolute_sigma=True
)
# fit full range
popt2, pcov = curve_fit(
lambda x, c, d: fitfunc(x, popt1[0], popt1[1], c, d),
alldata[:,0], alldata[:,1], sigma=alldata[:,3], absolute_sigma=True,
)
popt = np.hstack((popt1, popt2))
model = lambda x: fitfunc(x, *popt)
# calculate mean standard deviation of data from parameterization
yfit = np.array([model(x) for x in alldata[:,0]])
stddev = 1.5*np.sqrt( # multiple of "sigma"
np.average((alldata[:,1]-yfit)**2, weights=1./alldata[:,3])
)
print 'stddev = %.2g' % stddev
errorband = np.array([[x, model(x), 0, 0, stddev] for x in np.linspace(1,4)])
# make plot
fitdata = np.array([[x, model(x), 0, 0, 0] for x in np.linspace(1,4)])
par_names = ['a', 'b', 'c', 'd']
energies = [19.6, 27, 39, 62.4, 200]
labels = dict(
('%s = %.3g' % (par_name, popt[i]), [3.3, 3-i*0.2, True])
for i,par_name in enumerate(par_names)
)
ccX_vals = [10**model(np.log10(energy)) for energy in energies]
ccX = [' '.join([
'%g GeV:' % energy,
'({})'.format(ufloat(ccX_vals[i], stddev/0.434*ccX_vals[i])),
'{/Symbol \155}b'
]) for i,energy in enumerate(energies)]
print ccX
#labels.update(dict(
# (cc, [1+i*0.5, 4.5+(i%2+1)*0.2, True]) for i,cc in enumerate(ccX)
#))
make_plot(
data = [errorband] + data.values() + [fitdata],
properties = [
'with filledcurves lt 1 lw 5 pt 0 lc %s' % default_colors[8]
] + [
'lc %s lw 4 lt 1 pt 18 ps 1.5' % (default_colors[i])
for i in xrange(len(data))
] + ['with lines lc 0 lw 4 lt 1'],
titles = [''] + data.keys() + ['y = ax+b - e^{-cx+d}'],
xlabel = 'x = log_{10}[{/Symbol \326}s_{NN} (GeV)]',
ylabel = 'y = log_{10}[{/Symbol \163}@_{c@^{/=18-}c}^{NN} ({/Symbol \155}b)]',
name = os.path.join(outDir, 'ccX'),
size = '11.4in,8.3in', xr = [1, 4], yr = [0.5,4.5],
key = ['bottom right', 'nobox', 'width -5'],
tmargin = 0.98, rmargin = 0.99, bmargin = 0.13,
lines = dict(
('y=%f' % (np.log10(energy)), 'lc 0 lw 2 lt 2') for energy in energies
), labels = labels,
)
|
fit experimental data
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/gp_ccX.py#L19-L92
|
[
"def getWorkDirs():\n \"\"\"get input/output dirs (same input/output layout as for package)\"\"\"\n # get caller module\n caller_fullurl = inspect.stack()[1][1]\n caller_relurl = os.path.relpath(caller_fullurl)\n caller_modurl = os.path.splitext(caller_relurl)[0]\n # split caller_url & append 'Dir' to package name\n dirs = caller_modurl.split('/')\n dirs[0] = 'data' # TODO de-hardcode\n # get, check and create outdir\n outDir = os.path.join(*(['output'] + dirs[1:]))\n if not os.path.exists(outDir): os.makedirs(outDir)\n # get and check indir\n dirs.append('input')\n inDir = os.path.join(*dirs)\n if not os.path.exists(inDir):\n logging.critical('create input dir %s to continue!' % inDir)\n sys.exit(1)\n return inDir, outDir\n"
] |
import os, argparse, logging
from .utils import getWorkDirs, getEnergy4Key, particleLabel4Key
from collections import OrderedDict
from ..ccsgp.ccsgp import make_plot, make_panel
from ..ccsgp.config import default_colors
from scipy.optimize import curve_fit
import numpy as np
from uncertainties import ufloat
def linear(x, a, b):
  """Straight line a*x + b (fit-model component)."""
  return b + a * x
def exp(x, c, d):
  """Decaying exponential e^(d - c*x) (fit-model component)."""
  return np.exp(d - c * x)
def fitfunc(x, a, b, c, d):
  """Full fit model: linear trend minus an exponential turn-off, a*x+b - e^(-c*x+d)."""
  lin_part = linear(x, a, b)
  exp_part = exp(x, c, d)
  return lin_part - exp_part
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--log", help="show log output", action="store_true")
args = parser.parse_args()
loglevel = 'DEBUG' if args.log else 'WARNING'
logging.basicConfig(
format='%(message)s', level=getattr(logging, loglevel)
)
gp_ccX()
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/gp_rapp.py
|
gp_rapp
|
python
|
def gp_rapp():
  """Plot in-medium yield ratios (Rapp model) relative to 200 GeV.

  Reads one medium-calculation file per energy from the input dir,
  sums the invariant-mass ranges per file, normalizes each mass-range
  series by its last (highest-energy) entry and writes the plot to
  <outDir>/rapp.

  :returns: the string 'done'
  """
  inDir, outDir = getWorkDirs()
  # prepare data
  yields = {}
  for infile in os.listdir(inDir):
    # energy is the digit run in the filename, e.g. 'medium19.dat' -> '19'
    energy = re.compile('\d+').search(infile).group()
    medium = np.loadtxt(open(os.path.join(inDir, infile), 'rb'))
    # NOTE(review): this call signature (energy, medium, yields) differs from
    # the getMassRangesSums(indata, suffix=..., ...) shown elsewhere --
    # presumably an older API of the helper; verify against utils.
    getMassRangesSums(energy, medium, yields)
  data = dict( # sort by energy
    (k, np.array(sorted(v)))
    for k, v in yields.iteritems()
  )
  for k in data: data[k][:,1] /= data[k][-1,1] # divide by 200
  # make plot
  nSets = len(data)
  make_plot(
    data = [ data[k][:-1] for k in data ],  # drop the 200 GeV point (ratio == 1 by construction)
    properties = [
      'with linespoints lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[i]
      for i in xrange(nSets)
    ],
    titles = data.keys(), # TODO: titles order correct?
    name = os.path.join(outDir, 'rapp'),
    xlabel = '{/Symbol \326}s_{NN} (GeV)', ylabel = 'Rapp Ratio to 200 GeV',
    lmargin = 0.1, key = ['left'], yr = [0.1, 0.8]
  )
  return 'done'
|
rho in-medium ratios by Rapp (based on protected data)
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/gp_rapp.py#L9-L36
|
[
"def getWorkDirs():\n \"\"\"get input/output dirs (same input/output layout as for package)\"\"\"\n # get caller module\n caller_fullurl = inspect.stack()[1][1]\n caller_relurl = os.path.relpath(caller_fullurl)\n caller_modurl = os.path.splitext(caller_relurl)[0]\n # split caller_url & append 'Dir' to package name\n dirs = caller_modurl.split('/')\n dirs[0] = 'data' # TODO de-hardcode\n # get, check and create outdir\n outDir = os.path.join(*(['output'] + dirs[1:]))\n if not os.path.exists(outDir): os.makedirs(outDir)\n # get and check indir\n dirs.append('input')\n inDir = os.path.join(*dirs)\n if not os.path.exists(inDir):\n logging.critical('create input dir %s to continue!' % inDir)\n sys.exit(1)\n return inDir, outDir\n",
"def getMassRangesSums(\n indata, suffix = \"\", customRanges = None,\n onlyLMR = False, systLMR = False, singleRange = False\n):\n eRangesSyst = [ eRanges if customRanges is None else customRanges ]\n if systLMR:\n step_size, nsteps, rangeOffsetsLMR = 0.05, 6, [0.15, 0.5]\n eEdgesSyst = [ [ # all lower & upper edges for LMR syst. study\n Decimal(str(rangeOffsetsLMR[j]+i*step_size))\n for i in xrange(nsteps)\n ] for j in xrange(2) ]\n # all combos of lower and upper LMR edges\n eRangesSyst = [ [ le, ue ] for ue in eEdgesSyst[1] for le in eEdgesSyst[0] ]\n onlyLMR = False # flag meaningless in this case\n uInData = getUArray(indata)\n eInData = getEdges(indata)\n uSums = {}\n for erngs in eRangesSyst:\n for i, (e0, e1) in enumzipEdges(erngs):\n if onlyLMR and i != 1: continue\n uSum = getCocktailSum(e0, e1, eInData, uInData)\n if (not systLMR) and (onlyLMR or singleRange): return uSum\n logging.debug('%g - %g: %r' % (e0, e1, uSum))\n key = mass_titles[1 if systLMR else i] + suffix\n if systLMR: key += '_%s-%s' % (e0,e1)\n uSums[key] = uSum\n return uSums\n"
] |
import logging, argparse, os, re
import numpy as np
from ..ccsgp.config import default_colors
from ..ccsgp.ccsgp import make_plot, make_panel
from .utils import getWorkDirs, getMassRangesSums, getEnergy4Key
from math import pi, log
from collections import OrderedDict
def calc_REW_eta(nf):
  """Return (REW, eta) for the first nf active quark flavors (u, d, s).

  REW = 3 * sum(e_q^2) is the electroweak quark-parton-model value of R;
  eta = (sum e_q)^2 / REW is the singlet correction factor.
  """
  # fractional electric charges of the u, d and s quarks, truncated to nf
  quark_charges = np.array([2./3., -1./3., -1./3.])[:nf]
  REW = 3 * np.sum(quark_charges ** 2)
  eta = np.sum(quark_charges) ** 2 / REW
  return REW, eta
def calc_pQCD(nf): # nf = 2,3
  """Perturbative-QCD prediction for R = sigma(ee->hadrons)/sigma(ee->mumu).

  Evaluates R = REW * (1 + delta_QCD) on a sqrt(s) grid spanning the
  window for nf active flavors (nf=2: 0.5-1.02 GeV, nf=3: 1.02-3 GeV)
  and returns an Nx5 array [sqrt(s), R, 0, 0, 0] ready for make_plot.
  """
  REW, eta = calc_REW_eta(nf)
  limits = [0.5, 1.02, 3]  # GeV edges of the nf=2 and nf=3 windows
  L = 0.217  # scale parameter in GeV -- presumably Lambda_QCD; TODO confirm source
  beta0 = (11-2./3.*nf)/(4*pi)  # leading QCD beta-function coefficient
  # series coefficients c_n (n = 1..4) of the delta_QCD expansion in alpha_s/pi
  cn = [
    1, 1.9857-0.1152*nf,
    -6.63694-1.20013*nf-0.00518*nf**2-1.240*eta,
    -156.61+18.775*nf-0.7974*nf**2+0.0215*nf**3+(17.828-0.575*nf)*eta
  ]
  print 'beta0 = ', beta0, ', eta = ', eta
  print 'cn = ', cn
  # one-loop running coupling evaluated at the center-of-mass energy Q
  alpha_s = lambda Q: 1./(beta0*log((Q/L)**2)) # center-of-mass energy Q
  def delta_QCD(Q):
    # sum the truncated perturbative series in powers of alpha_s/pi
    delta = 0.
    for n, c in enumerate(cn):
      delta += c * (alpha_s(Q)/pi)**(n+1)
    return delta
  return np.array([
    [roots, REW*(1.+delta_QCD(roots)), 0, 0, 0]
    for roots in np.linspace(limits[nf-2], limits[nf-1])
  ])
def gp_ee_hadrons_xsec():
  """Plot the world data on R = sigma(ee->hadrons)/sigma(ee->mumu).

  Overlays three curves: the measured data points (stat. errors only),
  the naive quark-parton-model steps (REW for nf=2 and nf=3) and the
  perturbative-QCD prediction from calc_pQCD. Writes the plot to
  <outDir>/ee_hadrons_xsec.
  """
  inDir, outDir = getWorkDirs()
  infile = os.path.join(inDir, 'ee_hadrons_xsec.dat')
  data = np.loadtxt(open(infile, 'rb'))
  data[:,-1] = 0 # ignore systematic uncertainties
  # pQCD curve: nf=2 segment stacked on top of the nf=3 segment
  pQCD = calc_pQCD(2)
  pQCD = np.vstack((pQCD, calc_pQCD(3)))
  REW = [ calc_REW_eta(nf)[0] for nf in [2,3] ]
  print 'REW = ', REW
  # step function: REW(nf=2) below 1.02 GeV, REW(nf=3) above
  rew = np.array([
    [0.5, REW[0], 0, 0, 0], [1.02, REW[0], 0, 0, 0],
    [1.02, REW[1], 0, 0, 0], [3, REW[1], 0, 0, 0],
  ])
  make_plot(
    data = [data, rew, pQCD],
    properties = [
      'lc %s lw 2 lt 1 pt 18 ps 0.8' % (default_colors[0]),
      'with lines lt 2 lw 3 lc %s' % (default_colors[1]),
      'with lines lt 2 lw 3 lc %s' % (default_colors[2])
    ],
    titles = ['world data', 'naive quark-parton model', 'perturbative QCD'],
    name = os.path.join(outDir, 'ee_hadrons_xsec'),
    xlabel = '{/Symbol \326}s = M_{ee} (GeV)',
    ylabel = 'R = {/Symbol \163}(e^{+}e^{-}{/Symbol \256}hadrons) / {/Symbol \163}(e^{+}e^{-}{/Symbol \256}{/Symbol \155}^{+}{/Symbol \155}^{-})',
    size = '10in,7.5in', xr = [0.5, 3], yr = [0.7,60], ylog=True,
    bmargin = 0.14, rmargin = 0.99, tmargin = 0.99,
    gpcalls = ['bars small', 'format y "%g"'], key = ['width -6', 'nobox'],
    labels = {
      # resonance labels placed at data coordinates (x, y, True)
      '{/Symbol \162}': [0.7,1.1,True], '{/Symbol \167}': [0.8,25,True],
      '{/Symbol \146}': [1.05,40,True], "{/Symbol \162}'": [1.7,4,True],
    }
  )
def gp_rapp_overview_panel():
inDir, outDir = getWorkDirs()
energies = ['19', '27', '39', '62']
subkeys = ['Energy Dependence', '27 GeV Medium Effects']
dpt_dict = OrderedDict((subkey, [[], [], []]) for subkey in subkeys)
pseudo_data = np.array([[0,1,0,0,0]])
for i,title in enumerate([
'HMBT+QGP', 'HMBT', 'QGP', '{/Symbol \162}/{/Symbol \167} VacSF+FB', 'Cocktail (w/o {/Symbol \162})'
]):
dpt_dict[subkeys[0]][0].append(pseudo_data)
dpt_dict[subkeys[0]][1].append(
'with lines lt %d lc rgb "black" lw 5' % (i+1)
)
dpt_dict[subkeys[0]][2].append(title)
for i,modeltype in enumerate(['MedOnly', 'QgpOnly']):
infile = os.path.join(inDir, 'medium'+modeltype+'19.dat')
data = np.loadtxt(open(infile, 'rb'))
data[:,2:] = 0
dpt_dict[subkeys[0]][0].append(data)
dpt_dict[subkeys[0]][1].append(
'with lines lt %d lc %s lw 5' % (i+2, default_colors[0])
)
dpt_dict[subkeys[0]][2].append('')
for i,energy in enumerate(energies):
infile = os.path.join(inDir, 'medium'+energy+'.dat')
data = np.loadtxt(open(infile, 'rb'))
data[:,2:] = 0
dpt_dict[subkeys[0]][0].append(data)
dpt_dict[subkeys[0]][1].append('with lines lt 1 lc %s lw 5' % default_colors[i])
dpt_dict[subkeys[0]][2].append(' '.join([getEnergy4Key(energy), 'GeV']))
linetypes = [5,4,1]
for i,infile in enumerate([
'../../gp_panel/input/LatestPatrickJieYi/cocktail27.dat',
'vacRho27.dat', 'medium27.dat'
]):
data = np.loadtxt(open(os.path.join(inDir, infile), 'rb'))
data[:,(2,3)] = 0
if i != 2: data[:,4] = 0
dpt_dict[subkeys[1]][0].append(data)
dpt_dict[subkeys[1]][1].append('with %s lt %d lc %s lw 5' % (
'filledcurves pt 0' if i == 2 else 'lines', linetypes[i], default_colors[1]
))
dpt_dict[subkeys[1]][2].append('')
yr = [2e-5, 0.07]
make_panel(
dpt_dict = dpt_dict,
name = os.path.join(outDir, 'rapp_overview_panel'),
ylabel = '1/N@_{mb}^{evt} dN@_{ee}^{acc.}/dM_{ee} [ (GeV/c^2)^{-1} ]',
xlabel = 'invariant dielectron mass, M_{ee} (GeV/c^{2})',
ylog = True, xr = [0.3, 1.45], yr = yr,
layout = '2x1', size = '4in,9in', key = ['opaque', 'width -5'],
gpcalls = [
'object 51 rectangle back fc rgb "grey" from 0.75,%f to 0.825,%f' % (yr[0]*2, yr[1]/4),
'object 52 rectangle back fc rgb "grey" from 0.95,%f to 1.08,%f' % (yr[0]*2, yr[1]/4),
'object 53 rectangle back fc rgb "#C6E2FF" from 0.4,%f to 0.75,%f' % (yr[0]*2, yr[1]/4),
]
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--log", help="show log output", action="store_true")
args = parser.parse_args()
loglevel = 'DEBUG' if args.log else 'WARNING'
logging.basicConfig(
format='%(message)s', level=getattr(logging, loglevel)
)
#print gp_rapp()
#gp_ee_hadrons_xsec()
gp_rapp_overview_panel()
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/gp_sims.py
|
gp_sims
|
python
|
def gp_sims(version):
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version, 'cocktail_contribs')
xmax = {
'pion': 0.125, 'eta': 0.52, 'etap': 0.92, 'omega': 1.22,
'phi': 1.22, 'jpsi': 3.52
}
for particles in [
'pion', 'eta', 'etap', ['omega', 'rho'], 'phi', 'jpsi'
]:
if isinstance(particles, str):
particles = [particles]
contribs = OrderedDict()
for particle in particles:
for energy in energies:
fstem = particle+str(energy)
fname = os.path.join(inDir, fstem+'.dat')
contribs[fstem] = np.loadtxt(open(fname, 'rb'))
contribs[fstem][:,2:] = 0
print contribs.keys()
titles = [
' '.join([getEnergy4Key(str(energy)), 'GeV'])
for energy in energies
] + [ '' for k in range(len(contribs)-len(energies)) ]
make_plot(
data = contribs.values(),
properties = [
'with lines lc %s lw 4 lt %d' % (
default_colors[i%len(energies)], i/len(energies)+1
) for i in xrange(len(contribs))
],
titles = titles,
xlabel = xlabel, # if particles[0] == 'phi' else '',
ylabel = ylabel, # if particles[0] == 'pion' or particles[0] == 'omega' else '',
name = os.path.join(outDir, '_'.join(['sims']+particles)),
ylog = True, lmargin = 0.15, bmargin = 0.17, tmargin = 0.96, rmargin = 0.98,
gpcalls = [ 'nokey' ] if particles[0] != 'pion' else [],
xr = [1. if particles[0] == 'jpsi' else 0,xmax[particles[0]]],
size = '10in,6.5in',
labels = {
particleLabel4Key(particles[0]): [0.15,0.9,False],
particleLabel4Key(particles[1]) if len(particles) > 1 else '': [0.15,0.1,False],
}
)
|
example for a batch generating simple plots (cocktail contributions)
:param version: plot version / input subdir name
:type version: str
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/gp_sims.py#L12-L60
|
[
"def getWorkDirs():\n \"\"\"get input/output dirs (same input/output layout as for package)\"\"\"\n # get caller module\n caller_fullurl = inspect.stack()[1][1]\n caller_relurl = os.path.relpath(caller_fullurl)\n caller_modurl = os.path.splitext(caller_relurl)[0]\n # split caller_url & append 'Dir' to package name\n dirs = caller_modurl.split('/')\n dirs[0] = 'data' # TODO de-hardcode\n # get, check and create outdir\n outDir = os.path.join(*(['output'] + dirs[1:]))\n if not os.path.exists(outDir): os.makedirs(outDir)\n # get and check indir\n dirs.append('input')\n inDir = os.path.join(*dirs)\n if not os.path.exists(inDir):\n logging.critical('create input dir %s to continue!' % inDir)\n sys.exit(1)\n return inDir, outDir\n",
"def particleLabel4Key(k):\n if k == 'pion': return '{/Symbol \\160}^0 {/Symbol \\256} e^{+}e^{-}{/Symbol \\147}'\n if k == 'eta': return '{/Symbol \\150} {/Symbol \\256} e^{+}e^{-}{/Symbol \\147}'\n if k == 'etap': return '{/Symbol \\150}\\' {/Symbol \\256} e^{+}e^{-}{/Symbol \\147}'\n if k == 'rho': return '{/Symbol \\162} {/Symbol \\256} e^{+}e^{-}'\n if k == 'omega': return '{/Symbol \\167} {/Symbol \\256} e^{+}e^{-}({/Symbol \\160})'\n if k == 'phi': return '{/Symbol \\146} {/Symbol \\256} e^{+}e^{-}({/Symbol \\150})'\n if k == 'jpsi': return 'J/{/Symbol \\171} {/Symbol \\256} e^{+}e^{-}'\n if k == 'ccbar':\n return 'c@^{/=18-}c {/Symbol \\256} D/{/Symbol \\514} {/Symbol \\256} e^{+}e^{-}'\n return k\n"
] |
import os, argparse, logging
from .utils import getWorkDirs, getEnergy4Key, particleLabel4Key
from collections import OrderedDict
from ..ccsgp.ccsgp import make_plot, make_panel
from ..ccsgp.config import default_colors
import numpy as np
energies = [19, 27, 39, 62]
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})'
ylabel = '1/N@_{mb}^{evt} dN@_{ee}^{acc.}/dM_{ee} [ (GeV/c^2)^{-1} ]'
def gp_sims_panel(version):
"""panel plot of cocktail simulations at all energies, includ. total
:param version: plot version / input subdir name
:type version: str
"""
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version)
mesons = ['pion', 'eta', 'etap', 'rho', 'omega', 'phi', 'jpsi']
fstems = ['cocktail_contribs/'+m for m in mesons] + ['cocktail', 'cocktail_contribs/ccbar']
data = OrderedDict((energy, [
np.loadtxt(open(os.path.join(inDir, fstem+str(energy)+'.dat'), 'rb'))
for fstem in fstems
]) for energy in energies)
for v in data.values():
# keep syserrs for total cocktail
v[-2][:,(2,3)] = 0
# all errors zero for cocktail contribs
v[-1][:,2:] = 0
for d in v[:-2]: d[:,2:] = 0
make_panel(
dpt_dict = OrderedDict((
' '.join([getEnergy4Key(str(energy)), 'GeV']),
[ data[energy], [
'with %s lc %s lw 5 lt %d' % (
'lines' if i!=len(fstems)-2 else 'filledcurves pt 0',
default_colors[(-2*i-2) if i!=len(fstems)-2 else 0] \
if i!=len(fstems)-1 else default_colors[1], int(i==3)+1
) for i in xrange(len(fstems))
], [particleLabel4Key(m) for m in mesons] + [
'Cocktail (w/o {/Symbol \162})', particleLabel4Key('ccbar')
]]
) for energy in energies),
name = os.path.join(outDir, 'sims_panel'),
ylog = True, xr = [0.,3.2], yr = [1e-6,9],
xlabel = xlabel, ylabel = ylabel,
layout = '2x2', size = '7in,9in',
key = ['width -4', 'spacing 1.5', 'nobox', 'at graph 0.9,0.95']
)
def gp_sims_total_overlay(version):
"""single plot comparing total cocktails at all energies
:param version: plot version / input subdir name
:type version: str
"""
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version)
data = OrderedDict()
for energy in energies:
fname = os.path.join(inDir, 'cocktail'+str(energy)+'.dat')
data[energy] = np.loadtxt(open(fname, 'rb'))
data[energy][:,2:] = 0
make_plot(
data = data.values(),
properties = [
'with lines lc %s lw 4 lt 1' % (default_colors[i])
for i in xrange(len(energies))
],
titles = [
' '.join([getEnergy4Key(str(energy)), 'GeV'])
for energy in energies
],
xlabel = xlabel, ylabel = ylabel,
name = os.path.join(outDir, 'sims_total_overlay'),
ylog = True, xr = [0.,3.2], yr = [1e-6,9],
tmargin = 0.98, rmargin = 0.99, bmargin = 0.14,
size = '8.5in,8in',
)
def gp_sims_totalerrors_overlay(version):
"""single plot comparing syst. uncertainties on total cocktails at all energies
:param version: plot version / input subdir name
:type version: str
"""
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version)
data = OrderedDict()
for energy in energies:
fname = os.path.join(inDir, 'cocktail'+str(energy)+'.dat')
data[energy] = np.loadtxt(open(fname, 'rb'))
data[energy][:,1] = data[energy][:,4]/data[energy][:,1]
data[energy][:,2:] = 0
make_plot(
data = data.values(),
properties = [
'with lines lc %s lw 4 lt 1' % (default_colors[i])
for i in xrange(len(energies))
],
titles = [
' '.join([getEnergy4Key(str(energy)), 'GeV'])
for energy in energies
],
xlabel = xlabel, ylabel = 'total relative systematic uncertainty',
name = os.path.join(outDir, 'sims_totalerrors_overlay'),
xr = [0.,3.2], yr = [0.15,0.65], key = ['at graph 0.7,0.3'],
tmargin = 0.98, rmargin = 0.99, bmargin = 0.14,
size = '8.5in,8in',
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("version", help="version = subdir name of input dir")
parser.add_argument("--log", help="show log output", action="store_true")
args = parser.parse_args()
loglevel = 'DEBUG' if args.log else 'WARNING'
logging.basicConfig(
format='%(message)s', level=getattr(logging, loglevel)
)
#gp_sims(args.version)
#gp_sims_panel(args.version)
#gp_sims_total_overlay(args.version)
gp_sims_totalerrors_overlay(args.version)
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/gp_sims.py
|
gp_sims_panel
|
python
|
def gp_sims_panel(version):
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version)
mesons = ['pion', 'eta', 'etap', 'rho', 'omega', 'phi', 'jpsi']
fstems = ['cocktail_contribs/'+m for m in mesons] + ['cocktail', 'cocktail_contribs/ccbar']
data = OrderedDict((energy, [
np.loadtxt(open(os.path.join(inDir, fstem+str(energy)+'.dat'), 'rb'))
for fstem in fstems
]) for energy in energies)
for v in data.values():
# keep syserrs for total cocktail
v[-2][:,(2,3)] = 0
# all errors zero for cocktail contribs
v[-1][:,2:] = 0
for d in v[:-2]: d[:,2:] = 0
make_panel(
dpt_dict = OrderedDict((
' '.join([getEnergy4Key(str(energy)), 'GeV']),
[ data[energy], [
'with %s lc %s lw 5 lt %d' % (
'lines' if i!=len(fstems)-2 else 'filledcurves pt 0',
default_colors[(-2*i-2) if i!=len(fstems)-2 else 0] \
if i!=len(fstems)-1 else default_colors[1], int(i==3)+1
) for i in xrange(len(fstems))
], [particleLabel4Key(m) for m in mesons] + [
'Cocktail (w/o {/Symbol \162})', particleLabel4Key('ccbar')
]]
) for energy in energies),
name = os.path.join(outDir, 'sims_panel'),
ylog = True, xr = [0.,3.2], yr = [1e-6,9],
xlabel = xlabel, ylabel = ylabel,
layout = '2x2', size = '7in,9in',
key = ['width -4', 'spacing 1.5', 'nobox', 'at graph 0.9,0.95']
)
|
panel plot of cocktail simulations at all energies, includ. total
:param version: plot version / input subdir name
:type version: str
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/gp_sims.py#L62-L100
|
[
"def getWorkDirs():\n \"\"\"get input/output dirs (same input/output layout as for package)\"\"\"\n # get caller module\n caller_fullurl = inspect.stack()[1][1]\n caller_relurl = os.path.relpath(caller_fullurl)\n caller_modurl = os.path.splitext(caller_relurl)[0]\n # split caller_url & append 'Dir' to package name\n dirs = caller_modurl.split('/')\n dirs[0] = 'data' # TODO de-hardcode\n # get, check and create outdir\n outDir = os.path.join(*(['output'] + dirs[1:]))\n if not os.path.exists(outDir): os.makedirs(outDir)\n # get and check indir\n dirs.append('input')\n inDir = os.path.join(*dirs)\n if not os.path.exists(inDir):\n logging.critical('create input dir %s to continue!' % inDir)\n sys.exit(1)\n return inDir, outDir\n"
] |
import os, argparse, logging
from .utils import getWorkDirs, getEnergy4Key, particleLabel4Key
from collections import OrderedDict
from ..ccsgp.ccsgp import make_plot, make_panel
from ..ccsgp.config import default_colors
import numpy as np
energies = [19, 27, 39, 62]
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})'
ylabel = '1/N@_{mb}^{evt} dN@_{ee}^{acc.}/dM_{ee} [ (GeV/c^2)^{-1} ]'
def gp_sims(version):
"""example for a batch generating simple plots (cocktail contributions)
:param version: plot version / input subdir name
:type version: str
"""
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version, 'cocktail_contribs')
xmax = {
'pion': 0.125, 'eta': 0.52, 'etap': 0.92, 'omega': 1.22,
'phi': 1.22, 'jpsi': 3.52
}
for particles in [
'pion', 'eta', 'etap', ['omega', 'rho'], 'phi', 'jpsi'
]:
if isinstance(particles, str):
particles = [particles]
contribs = OrderedDict()
for particle in particles:
for energy in energies:
fstem = particle+str(energy)
fname = os.path.join(inDir, fstem+'.dat')
contribs[fstem] = np.loadtxt(open(fname, 'rb'))
contribs[fstem][:,2:] = 0
print contribs.keys()
titles = [
' '.join([getEnergy4Key(str(energy)), 'GeV'])
for energy in energies
] + [ '' for k in range(len(contribs)-len(energies)) ]
make_plot(
data = contribs.values(),
properties = [
'with lines lc %s lw 4 lt %d' % (
default_colors[i%len(energies)], i/len(energies)+1
) for i in xrange(len(contribs))
],
titles = titles,
xlabel = xlabel, # if particles[0] == 'phi' else '',
ylabel = ylabel, # if particles[0] == 'pion' or particles[0] == 'omega' else '',
name = os.path.join(outDir, '_'.join(['sims']+particles)),
ylog = True, lmargin = 0.15, bmargin = 0.17, tmargin = 0.96, rmargin = 0.98,
gpcalls = [ 'nokey' ] if particles[0] != 'pion' else [],
xr = [1. if particles[0] == 'jpsi' else 0,xmax[particles[0]]],
size = '10in,6.5in',
labels = {
particleLabel4Key(particles[0]): [0.15,0.9,False],
particleLabel4Key(particles[1]) if len(particles) > 1 else '': [0.15,0.1,False],
}
)
def gp_sims_total_overlay(version):
"""single plot comparing total cocktails at all energies
:param version: plot version / input subdir name
:type version: str
"""
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version)
data = OrderedDict()
for energy in energies:
fname = os.path.join(inDir, 'cocktail'+str(energy)+'.dat')
data[energy] = np.loadtxt(open(fname, 'rb'))
data[energy][:,2:] = 0
make_plot(
data = data.values(),
properties = [
'with lines lc %s lw 4 lt 1' % (default_colors[i])
for i in xrange(len(energies))
],
titles = [
' '.join([getEnergy4Key(str(energy)), 'GeV'])
for energy in energies
],
xlabel = xlabel, ylabel = ylabel,
name = os.path.join(outDir, 'sims_total_overlay'),
ylog = True, xr = [0.,3.2], yr = [1e-6,9],
tmargin = 0.98, rmargin = 0.99, bmargin = 0.14,
size = '8.5in,8in',
)
def gp_sims_totalerrors_overlay(version):
"""single plot comparing syst. uncertainties on total cocktails at all energies
:param version: plot version / input subdir name
:type version: str
"""
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version)
data = OrderedDict()
for energy in energies:
fname = os.path.join(inDir, 'cocktail'+str(energy)+'.dat')
data[energy] = np.loadtxt(open(fname, 'rb'))
data[energy][:,1] = data[energy][:,4]/data[energy][:,1]
data[energy][:,2:] = 0
make_plot(
data = data.values(),
properties = [
'with lines lc %s lw 4 lt 1' % (default_colors[i])
for i in xrange(len(energies))
],
titles = [
' '.join([getEnergy4Key(str(energy)), 'GeV'])
for energy in energies
],
xlabel = xlabel, ylabel = 'total relative systematic uncertainty',
name = os.path.join(outDir, 'sims_totalerrors_overlay'),
xr = [0.,3.2], yr = [0.15,0.65], key = ['at graph 0.7,0.3'],
tmargin = 0.98, rmargin = 0.99, bmargin = 0.14,
size = '8.5in,8in',
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("version", help="version = subdir name of input dir")
parser.add_argument("--log", help="show log output", action="store_true")
args = parser.parse_args()
loglevel = 'DEBUG' if args.log else 'WARNING'
logging.basicConfig(
format='%(message)s', level=getattr(logging, loglevel)
)
#gp_sims(args.version)
#gp_sims_panel(args.version)
#gp_sims_total_overlay(args.version)
gp_sims_totalerrors_overlay(args.version)
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/gp_sims.py
|
gp_sims_total_overlay
|
python
|
def gp_sims_total_overlay(version):
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version)
data = OrderedDict()
for energy in energies:
fname = os.path.join(inDir, 'cocktail'+str(energy)+'.dat')
data[energy] = np.loadtxt(open(fname, 'rb'))
data[energy][:,2:] = 0
make_plot(
data = data.values(),
properties = [
'with lines lc %s lw 4 lt 1' % (default_colors[i])
for i in xrange(len(energies))
],
titles = [
' '.join([getEnergy4Key(str(energy)), 'GeV'])
for energy in energies
],
xlabel = xlabel, ylabel = ylabel,
name = os.path.join(outDir, 'sims_total_overlay'),
ylog = True, xr = [0.,3.2], yr = [1e-6,9],
tmargin = 0.98, rmargin = 0.99, bmargin = 0.14,
size = '8.5in,8in',
)
|
single plot comparing total cocktails at all energies
:param version: plot version / input subdir name
:type version: str
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/gp_sims.py#L102-L130
|
[
"def getWorkDirs():\n \"\"\"get input/output dirs (same input/output layout as for package)\"\"\"\n # get caller module\n caller_fullurl = inspect.stack()[1][1]\n caller_relurl = os.path.relpath(caller_fullurl)\n caller_modurl = os.path.splitext(caller_relurl)[0]\n # split caller_url & append 'Dir' to package name\n dirs = caller_modurl.split('/')\n dirs[0] = 'data' # TODO de-hardcode\n # get, check and create outdir\n outDir = os.path.join(*(['output'] + dirs[1:]))\n if not os.path.exists(outDir): os.makedirs(outDir)\n # get and check indir\n dirs.append('input')\n inDir = os.path.join(*dirs)\n if not os.path.exists(inDir):\n logging.critical('create input dir %s to continue!' % inDir)\n sys.exit(1)\n return inDir, outDir\n"
] |
import os, argparse, logging
from .utils import getWorkDirs, getEnergy4Key, particleLabel4Key
from collections import OrderedDict
from ..ccsgp.ccsgp import make_plot, make_panel
from ..ccsgp.config import default_colors
import numpy as np
energies = [19, 27, 39, 62]
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})'
ylabel = '1/N@_{mb}^{evt} dN@_{ee}^{acc.}/dM_{ee} [ (GeV/c^2)^{-1} ]'
def gp_sims(version):
"""example for a batch generating simple plots (cocktail contributions)
:param version: plot version / input subdir name
:type version: str
"""
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version, 'cocktail_contribs')
xmax = {
'pion': 0.125, 'eta': 0.52, 'etap': 0.92, 'omega': 1.22,
'phi': 1.22, 'jpsi': 3.52
}
for particles in [
'pion', 'eta', 'etap', ['omega', 'rho'], 'phi', 'jpsi'
]:
if isinstance(particles, str):
particles = [particles]
contribs = OrderedDict()
for particle in particles:
for energy in energies:
fstem = particle+str(energy)
fname = os.path.join(inDir, fstem+'.dat')
contribs[fstem] = np.loadtxt(open(fname, 'rb'))
contribs[fstem][:,2:] = 0
print contribs.keys()
titles = [
' '.join([getEnergy4Key(str(energy)), 'GeV'])
for energy in energies
] + [ '' for k in range(len(contribs)-len(energies)) ]
make_plot(
data = contribs.values(),
properties = [
'with lines lc %s lw 4 lt %d' % (
default_colors[i%len(energies)], i/len(energies)+1
) for i in xrange(len(contribs))
],
titles = titles,
xlabel = xlabel, # if particles[0] == 'phi' else '',
ylabel = ylabel, # if particles[0] == 'pion' or particles[0] == 'omega' else '',
name = os.path.join(outDir, '_'.join(['sims']+particles)),
ylog = True, lmargin = 0.15, bmargin = 0.17, tmargin = 0.96, rmargin = 0.98,
gpcalls = [ 'nokey' ] if particles[0] != 'pion' else [],
xr = [1. if particles[0] == 'jpsi' else 0,xmax[particles[0]]],
size = '10in,6.5in',
labels = {
particleLabel4Key(particles[0]): [0.15,0.9,False],
particleLabel4Key(particles[1]) if len(particles) > 1 else '': [0.15,0.1,False],
}
)
def gp_sims_panel(version):
"""panel plot of cocktail simulations at all energies, includ. total
:param version: plot version / input subdir name
:type version: str
"""
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version)
mesons = ['pion', 'eta', 'etap', 'rho', 'omega', 'phi', 'jpsi']
fstems = ['cocktail_contribs/'+m for m in mesons] + ['cocktail', 'cocktail_contribs/ccbar']
data = OrderedDict((energy, [
np.loadtxt(open(os.path.join(inDir, fstem+str(energy)+'.dat'), 'rb'))
for fstem in fstems
]) for energy in energies)
for v in data.values():
# keep syserrs for total cocktail
v[-2][:,(2,3)] = 0
# all errors zero for cocktail contribs
v[-1][:,2:] = 0
for d in v[:-2]: d[:,2:] = 0
make_panel(
dpt_dict = OrderedDict((
' '.join([getEnergy4Key(str(energy)), 'GeV']),
[ data[energy], [
'with %s lc %s lw 5 lt %d' % (
'lines' if i!=len(fstems)-2 else 'filledcurves pt 0',
default_colors[(-2*i-2) if i!=len(fstems)-2 else 0] \
if i!=len(fstems)-1 else default_colors[1], int(i==3)+1
) for i in xrange(len(fstems))
], [particleLabel4Key(m) for m in mesons] + [
'Cocktail (w/o {/Symbol \162})', particleLabel4Key('ccbar')
]]
) for energy in energies),
name = os.path.join(outDir, 'sims_panel'),
ylog = True, xr = [0.,3.2], yr = [1e-6,9],
xlabel = xlabel, ylabel = ylabel,
layout = '2x2', size = '7in,9in',
key = ['width -4', 'spacing 1.5', 'nobox', 'at graph 0.9,0.95']
)
def gp_sims_totalerrors_overlay(version):
"""single plot comparing syst. uncertainties on total cocktails at all energies
:param version: plot version / input subdir name
:type version: str
"""
inDir, outDir = getWorkDirs()
inDir = os.path.join(inDir, version)
data = OrderedDict()
for energy in energies:
fname = os.path.join(inDir, 'cocktail'+str(energy)+'.dat')
data[energy] = np.loadtxt(open(fname, 'rb'))
data[energy][:,1] = data[energy][:,4]/data[energy][:,1]
data[energy][:,2:] = 0
make_plot(
data = data.values(),
properties = [
'with lines lc %s lw 4 lt 1' % (default_colors[i])
for i in xrange(len(energies))
],
titles = [
' '.join([getEnergy4Key(str(energy)), 'GeV'])
for energy in energies
],
xlabel = xlabel, ylabel = 'total relative systematic uncertainty',
name = os.path.join(outDir, 'sims_totalerrors_overlay'),
xr = [0.,3.2], yr = [0.15,0.65], key = ['at graph 0.7,0.3'],
tmargin = 0.98, rmargin = 0.99, bmargin = 0.14,
size = '8.5in,8in',
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("version", help="version = subdir name of input dir")
parser.add_argument("--log", help="show log output", action="store_true")
args = parser.parse_args()
loglevel = 'DEBUG' if args.log else 'WARNING'
logging.basicConfig(
format='%(message)s', level=getattr(logging, loglevel)
)
#gp_sims(args.version)
#gp_sims_panel(args.version)
#gp_sims_total_overlay(args.version)
gp_sims_totalerrors_overlay(args.version)
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/utils.py
|
getWorkDirs
|
python
|
def getWorkDirs():
# get caller module
caller_fullurl = inspect.stack()[1][1]
caller_relurl = os.path.relpath(caller_fullurl)
caller_modurl = os.path.splitext(caller_relurl)[0]
# split caller_url & append 'Dir' to package name
dirs = caller_modurl.split('/')
dirs[0] = 'data' # TODO de-hardcode
# get, check and create outdir
outDir = os.path.join(*(['output'] + dirs[1:]))
if not os.path.exists(outDir): os.makedirs(outDir)
# get and check indir
dirs.append('input')
inDir = os.path.join(*dirs)
if not os.path.exists(inDir):
logging.critical('create input dir %s to continue!' % inDir)
sys.exit(1)
return inDir, outDir
|
get input/output dirs (same input/output layout as for package)
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/utils.py#L9-L27
| null |
import sys, os, itertools, inspect, logging, math
import numpy as np
from uncertainties import ufloat
from uncertainties.umath import fsum
from decimal import Decimal
mass_titles = [ 'pi0', 'LMR', 'omphi', 'IMR' ]
eRanges = np.array([ Decimal(str(e)) for e in [ 0, 0.4, 0.75, 1.1, 3. ] ])
def getUArray(npArr):
"""uncertainty array multiplied by binwidth (col2 = dx)"""
ufloats = []
for dp in npArr:
u = ufloat(dp[1], abs(dp[3]), 'stat')
v = ufloat(dp[1], abs(dp[4]), 'syst')
r = (u+v)/2.*dp[2]*2.
ufloats.append(r)
# NOTE: center value ok, but both error contribs half!
# see getErrorComponent()
return np.array(ufloats)
def getErrorComponent(result, tag):
"""get total error contribution for component with specific tag"""
return math.sqrt(sum(
(error*2)**2
for (var, error) in result.error_components().items()
if var.tag == tag
))
def getEdges(npArr):
"""get np array of bin edges"""
edges = np.concatenate(([0], npArr[:,0] + npArr[:,2]))
return np.array([Decimal(str(i)) for i in edges])
def getMaskIndices(mask):
"""get lower and upper index of mask"""
return [
list(mask).index(True), len(mask) - 1 - list(mask)[::-1].index(True)
]
def enumzipEdges(eArr):
"""zip and enumerate edges into pairs of lower and upper limits"""
return enumerate(zip(eArr[:-1], eArr[1:]))
def getCocktailSum(e0, e1, eCocktail, uCocktail):
"""get the cocktail sum for a given data bin range"""
# get mask and according indices
mask = (eCocktail >= e0) & (eCocktail <= e1)
# data bin range wider than single cocktail bin
if np.any(mask):
idx = getMaskIndices(mask)
# determine coinciding flags
eCl, eCu = eCocktail[idx[0]], eCocktail[idx[1]]
not_coinc_low, not_coinc_upp = (eCl != e0), (eCu != e1)
# get cocktail sum in data bin (always w/o last bin)
uCocktailSum = fsum(uCocktail[mask[:-1]][:-1])
logging.debug(' sum: {}'.format(uCocktailSum))
# get correction for non-coinciding edges
if not_coinc_low:
eCl_bw = eCl - eCocktail[idx[0]-1]
corr_low = (eCl - e0) / eCl_bw
abs_corr_low = float(corr_low) * uCocktail[idx[0]-1]
uCocktailSum += abs_corr_low
logging.debug((' low: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e0, eCl, eCl - e0, eCl_bw, corr_low
)).format(abs_corr_low, uCocktailSum))
if not_coinc_upp:
if idx[1]+1 < len(eCocktail):
eCu_bw = eCocktail[idx[1]+1] - eCu
corr_upp = (e1 - eCu) / eCu_bw
abs_corr_upp = float(corr_upp) * uCocktail[idx[1]]
else:# catch last index (quick fix!)
abs_corr_upp = eCu_bw = corr_upp = 0
uCocktailSum += abs_corr_upp
logging.debug((' upp: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e1, eCu, e1 - eCu, eCu_bw, corr_upp
)).format(abs_corr_upp, uCocktailSum))
else:
mask = (eCocktail >= e0)
idx = getMaskIndices(mask) # only use first index
# catch if already at last index
if idx[0] == idx[1] and idx[0] == len(eCocktail)-1:
corr = (e1 - e0) / (eCocktail[idx[0]] - eCocktail[idx[0]-1])
uCocktailSum = float(corr) * uCocktail[idx[0]-1]
else: # default case
corr = (e1 - e0) / (eCocktail[idx[0]+1] - eCocktail[idx[0]])
uCocktailSum = float(corr) * uCocktail[idx[0]]
logging.debug(' sum: {}'.format(uCocktailSum))
return uCocktailSum
def getMassRangesSums(
indata, suffix = "", customRanges = None,
onlyLMR = False, systLMR = False, singleRange = False
):
eRangesSyst = [ eRanges if customRanges is None else customRanges ]
if systLMR:
step_size, nsteps, rangeOffsetsLMR = 0.05, 6, [0.15, 0.5]
eEdgesSyst = [ [ # all lower & upper edges for LMR syst. study
Decimal(str(rangeOffsetsLMR[j]+i*step_size))
for i in xrange(nsteps)
] for j in xrange(2) ]
# all combos of lower and upper LMR edges
eRangesSyst = [ [ le, ue ] for ue in eEdgesSyst[1] for le in eEdgesSyst[0] ]
onlyLMR = False # flag meaningless in this case
uInData = getUArray(indata)
eInData = getEdges(indata)
uSums = {}
for erngs in eRangesSyst:
for i, (e0, e1) in enumzipEdges(erngs):
if onlyLMR and i != 1: continue
uSum = getCocktailSum(e0, e1, eInData, uInData)
if (not systLMR) and (onlyLMR or singleRange): return uSum
logging.debug('%g - %g: %r' % (e0, e1, uSum))
key = mass_titles[1 if systLMR else i] + suffix
if systLMR: key += '_%s-%s' % (e0,e1)
uSums[key] = uSum
return uSums
def getEnergy4Key(energy):
if energy == '19': return '19.6'
if energy == '62': return '62.4'
return energy
def particleLabel4Key(k):
if k == 'pion': return '{/Symbol \160}^0 {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'eta': return '{/Symbol \150} {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'etap': return '{/Symbol \150}\' {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'rho': return '{/Symbol \162} {/Symbol \256} e^{+}e^{-}'
if k == 'omega': return '{/Symbol \167} {/Symbol \256} e^{+}e^{-}({/Symbol \160})'
if k == 'phi': return '{/Symbol \146} {/Symbol \256} e^{+}e^{-}({/Symbol \150})'
if k == 'jpsi': return 'J/{/Symbol \171} {/Symbol \256} e^{+}e^{-}'
if k == 'ccbar':
return 'c@^{/=18-}c {/Symbol \256} D/{/Symbol \514} {/Symbol \256} e^{+}e^{-}'
return k
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/utils.py
|
getUArray
|
python
|
def getUArray(npArr):
ufloats = []
for dp in npArr:
u = ufloat(dp[1], abs(dp[3]), 'stat')
v = ufloat(dp[1], abs(dp[4]), 'syst')
r = (u+v)/2.*dp[2]*2.
ufloats.append(r)
# NOTE: center value ok, but both error contribs half!
# see getErrorComponent()
return np.array(ufloats)
|
uncertainty array multiplied by binwidth (col2 = dx)
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/utils.py#L29-L39
| null |
import sys, os, itertools, inspect, logging, math
import numpy as np
from uncertainties import ufloat
from uncertainties.umath import fsum
from decimal import Decimal
mass_titles = [ 'pi0', 'LMR', 'omphi', 'IMR' ]
eRanges = np.array([ Decimal(str(e)) for e in [ 0, 0.4, 0.75, 1.1, 3. ] ])
def getWorkDirs():
"""get input/output dirs (same input/output layout as for package)"""
# get caller module
caller_fullurl = inspect.stack()[1][1]
caller_relurl = os.path.relpath(caller_fullurl)
caller_modurl = os.path.splitext(caller_relurl)[0]
# split caller_url & append 'Dir' to package name
dirs = caller_modurl.split('/')
dirs[0] = 'data' # TODO de-hardcode
# get, check and create outdir
outDir = os.path.join(*(['output'] + dirs[1:]))
if not os.path.exists(outDir): os.makedirs(outDir)
# get and check indir
dirs.append('input')
inDir = os.path.join(*dirs)
if not os.path.exists(inDir):
logging.critical('create input dir %s to continue!' % inDir)
sys.exit(1)
return inDir, outDir
def getErrorComponent(result, tag):
"""get total error contribution for component with specific tag"""
return math.sqrt(sum(
(error*2)**2
for (var, error) in result.error_components().items()
if var.tag == tag
))
def getEdges(npArr):
"""get np array of bin edges"""
edges = np.concatenate(([0], npArr[:,0] + npArr[:,2]))
return np.array([Decimal(str(i)) for i in edges])
def getMaskIndices(mask):
"""get lower and upper index of mask"""
return [
list(mask).index(True), len(mask) - 1 - list(mask)[::-1].index(True)
]
def enumzipEdges(eArr):
"""zip and enumerate edges into pairs of lower and upper limits"""
return enumerate(zip(eArr[:-1], eArr[1:]))
def getCocktailSum(e0, e1, eCocktail, uCocktail):
"""get the cocktail sum for a given data bin range"""
# get mask and according indices
mask = (eCocktail >= e0) & (eCocktail <= e1)
# data bin range wider than single cocktail bin
if np.any(mask):
idx = getMaskIndices(mask)
# determine coinciding flags
eCl, eCu = eCocktail[idx[0]], eCocktail[idx[1]]
not_coinc_low, not_coinc_upp = (eCl != e0), (eCu != e1)
# get cocktail sum in data bin (always w/o last bin)
uCocktailSum = fsum(uCocktail[mask[:-1]][:-1])
logging.debug(' sum: {}'.format(uCocktailSum))
# get correction for non-coinciding edges
if not_coinc_low:
eCl_bw = eCl - eCocktail[idx[0]-1]
corr_low = (eCl - e0) / eCl_bw
abs_corr_low = float(corr_low) * uCocktail[idx[0]-1]
uCocktailSum += abs_corr_low
logging.debug((' low: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e0, eCl, eCl - e0, eCl_bw, corr_low
)).format(abs_corr_low, uCocktailSum))
if not_coinc_upp:
if idx[1]+1 < len(eCocktail):
eCu_bw = eCocktail[idx[1]+1] - eCu
corr_upp = (e1 - eCu) / eCu_bw
abs_corr_upp = float(corr_upp) * uCocktail[idx[1]]
else:# catch last index (quick fix!)
abs_corr_upp = eCu_bw = corr_upp = 0
uCocktailSum += abs_corr_upp
logging.debug((' upp: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e1, eCu, e1 - eCu, eCu_bw, corr_upp
)).format(abs_corr_upp, uCocktailSum))
else:
mask = (eCocktail >= e0)
idx = getMaskIndices(mask) # only use first index
# catch if already at last index
if idx[0] == idx[1] and idx[0] == len(eCocktail)-1:
corr = (e1 - e0) / (eCocktail[idx[0]] - eCocktail[idx[0]-1])
uCocktailSum = float(corr) * uCocktail[idx[0]-1]
else: # default case
corr = (e1 - e0) / (eCocktail[idx[0]+1] - eCocktail[idx[0]])
uCocktailSum = float(corr) * uCocktail[idx[0]]
logging.debug(' sum: {}'.format(uCocktailSum))
return uCocktailSum
def getMassRangesSums(
indata, suffix = "", customRanges = None,
onlyLMR = False, systLMR = False, singleRange = False
):
eRangesSyst = [ eRanges if customRanges is None else customRanges ]
if systLMR:
step_size, nsteps, rangeOffsetsLMR = 0.05, 6, [0.15, 0.5]
eEdgesSyst = [ [ # all lower & upper edges for LMR syst. study
Decimal(str(rangeOffsetsLMR[j]+i*step_size))
for i in xrange(nsteps)
] for j in xrange(2) ]
# all combos of lower and upper LMR edges
eRangesSyst = [ [ le, ue ] for ue in eEdgesSyst[1] for le in eEdgesSyst[0] ]
onlyLMR = False # flag meaningless in this case
uInData = getUArray(indata)
eInData = getEdges(indata)
uSums = {}
for erngs in eRangesSyst:
for i, (e0, e1) in enumzipEdges(erngs):
if onlyLMR and i != 1: continue
uSum = getCocktailSum(e0, e1, eInData, uInData)
if (not systLMR) and (onlyLMR or singleRange): return uSum
logging.debug('%g - %g: %r' % (e0, e1, uSum))
key = mass_titles[1 if systLMR else i] + suffix
if systLMR: key += '_%s-%s' % (e0,e1)
uSums[key] = uSum
return uSums
def getEnergy4Key(energy):
if energy == '19': return '19.6'
if energy == '62': return '62.4'
return energy
def particleLabel4Key(k):
if k == 'pion': return '{/Symbol \160}^0 {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'eta': return '{/Symbol \150} {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'etap': return '{/Symbol \150}\' {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'rho': return '{/Symbol \162} {/Symbol \256} e^{+}e^{-}'
if k == 'omega': return '{/Symbol \167} {/Symbol \256} e^{+}e^{-}({/Symbol \160})'
if k == 'phi': return '{/Symbol \146} {/Symbol \256} e^{+}e^{-}({/Symbol \150})'
if k == 'jpsi': return 'J/{/Symbol \171} {/Symbol \256} e^{+}e^{-}'
if k == 'ccbar':
return 'c@^{/=18-}c {/Symbol \256} D/{/Symbol \514} {/Symbol \256} e^{+}e^{-}'
return k
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/utils.py
|
getErrorComponent
|
python
|
def getErrorComponent(result, tag):
return math.sqrt(sum(
(error*2)**2
for (var, error) in result.error_components().items()
if var.tag == tag
))
|
get total error contribution for component with specific tag
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/utils.py#L41-L47
| null |
import sys, os, itertools, inspect, logging, math
import numpy as np
from uncertainties import ufloat
from uncertainties.umath import fsum
from decimal import Decimal
mass_titles = [ 'pi0', 'LMR', 'omphi', 'IMR' ]
eRanges = np.array([ Decimal(str(e)) for e in [ 0, 0.4, 0.75, 1.1, 3. ] ])
def getWorkDirs():
"""get input/output dirs (same input/output layout as for package)"""
# get caller module
caller_fullurl = inspect.stack()[1][1]
caller_relurl = os.path.relpath(caller_fullurl)
caller_modurl = os.path.splitext(caller_relurl)[0]
# split caller_url & append 'Dir' to package name
dirs = caller_modurl.split('/')
dirs[0] = 'data' # TODO de-hardcode
# get, check and create outdir
outDir = os.path.join(*(['output'] + dirs[1:]))
if not os.path.exists(outDir): os.makedirs(outDir)
# get and check indir
dirs.append('input')
inDir = os.path.join(*dirs)
if not os.path.exists(inDir):
logging.critical('create input dir %s to continue!' % inDir)
sys.exit(1)
return inDir, outDir
def getUArray(npArr):
"""uncertainty array multiplied by binwidth (col2 = dx)"""
ufloats = []
for dp in npArr:
u = ufloat(dp[1], abs(dp[3]), 'stat')
v = ufloat(dp[1], abs(dp[4]), 'syst')
r = (u+v)/2.*dp[2]*2.
ufloats.append(r)
# NOTE: center value ok, but both error contribs half!
# see getErrorComponent()
return np.array(ufloats)
def getEdges(npArr):
"""get np array of bin edges"""
edges = np.concatenate(([0], npArr[:,0] + npArr[:,2]))
return np.array([Decimal(str(i)) for i in edges])
def getMaskIndices(mask):
"""get lower and upper index of mask"""
return [
list(mask).index(True), len(mask) - 1 - list(mask)[::-1].index(True)
]
def enumzipEdges(eArr):
"""zip and enumerate edges into pairs of lower and upper limits"""
return enumerate(zip(eArr[:-1], eArr[1:]))
def getCocktailSum(e0, e1, eCocktail, uCocktail):
"""get the cocktail sum for a given data bin range"""
# get mask and according indices
mask = (eCocktail >= e0) & (eCocktail <= e1)
# data bin range wider than single cocktail bin
if np.any(mask):
idx = getMaskIndices(mask)
# determine coinciding flags
eCl, eCu = eCocktail[idx[0]], eCocktail[idx[1]]
not_coinc_low, not_coinc_upp = (eCl != e0), (eCu != e1)
# get cocktail sum in data bin (always w/o last bin)
uCocktailSum = fsum(uCocktail[mask[:-1]][:-1])
logging.debug(' sum: {}'.format(uCocktailSum))
# get correction for non-coinciding edges
if not_coinc_low:
eCl_bw = eCl - eCocktail[idx[0]-1]
corr_low = (eCl - e0) / eCl_bw
abs_corr_low = float(corr_low) * uCocktail[idx[0]-1]
uCocktailSum += abs_corr_low
logging.debug((' low: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e0, eCl, eCl - e0, eCl_bw, corr_low
)).format(abs_corr_low, uCocktailSum))
if not_coinc_upp:
if idx[1]+1 < len(eCocktail):
eCu_bw = eCocktail[idx[1]+1] - eCu
corr_upp = (e1 - eCu) / eCu_bw
abs_corr_upp = float(corr_upp) * uCocktail[idx[1]]
else:# catch last index (quick fix!)
abs_corr_upp = eCu_bw = corr_upp = 0
uCocktailSum += abs_corr_upp
logging.debug((' upp: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e1, eCu, e1 - eCu, eCu_bw, corr_upp
)).format(abs_corr_upp, uCocktailSum))
else:
mask = (eCocktail >= e0)
idx = getMaskIndices(mask) # only use first index
# catch if already at last index
if idx[0] == idx[1] and idx[0] == len(eCocktail)-1:
corr = (e1 - e0) / (eCocktail[idx[0]] - eCocktail[idx[0]-1])
uCocktailSum = float(corr) * uCocktail[idx[0]-1]
else: # default case
corr = (e1 - e0) / (eCocktail[idx[0]+1] - eCocktail[idx[0]])
uCocktailSum = float(corr) * uCocktail[idx[0]]
logging.debug(' sum: {}'.format(uCocktailSum))
return uCocktailSum
def getMassRangesSums(
indata, suffix = "", customRanges = None,
onlyLMR = False, systLMR = False, singleRange = False
):
eRangesSyst = [ eRanges if customRanges is None else customRanges ]
if systLMR:
step_size, nsteps, rangeOffsetsLMR = 0.05, 6, [0.15, 0.5]
eEdgesSyst = [ [ # all lower & upper edges for LMR syst. study
Decimal(str(rangeOffsetsLMR[j]+i*step_size))
for i in xrange(nsteps)
] for j in xrange(2) ]
# all combos of lower and upper LMR edges
eRangesSyst = [ [ le, ue ] for ue in eEdgesSyst[1] for le in eEdgesSyst[0] ]
onlyLMR = False # flag meaningless in this case
uInData = getUArray(indata)
eInData = getEdges(indata)
uSums = {}
for erngs in eRangesSyst:
for i, (e0, e1) in enumzipEdges(erngs):
if onlyLMR and i != 1: continue
uSum = getCocktailSum(e0, e1, eInData, uInData)
if (not systLMR) and (onlyLMR or singleRange): return uSum
logging.debug('%g - %g: %r' % (e0, e1, uSum))
key = mass_titles[1 if systLMR else i] + suffix
if systLMR: key += '_%s-%s' % (e0,e1)
uSums[key] = uSum
return uSums
def getEnergy4Key(energy):
if energy == '19': return '19.6'
if energy == '62': return '62.4'
return energy
def particleLabel4Key(k):
if k == 'pion': return '{/Symbol \160}^0 {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'eta': return '{/Symbol \150} {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'etap': return '{/Symbol \150}\' {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'rho': return '{/Symbol \162} {/Symbol \256} e^{+}e^{-}'
if k == 'omega': return '{/Symbol \167} {/Symbol \256} e^{+}e^{-}({/Symbol \160})'
if k == 'phi': return '{/Symbol \146} {/Symbol \256} e^{+}e^{-}({/Symbol \150})'
if k == 'jpsi': return 'J/{/Symbol \171} {/Symbol \256} e^{+}e^{-}'
if k == 'ccbar':
return 'c@^{/=18-}c {/Symbol \256} D/{/Symbol \514} {/Symbol \256} e^{+}e^{-}'
return k
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/utils.py
|
getEdges
|
python
|
def getEdges(npArr):
edges = np.concatenate(([0], npArr[:,0] + npArr[:,2]))
return np.array([Decimal(str(i)) for i in edges])
|
get np array of bin edges
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/utils.py#L49-L52
| null |
import sys, os, itertools, inspect, logging, math
import numpy as np
from uncertainties import ufloat
from uncertainties.umath import fsum
from decimal import Decimal
mass_titles = [ 'pi0', 'LMR', 'omphi', 'IMR' ]
eRanges = np.array([ Decimal(str(e)) for e in [ 0, 0.4, 0.75, 1.1, 3. ] ])
def getWorkDirs():
"""get input/output dirs (same input/output layout as for package)"""
# get caller module
caller_fullurl = inspect.stack()[1][1]
caller_relurl = os.path.relpath(caller_fullurl)
caller_modurl = os.path.splitext(caller_relurl)[0]
# split caller_url & append 'Dir' to package name
dirs = caller_modurl.split('/')
dirs[0] = 'data' # TODO de-hardcode
# get, check and create outdir
outDir = os.path.join(*(['output'] + dirs[1:]))
if not os.path.exists(outDir): os.makedirs(outDir)
# get and check indir
dirs.append('input')
inDir = os.path.join(*dirs)
if not os.path.exists(inDir):
logging.critical('create input dir %s to continue!' % inDir)
sys.exit(1)
return inDir, outDir
def getUArray(npArr):
"""uncertainty array multiplied by binwidth (col2 = dx)"""
ufloats = []
for dp in npArr:
u = ufloat(dp[1], abs(dp[3]), 'stat')
v = ufloat(dp[1], abs(dp[4]), 'syst')
r = (u+v)/2.*dp[2]*2.
ufloats.append(r)
# NOTE: center value ok, but both error contribs half!
# see getErrorComponent()
return np.array(ufloats)
def getErrorComponent(result, tag):
"""get total error contribution for component with specific tag"""
return math.sqrt(sum(
(error*2)**2
for (var, error) in result.error_components().items()
if var.tag == tag
))
def getMaskIndices(mask):
"""get lower and upper index of mask"""
return [
list(mask).index(True), len(mask) - 1 - list(mask)[::-1].index(True)
]
def enumzipEdges(eArr):
"""zip and enumerate edges into pairs of lower and upper limits"""
return enumerate(zip(eArr[:-1], eArr[1:]))
def getCocktailSum(e0, e1, eCocktail, uCocktail):
"""get the cocktail sum for a given data bin range"""
# get mask and according indices
mask = (eCocktail >= e0) & (eCocktail <= e1)
# data bin range wider than single cocktail bin
if np.any(mask):
idx = getMaskIndices(mask)
# determine coinciding flags
eCl, eCu = eCocktail[idx[0]], eCocktail[idx[1]]
not_coinc_low, not_coinc_upp = (eCl != e0), (eCu != e1)
# get cocktail sum in data bin (always w/o last bin)
uCocktailSum = fsum(uCocktail[mask[:-1]][:-1])
logging.debug(' sum: {}'.format(uCocktailSum))
# get correction for non-coinciding edges
if not_coinc_low:
eCl_bw = eCl - eCocktail[idx[0]-1]
corr_low = (eCl - e0) / eCl_bw
abs_corr_low = float(corr_low) * uCocktail[idx[0]-1]
uCocktailSum += abs_corr_low
logging.debug((' low: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e0, eCl, eCl - e0, eCl_bw, corr_low
)).format(abs_corr_low, uCocktailSum))
if not_coinc_upp:
if idx[1]+1 < len(eCocktail):
eCu_bw = eCocktail[idx[1]+1] - eCu
corr_upp = (e1 - eCu) / eCu_bw
abs_corr_upp = float(corr_upp) * uCocktail[idx[1]]
else:# catch last index (quick fix!)
abs_corr_upp = eCu_bw = corr_upp = 0
uCocktailSum += abs_corr_upp
logging.debug((' upp: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e1, eCu, e1 - eCu, eCu_bw, corr_upp
)).format(abs_corr_upp, uCocktailSum))
else:
mask = (eCocktail >= e0)
idx = getMaskIndices(mask) # only use first index
# catch if already at last index
if idx[0] == idx[1] and idx[0] == len(eCocktail)-1:
corr = (e1 - e0) / (eCocktail[idx[0]] - eCocktail[idx[0]-1])
uCocktailSum = float(corr) * uCocktail[idx[0]-1]
else: # default case
corr = (e1 - e0) / (eCocktail[idx[0]+1] - eCocktail[idx[0]])
uCocktailSum = float(corr) * uCocktail[idx[0]]
logging.debug(' sum: {}'.format(uCocktailSum))
return uCocktailSum
def getMassRangesSums(
indata, suffix = "", customRanges = None,
onlyLMR = False, systLMR = False, singleRange = False
):
eRangesSyst = [ eRanges if customRanges is None else customRanges ]
if systLMR:
step_size, nsteps, rangeOffsetsLMR = 0.05, 6, [0.15, 0.5]
eEdgesSyst = [ [ # all lower & upper edges for LMR syst. study
Decimal(str(rangeOffsetsLMR[j]+i*step_size))
for i in xrange(nsteps)
] for j in xrange(2) ]
# all combos of lower and upper LMR edges
eRangesSyst = [ [ le, ue ] for ue in eEdgesSyst[1] for le in eEdgesSyst[0] ]
onlyLMR = False # flag meaningless in this case
uInData = getUArray(indata)
eInData = getEdges(indata)
uSums = {}
for erngs in eRangesSyst:
for i, (e0, e1) in enumzipEdges(erngs):
if onlyLMR and i != 1: continue
uSum = getCocktailSum(e0, e1, eInData, uInData)
if (not systLMR) and (onlyLMR or singleRange): return uSum
logging.debug('%g - %g: %r' % (e0, e1, uSum))
key = mass_titles[1 if systLMR else i] + suffix
if systLMR: key += '_%s-%s' % (e0,e1)
uSums[key] = uSum
return uSums
def getEnergy4Key(energy):
if energy == '19': return '19.6'
if energy == '62': return '62.4'
return energy
def particleLabel4Key(k):
if k == 'pion': return '{/Symbol \160}^0 {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'eta': return '{/Symbol \150} {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'etap': return '{/Symbol \150}\' {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'rho': return '{/Symbol \162} {/Symbol \256} e^{+}e^{-}'
if k == 'omega': return '{/Symbol \167} {/Symbol \256} e^{+}e^{-}({/Symbol \160})'
if k == 'phi': return '{/Symbol \146} {/Symbol \256} e^{+}e^{-}({/Symbol \150})'
if k == 'jpsi': return 'J/{/Symbol \171} {/Symbol \256} e^{+}e^{-}'
if k == 'ccbar':
return 'c@^{/=18-}c {/Symbol \256} D/{/Symbol \514} {/Symbol \256} e^{+}e^{-}'
return k
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/utils.py
|
getMaskIndices
|
python
|
def getMaskIndices(mask):
return [
list(mask).index(True), len(mask) - 1 - list(mask)[::-1].index(True)
]
|
get lower and upper index of mask
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/utils.py#L54-L58
| null |
import sys, os, itertools, inspect, logging, math
import numpy as np
from uncertainties import ufloat
from uncertainties.umath import fsum
from decimal import Decimal
mass_titles = [ 'pi0', 'LMR', 'omphi', 'IMR' ]
eRanges = np.array([ Decimal(str(e)) for e in [ 0, 0.4, 0.75, 1.1, 3. ] ])
def getWorkDirs():
"""get input/output dirs (same input/output layout as for package)"""
# get caller module
caller_fullurl = inspect.stack()[1][1]
caller_relurl = os.path.relpath(caller_fullurl)
caller_modurl = os.path.splitext(caller_relurl)[0]
# split caller_url & append 'Dir' to package name
dirs = caller_modurl.split('/')
dirs[0] = 'data' # TODO de-hardcode
# get, check and create outdir
outDir = os.path.join(*(['output'] + dirs[1:]))
if not os.path.exists(outDir): os.makedirs(outDir)
# get and check indir
dirs.append('input')
inDir = os.path.join(*dirs)
if not os.path.exists(inDir):
logging.critical('create input dir %s to continue!' % inDir)
sys.exit(1)
return inDir, outDir
def getUArray(npArr):
"""uncertainty array multiplied by binwidth (col2 = dx)"""
ufloats = []
for dp in npArr:
u = ufloat(dp[1], abs(dp[3]), 'stat')
v = ufloat(dp[1], abs(dp[4]), 'syst')
r = (u+v)/2.*dp[2]*2.
ufloats.append(r)
# NOTE: center value ok, but both error contribs half!
# see getErrorComponent()
return np.array(ufloats)
def getErrorComponent(result, tag):
"""get total error contribution for component with specific tag"""
return math.sqrt(sum(
(error*2)**2
for (var, error) in result.error_components().items()
if var.tag == tag
))
def getEdges(npArr):
"""get np array of bin edges"""
edges = np.concatenate(([0], npArr[:,0] + npArr[:,2]))
return np.array([Decimal(str(i)) for i in edges])
def enumzipEdges(eArr):
"""zip and enumerate edges into pairs of lower and upper limits"""
return enumerate(zip(eArr[:-1], eArr[1:]))
def getCocktailSum(e0, e1, eCocktail, uCocktail):
"""get the cocktail sum for a given data bin range"""
# get mask and according indices
mask = (eCocktail >= e0) & (eCocktail <= e1)
# data bin range wider than single cocktail bin
if np.any(mask):
idx = getMaskIndices(mask)
# determine coinciding flags
eCl, eCu = eCocktail[idx[0]], eCocktail[idx[1]]
not_coinc_low, not_coinc_upp = (eCl != e0), (eCu != e1)
# get cocktail sum in data bin (always w/o last bin)
uCocktailSum = fsum(uCocktail[mask[:-1]][:-1])
logging.debug(' sum: {}'.format(uCocktailSum))
# get correction for non-coinciding edges
if not_coinc_low:
eCl_bw = eCl - eCocktail[idx[0]-1]
corr_low = (eCl - e0) / eCl_bw
abs_corr_low = float(corr_low) * uCocktail[idx[0]-1]
uCocktailSum += abs_corr_low
logging.debug((' low: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e0, eCl, eCl - e0, eCl_bw, corr_low
)).format(abs_corr_low, uCocktailSum))
if not_coinc_upp:
if idx[1]+1 < len(eCocktail):
eCu_bw = eCocktail[idx[1]+1] - eCu
corr_upp = (e1 - eCu) / eCu_bw
abs_corr_upp = float(corr_upp) * uCocktail[idx[1]]
else:# catch last index (quick fix!)
abs_corr_upp = eCu_bw = corr_upp = 0
uCocktailSum += abs_corr_upp
logging.debug((' upp: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e1, eCu, e1 - eCu, eCu_bw, corr_upp
)).format(abs_corr_upp, uCocktailSum))
else:
mask = (eCocktail >= e0)
idx = getMaskIndices(mask) # only use first index
# catch if already at last index
if idx[0] == idx[1] and idx[0] == len(eCocktail)-1:
corr = (e1 - e0) / (eCocktail[idx[0]] - eCocktail[idx[0]-1])
uCocktailSum = float(corr) * uCocktail[idx[0]-1]
else: # default case
corr = (e1 - e0) / (eCocktail[idx[0]+1] - eCocktail[idx[0]])
uCocktailSum = float(corr) * uCocktail[idx[0]]
logging.debug(' sum: {}'.format(uCocktailSum))
return uCocktailSum
def getMassRangesSums(
indata, suffix = "", customRanges = None,
onlyLMR = False, systLMR = False, singleRange = False
):
eRangesSyst = [ eRanges if customRanges is None else customRanges ]
if systLMR:
step_size, nsteps, rangeOffsetsLMR = 0.05, 6, [0.15, 0.5]
eEdgesSyst = [ [ # all lower & upper edges for LMR syst. study
Decimal(str(rangeOffsetsLMR[j]+i*step_size))
for i in xrange(nsteps)
] for j in xrange(2) ]
# all combos of lower and upper LMR edges
eRangesSyst = [ [ le, ue ] for ue in eEdgesSyst[1] for le in eEdgesSyst[0] ]
onlyLMR = False # flag meaningless in this case
uInData = getUArray(indata)
eInData = getEdges(indata)
uSums = {}
for erngs in eRangesSyst:
for i, (e0, e1) in enumzipEdges(erngs):
if onlyLMR and i != 1: continue
uSum = getCocktailSum(e0, e1, eInData, uInData)
if (not systLMR) and (onlyLMR or singleRange): return uSum
logging.debug('%g - %g: %r' % (e0, e1, uSum))
key = mass_titles[1 if systLMR else i] + suffix
if systLMR: key += '_%s-%s' % (e0,e1)
uSums[key] = uSum
return uSums
def getEnergy4Key(energy):
if energy == '19': return '19.6'
if energy == '62': return '62.4'
return energy
def particleLabel4Key(k):
if k == 'pion': return '{/Symbol \160}^0 {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'eta': return '{/Symbol \150} {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'etap': return '{/Symbol \150}\' {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'rho': return '{/Symbol \162} {/Symbol \256} e^{+}e^{-}'
if k == 'omega': return '{/Symbol \167} {/Symbol \256} e^{+}e^{-}({/Symbol \160})'
if k == 'phi': return '{/Symbol \146} {/Symbol \256} e^{+}e^{-}({/Symbol \150})'
if k == 'jpsi': return 'J/{/Symbol \171} {/Symbol \256} e^{+}e^{-}'
if k == 'ccbar':
return 'c@^{/=18-}c {/Symbol \256} D/{/Symbol \514} {/Symbol \256} e^{+}e^{-}'
return k
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/utils.py
|
getCocktailSum
|
python
|
def getCocktailSum(e0, e1, eCocktail, uCocktail):
# get mask and according indices
mask = (eCocktail >= e0) & (eCocktail <= e1)
# data bin range wider than single cocktail bin
if np.any(mask):
idx = getMaskIndices(mask)
# determine coinciding flags
eCl, eCu = eCocktail[idx[0]], eCocktail[idx[1]]
not_coinc_low, not_coinc_upp = (eCl != e0), (eCu != e1)
# get cocktail sum in data bin (always w/o last bin)
uCocktailSum = fsum(uCocktail[mask[:-1]][:-1])
logging.debug(' sum: {}'.format(uCocktailSum))
# get correction for non-coinciding edges
if not_coinc_low:
eCl_bw = eCl - eCocktail[idx[0]-1]
corr_low = (eCl - e0) / eCl_bw
abs_corr_low = float(corr_low) * uCocktail[idx[0]-1]
uCocktailSum += abs_corr_low
logging.debug((' low: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e0, eCl, eCl - e0, eCl_bw, corr_low
)).format(abs_corr_low, uCocktailSum))
if not_coinc_upp:
if idx[1]+1 < len(eCocktail):
eCu_bw = eCocktail[idx[1]+1] - eCu
corr_upp = (e1 - eCu) / eCu_bw
abs_corr_upp = float(corr_upp) * uCocktail[idx[1]]
else:# catch last index (quick fix!)
abs_corr_upp = eCu_bw = corr_upp = 0
uCocktailSum += abs_corr_upp
logging.debug((' upp: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e1, eCu, e1 - eCu, eCu_bw, corr_upp
)).format(abs_corr_upp, uCocktailSum))
else:
mask = (eCocktail >= e0)
idx = getMaskIndices(mask) # only use first index
# catch if already at last index
if idx[0] == idx[1] and idx[0] == len(eCocktail)-1:
corr = (e1 - e0) / (eCocktail[idx[0]] - eCocktail[idx[0]-1])
uCocktailSum = float(corr) * uCocktail[idx[0]-1]
else: # default case
corr = (e1 - e0) / (eCocktail[idx[0]+1] - eCocktail[idx[0]])
uCocktailSum = float(corr) * uCocktail[idx[0]]
logging.debug(' sum: {}'.format(uCocktailSum))
return uCocktailSum
|
get the cocktail sum for a given data bin range
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/utils.py#L64-L108
|
[
"def getMaskIndices(mask):\n \"\"\"get lower and upper index of mask\"\"\"\n return [\n list(mask).index(True), len(mask) - 1 - list(mask)[::-1].index(True)\n ]\n"
] |
import sys, os, itertools, inspect, logging, math
import numpy as np
from uncertainties import ufloat
from uncertainties.umath import fsum
from decimal import Decimal
mass_titles = [ 'pi0', 'LMR', 'omphi', 'IMR' ]
eRanges = np.array([ Decimal(str(e)) for e in [ 0, 0.4, 0.75, 1.1, 3. ] ])
def getWorkDirs():
"""get input/output dirs (same input/output layout as for package)"""
# get caller module
caller_fullurl = inspect.stack()[1][1]
caller_relurl = os.path.relpath(caller_fullurl)
caller_modurl = os.path.splitext(caller_relurl)[0]
# split caller_url & append 'Dir' to package name
dirs = caller_modurl.split('/')
dirs[0] = 'data' # TODO de-hardcode
# get, check and create outdir
outDir = os.path.join(*(['output'] + dirs[1:]))
if not os.path.exists(outDir): os.makedirs(outDir)
# get and check indir
dirs.append('input')
inDir = os.path.join(*dirs)
if not os.path.exists(inDir):
logging.critical('create input dir %s to continue!' % inDir)
sys.exit(1)
return inDir, outDir
def getUArray(npArr):
"""uncertainty array multiplied by binwidth (col2 = dx)"""
ufloats = []
for dp in npArr:
u = ufloat(dp[1], abs(dp[3]), 'stat')
v = ufloat(dp[1], abs(dp[4]), 'syst')
r = (u+v)/2.*dp[2]*2.
ufloats.append(r)
# NOTE: center value ok, but both error contribs half!
# see getErrorComponent()
return np.array(ufloats)
def getErrorComponent(result, tag):
"""get total error contribution for component with specific tag"""
return math.sqrt(sum(
(error*2)**2
for (var, error) in result.error_components().items()
if var.tag == tag
))
def getEdges(npArr):
"""get np array of bin edges"""
edges = np.concatenate(([0], npArr[:,0] + npArr[:,2]))
return np.array([Decimal(str(i)) for i in edges])
def getMaskIndices(mask):
"""get lower and upper index of mask"""
return [
list(mask).index(True), len(mask) - 1 - list(mask)[::-1].index(True)
]
def enumzipEdges(eArr):
"""zip and enumerate edges into pairs of lower and upper limits"""
return enumerate(zip(eArr[:-1], eArr[1:]))
def getMassRangesSums(
indata, suffix = "", customRanges = None,
onlyLMR = False, systLMR = False, singleRange = False
):
eRangesSyst = [ eRanges if customRanges is None else customRanges ]
if systLMR:
step_size, nsteps, rangeOffsetsLMR = 0.05, 6, [0.15, 0.5]
eEdgesSyst = [ [ # all lower & upper edges for LMR syst. study
Decimal(str(rangeOffsetsLMR[j]+i*step_size))
for i in xrange(nsteps)
] for j in xrange(2) ]
# all combos of lower and upper LMR edges
eRangesSyst = [ [ le, ue ] for ue in eEdgesSyst[1] for le in eEdgesSyst[0] ]
onlyLMR = False # flag meaningless in this case
uInData = getUArray(indata)
eInData = getEdges(indata)
uSums = {}
for erngs in eRangesSyst:
for i, (e0, e1) in enumzipEdges(erngs):
if onlyLMR and i != 1: continue
uSum = getCocktailSum(e0, e1, eInData, uInData)
if (not systLMR) and (onlyLMR or singleRange): return uSum
logging.debug('%g - %g: %r' % (e0, e1, uSum))
key = mass_titles[1 if systLMR else i] + suffix
if systLMR: key += '_%s-%s' % (e0,e1)
uSums[key] = uSum
return uSums
def getEnergy4Key(energy):
if energy == '19': return '19.6'
if energy == '62': return '62.4'
return energy
def particleLabel4Key(k):
if k == 'pion': return '{/Symbol \160}^0 {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'eta': return '{/Symbol \150} {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'etap': return '{/Symbol \150}\' {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'rho': return '{/Symbol \162} {/Symbol \256} e^{+}e^{-}'
if k == 'omega': return '{/Symbol \167} {/Symbol \256} e^{+}e^{-}({/Symbol \160})'
if k == 'phi': return '{/Symbol \146} {/Symbol \256} e^{+}e^{-}({/Symbol \150})'
if k == 'jpsi': return 'J/{/Symbol \171} {/Symbol \256} e^{+}e^{-}'
if k == 'ccbar':
return 'c@^{/=18-}c {/Symbol \256} D/{/Symbol \514} {/Symbol \256} e^{+}e^{-}'
return k
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/gp_background.py
|
gp_background
|
python
|
def gp_background():
inDir, outDir = getWorkDirs()
data, REBIN = OrderedDict(), None
titles = [ 'SE_{+-}', 'SE@^{corr}_{/Symbol \\261\\261}', 'ME@^{N}_{+-}' ]
Apm = OrderedDict([
('19', 0.026668), ('27', 0.026554), ('39', 0.026816), ('62', 0.026726)
])
fake = np.array([[-1, 1, 0, 0, 0]])
lines = {'y=0.9': 'lc {} lt 2 lw 3'.format(default_colors[-4])}
for energy in ['19', '27', '39', '62']:
ekey = ' '.join([getEnergy4Key(energy), 'GeV'])
data[ekey] = [[], [], []]
for didx,dtype in enumerate(['epsPt', 'ngmPt_corr', 'epmPt']):
for idx,infile in enumerate(glob.glob(os.path.realpath(os.path.join(
inDir, 'rawdata', energy, 'pt-differential', '%s_*.dat' % dtype
)))):
file_url = os.path.realpath(os.path.join(inDir, infile))
data_import = np.loadtxt(open(file_url, 'rb'))
if REBIN is None: REBIN = int(data_import[-1][2]*2*1000) # MeV
data_import[:,4] = data_import[:,3]
data_import[:,(2,3)] = 0
if dtype == 'ngmPt_corr':
data_import = data_import[data_import[:,0] <= 0.9]
if dtype == 'epmPt':
data_import = data_import[data_import[:,0] > 0.9]
data_import[:,(1,4)] *= Apm[energy]
col = colorscale(default_colors[didx], 1.+idx*0.2)
momrange = os.path.basename(infile).split('_')[-1][:-4]
if idx < 1:
data[ekey][0].append(fake)
data[ekey][1].append('with lines lt 1 lw 5 lc %s' % col)
data[ekey][2].append(titles[didx])
data[ekey][0].append(data_import)
data[ekey][1].append('lt 1 lw 3 lc %s pt 0' % col)
data[ekey][2].append('')
# unsubtracted background
make_panel(
name = '%s/methods' % outDir, dpt_dict = data,
xr = [0,3.35], yr = [0.9,2e5], ylog = True, lines = lines,
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})',
ylabel = 'counts / %d MeV/c^{2}' % REBIN, layout = '2x2',
key = ['spacing 1.6', 'nobox'], gpcalls = ['boxwidth 0.002'],
labels = {
'{/=20 the lighter the color, the higher the p_{T}}': (1.5, 1e5)
}, key_subplot_id = 1,
)
return 'done'
# background ratio and acc.corr.
make_plot(
name = '%s/ratios%s' % (outDir, energy),
xr = [0,1.6], yr = [0.95,1.2],
data = graph_data[3:],
properties = [
'with filledcurves lt 1 lw 3 lc %s pt 0' % default_colors[i]
for i in xrange(2)
],
titles = [
'SE_{/Symbol \\261\\261} / ME_{/Symbol \\261\\261}',
'f_{acc} = ME_{+-} / ME_{/Symbol \\261\\261}'
],
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})',
ylabel = '', key = [ 'width -2' ],
labels = { '%s GeV' % energy: (0.4, 0.97) }
)
# signal-to-background ratio in rho/omega region vs. energy
graph_data_sn = []
for infile in os.listdir(os.path.join(inDir, 'sn')):
energy = re.compile('\d+').search(infile).group()
file_url = os.path.join(inDir, 'sn', infile)
data_import = np.loadtxt(open(file_url, 'rb'))
mask = (data_import[:,0] > 0.3) & (data_import[:,0] < 0.75)
data_import = data_import[mask]
weights = 1./data_import[:,3]
sn = np.average(data_import[:,1], weights = weights)
sn_err = np.average((data_import[:,1]-sn)**2, weights = weights)
graph_data_sn.append([float(getEnergy4Key(energy)), sn, 0, sn_err, 0])
graph_data_sn = np.array(graph_data_sn)
make_plot(
name = '%s/SNvsEnergy' % (outDir), xr = [15,210],
yr = [1e-3, .11], xlog = True, ylog = True,
data = [ np.array([[15,0.1,0,0,0],[210,0.1,0,0,0]]), graph_data_sn ],
properties = [
'with lines lt 2 lw 4 lc 0',
'lt 1 lw 3 lc %s pt 18 ps 2' % default_colors[0]
],
titles = ['']*2,
xlabel = '{/Symbol \326}s_{NN} (GeV)',
ylabel = 'S/B for 0.3 < M_{ee} < 0.75 GeV/c^{2}',
lmargin = 0.1, gpcalls = [
'nokey', 'format x "%g"',
'xtics (20,"" 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
],
labels = { 'p+p': (100, 0.09) }
)
return 'done'
|
plot background methods and S/B vs energy
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/gp_background.py#L26-L121
|
[
"def getWorkDirs():\n \"\"\"get input/output dirs (same input/output layout as for package)\"\"\"\n # get caller module\n caller_fullurl = inspect.stack()[1][1]\n caller_relurl = os.path.relpath(caller_fullurl)\n caller_modurl = os.path.splitext(caller_relurl)[0]\n # split caller_url & append 'Dir' to package name\n dirs = caller_modurl.split('/')\n dirs[0] = 'data' # TODO de-hardcode\n # get, check and create outdir\n outDir = os.path.join(*(['output'] + dirs[1:]))\n if not os.path.exists(outDir): os.makedirs(outDir)\n # get and check indir\n dirs.append('input')\n inDir = os.path.join(*dirs)\n if not os.path.exists(inDir):\n logging.critical('create input dir %s to continue!' % inDir)\n sys.exit(1)\n return inDir, outDir\n",
"def getEnergy4Key(energy):\n if energy == '19': return '19.6'\n if energy == '62': return '62.4'\n return energy\n"
] |
import logging, argparse, re, os, glob
import numpy as np
from .utils import getWorkDirs, getEnergy4Key
from ..ccsgp.ccsgp import make_plot, make_panel
from ..ccsgp.config import default_colors
from ..ccsgp.utils import colorscale
from collections import OrderedDict
from fnmatch import fnmatch
MIL = 1e6
NEVTS = { '19': 32.2307*MIL, '27': 63.6828*MIL, '39': 122.390*MIL, '62': 59.4631*MIL}
mee_ranges = OrderedDict([
('omega', [0.76, 0.82]), ('phi', [0.98, 1.06]), ('jpsi', [2.97, 3.22])
])
masses = OrderedDict([
('omega', 0.78265), ('phi', 1.019455), ('jpsi', 3.096916)
])
def getMeeLabel(s):
if s == 'pi0': return '{/Symbol \160}^0'
if s == 'omega': return '{/Symbol \167}'
if s == 'phi': return '{/Symbol \152}'
if s == 'jpsi': return 'J/{/Symbol \171}'
return s
def gp_rebin():
inDir, outDir = getWorkDirs()
data = OrderedDict()
titles = [ 'raw signal', 'rebinned raw signal', 'sigRbPtTotRaw']
lines = {'y=0.9': 'lc {} lt 2 lw 3'.format(default_colors[-4])}
min_content, max_content = 1e20, -1e20
colors = [default_colors[-9], default_colors[0], default_colors[1]]
points = [1,6,4]
for eidx,energy in enumerate(['19', '27', '19', '27', '39', '62', '39', '62']):
sgn_idx = (eidx%4)/2
ekey = ' '.join([getEnergy4Key(energy), 'GeV'])
if sgn_idx == 0: ekey += ' {}'.format(sgn_idx)
data[ekey] = [[], [], []]
for didx,dtype in enumerate(['sig', 'sigRb']):#, '../sigRbPtTotRaw']):
for idx,infile in enumerate(glob.glob(os.path.realpath(os.path.join(
inDir, 'rawdata', energy, 'pt-integrated', '%s.dat' % dtype
)))):
if sgn_idx == 1 and fnmatch(dtype, '*sigRb*'): continue
file_url = os.path.realpath(os.path.join(inDir, infile))
data_import = np.loadtxt(open(file_url, 'rb'))
data_import = data_import[data_import[:,0]>0.1]
for i in [1,3,4]:
data_import[:,i] /= NEVTS[energy]
if dtype == 'sig': data_import[:,i] /= 2*data_import[:,2]
if dtype == 'sig':
data_import[:,4] = data_import[:,3]
data_import[:,(2,3)] = 0
if sgn_idx == 0: data_import = data_import[data_import[:,1]>0]
else: data_import = np.abs(data_import[data_import[:,1]<0])
cur_min, cur_max = min(data_import[:,1]), max(data_import[:,1])
if ( cur_min < min_content ): min_content = cur_min
if ( cur_max > max_content ): max_content = cur_max
data[ekey][0].append(data_import)
data[ekey][1].append('lt 1 lw %d lc %s pt %d' % (
3+didx, colors[didx], points[didx]))
data[ekey][2].append(titles[didx])
make_panel(
name = '%s/rebin' % outDir, dpt_dict = data, ylog = True,
xr = [0.1,3.2], yr = [2e-6, 7e-3], lines = lines, size = '7in,8in',
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})',
ylabel = '1/N@_{mb}^{evt} dN@_{ee}^{acc.}/dM_{ee} [ (GeV/c^2)^{-1} ]',
layout = '2x4', key = ['width -5', 'nobox'], rmargin = 0.999, tmargin = 0.999,
gpcalls = ['boxwidth 0.002', 'bars small', 'xtics (0.1,0.5,1,1.5,2,2.5,3)'],
)
return 'done'
def gp_peaks():
inDir, outDir = getWorkDirs()
data = OrderedDict()
for particle,mee_range in mee_ranges.iteritems():
for eidx,energy in enumerate(['19', '27', '39', '62']):
key = '{/=20 '+getEnergy4Key(energy)+' GeV: '+getMeeLabel(particle)
if particle == 'jpsi': key += ' {/Symbol \264}50}'
data[key] = [[], [], []]
file_url = os.path.realpath(os.path.join(
inDir, 'rawdata', energy, 'sigRbPtTotRaw.dat'))
data_import = np.loadtxt(open(file_url, 'rb'))
#mask = (data_import[:,0] > mee_range[0]) & (data_import[:,0] < mee_range[1])
#data_import = data_import[mask]
for i in [1,3,4]: data_import[:,i] /= NEVTS[energy]
data_import[:,0] -= masses[particle]
if particle == 'jpsi': data_import[:,(1,3.4)] *= 50
data_import[:,(1,3,4)] *= 1000
data_import[:,4] = 0
data[key][0].append(data_import)
data[key][1].append('with boxerrorbars lt 1 lw 3 lc %s' % default_colors[eidx])
data[key][2].append('')
make_panel(
name = '%s/peaks' % outDir, dpt_dict = data,
xr = [-0.13,0.13], yr = [0,5.5], size = '5.5in,8.5in',
xlabel = 'M_{ee} - M_{%s,%s,%s} (GeV/c^{2})' % (
getMeeLabel('omega'), getMeeLabel('phi'), getMeeLabel('jpsi')
), lmargin = 0.065,
ylabel = '1/N@_{mb}^{evt} dN@_{ee}^{acc.}/dM_{ee} [ 10^{-3} (GeV/c^2)^{-1} ]',
layout = '4x3', key = ['nobox'], gpcalls = ['bars small', 'boxwidth 0.002']
)
return 'done'
def gp_norm(infile):
  """identify normalization region"""
inDir, outDir = getWorkDirs()
data, titles = [], []
for eidx,energy in enumerate(['19', '27', '39', '62']):
file_url = os.path.realpath(os.path.join(
inDir, 'rawdata', energy, 'pt-integrated', infile+'.dat'
))
data_import = np.loadtxt(open(file_url, 'rb'))
data_import[:,1] += eidx * 0.2
data_import[:,4] = data_import[:,3]
data_import[:,(2,3)] = 0
data.append(data_import)
titles.append(' '.join([getEnergy4Key(energy), 'GeV']))
nData = len(data)
lines = dict(
('x={}'.format(1+i*0.2), 'lc {} lt 2 lw 4'.format(default_colors[-2]))
for i in range(nData)
)
lines.update(dict(
('x={}'.format(1+i*0.2+0.02), 'lc {} lt 3 lw 4'.format(default_colors[-5]))
for i in range(nData)
))
lines.update(dict(
('x={}'.format(1+i*0.2-0.02), 'lc {} lt 3 lw 4'.format(default_colors[-5]))
for i in range(nData)
))
lines.update({'y=0.9': 'lc {} lt 1 lw 4'.format(default_colors[-2])})
charges = '++' if infile == 'rpp' else '--'
make_plot(
name = '%s/norm_range_%s' % (outDir,infile), xr = [0,2], yr = [0.9,1.7],
data = data, properties = [
'lt 1 lw 3 lc %s pt 1' % (default_colors[i]) # (i/2)%4
for i in range(nData)
], titles = titles, size = '8in,8in',
lmargin = 0.05, rmargin = 0.99, tmargin = 0.93, bmargin = 0.14,
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})',
lines = lines, key = [
'maxrows 1', 'nobox', 'samplen 0.1', 'width -1', 'at graph 1,1.1'
], labels = {
'SE_{%s} / ME@_{%s}^N' % (charges, charges): (0.3, 1.3)
}, gpcalls = [
'ytics (1,"1" 1.2, "1" 1.4, "1" 1.6)', 'boxwidth 0.002',
],
)
def gp_acc():
"""acceptance correction"""
inDir, outDir = getWorkDirs()
for energy in ['19', '27', '39', '62']:
data, titles = [], []
for idx,infile in enumerate(glob.glob(os.path.realpath(os.path.join(
inDir, 'rawdata', energy, 'pt-differential', 'acPt_*.dat'
)))):
data_import = np.loadtxt(open(infile, 'rb'))
data_import[:,1] += idx * 0.2
data_import[:,4] = data_import[:,3]
data_import[:,(2,3)] = 0
data.append(data_import)
titles.append(os.path.basename(infile).split('_')[-1][:-4])
nData = len(data)
lines = dict(
('x={}'.format(1+i*0.2), 'lc {} lt 2 lw 4'.format(default_colors[-2]))
for i in range(nData)
)
lines.update(dict(
('x={}'.format(1+i*0.2+0.02), 'lc {} lt 3 lw 4'.format(default_colors[-5]))
for i in range(nData)
))
lines.update(dict(
('x={}'.format(1+i*0.2-0.02), 'lc {} lt 3 lw 4'.format(default_colors[-5]))
for i in range(nData)
))
make_plot(
name = '%s/accfac%s' % (outDir,energy), xr = [0,2], yr = [0.8,2],
data = data, properties = [
'lt 1 lw 3 lc %s pt 1' % (default_colors[i])
for i in range(nData)
], titles = titles, size = '8in,8in',
lmargin = 0.05, rmargin = 0.98, tmargin = 0.93, bmargin = 0.14,
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})',
lines = lines, key = [
'maxrows 1', 'nobox', 'samplen 0.1', 'width -2', 'at graph 1,1.1'
], labels = {
'ME_{+-} / 2{/Symbol \326}ME_{++}ME_{--}': (0.3, 0.85),
' '.join([getEnergy4Key(energy), 'GeV']): (1.3, 0.85)
},
gpcalls = [ 'ytics (1,"1" 1.2, "1" 1.4, "1" 1.6, "1" 1.8)', 'boxwidth 0.002', ],
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--log", help="show log output", action="store_true")
args = parser.parse_args()
loglevel = 'DEBUG' if args.log else 'WARNING'
logging.basicConfig(
format='%(message)s', level=getattr(logging, loglevel)
)
#gp_background()
gp_norm('rmm')
gp_norm('rpp')
gp_acc()
#gp_rebin()
#gp_peaks()
|
tschaume/ccsgp_get_started
|
ccsgp_get_started/examples/gp_background.py
|
gp_norm
|
python
|
def gp_norm(infile):
inDir, outDir = getWorkDirs()
data, titles = [], []
for eidx,energy in enumerate(['19', '27', '39', '62']):
file_url = os.path.realpath(os.path.join(
inDir, 'rawdata', energy, 'pt-integrated', infile+'.dat'
))
data_import = np.loadtxt(open(file_url, 'rb'))
data_import[:,1] += eidx * 0.2
data_import[:,4] = data_import[:,3]
data_import[:,(2,3)] = 0
data.append(data_import)
titles.append(' '.join([getEnergy4Key(energy), 'GeV']))
nData = len(data)
lines = dict(
('x={}'.format(1+i*0.2), 'lc {} lt 2 lw 4'.format(default_colors[-2]))
for i in range(nData)
)
lines.update(dict(
('x={}'.format(1+i*0.2+0.02), 'lc {} lt 3 lw 4'.format(default_colors[-5]))
for i in range(nData)
))
lines.update(dict(
('x={}'.format(1+i*0.2-0.02), 'lc {} lt 3 lw 4'.format(default_colors[-5]))
for i in range(nData)
))
lines.update({'y=0.9': 'lc {} lt 1 lw 4'.format(default_colors[-2])})
charges = '++' if infile == 'rpp' else '--'
make_plot(
name = '%s/norm_range_%s' % (outDir,infile), xr = [0,2], yr = [0.9,1.7],
data = data, properties = [
'lt 1 lw 3 lc %s pt 1' % (default_colors[i]) # (i/2)%4
for i in range(nData)
], titles = titles, size = '8in,8in',
lmargin = 0.05, rmargin = 0.99, tmargin = 0.93, bmargin = 0.14,
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})',
lines = lines, key = [
'maxrows 1', 'nobox', 'samplen 0.1', 'width -1', 'at graph 1,1.1'
], labels = {
'SE_{%s} / ME@_{%s}^N' % (charges, charges): (0.3, 1.3)
}, gpcalls = [
'ytics (1,"1" 1.2, "1" 1.4, "1" 1.6)', 'boxwidth 0.002',
],
)
|
indentify normalization region
|
train
|
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/gp_background.py#L201-L245
|
[
"def getWorkDirs():\n \"\"\"get input/output dirs (same input/output layout as for package)\"\"\"\n # get caller module\n caller_fullurl = inspect.stack()[1][1]\n caller_relurl = os.path.relpath(caller_fullurl)\n caller_modurl = os.path.splitext(caller_relurl)[0]\n # split caller_url & append 'Dir' to package name\n dirs = caller_modurl.split('/')\n dirs[0] = 'data' # TODO de-hardcode\n # get, check and create outdir\n outDir = os.path.join(*(['output'] + dirs[1:]))\n if not os.path.exists(outDir): os.makedirs(outDir)\n # get and check indir\n dirs.append('input')\n inDir = os.path.join(*dirs)\n if not os.path.exists(inDir):\n logging.critical('create input dir %s to continue!' % inDir)\n sys.exit(1)\n return inDir, outDir\n",
"def getEnergy4Key(energy):\n if energy == '19': return '19.6'\n if energy == '62': return '62.4'\n return energy\n"
] |
import logging, argparse, re, os, glob
import numpy as np
from .utils import getWorkDirs, getEnergy4Key
from ..ccsgp.ccsgp import make_plot, make_panel
from ..ccsgp.config import default_colors
from ..ccsgp.utils import colorscale
from collections import OrderedDict
from fnmatch import fnmatch
MIL = 1e6
NEVTS = { '19': 32.2307*MIL, '27': 63.6828*MIL, '39': 122.390*MIL, '62': 59.4631*MIL}
mee_ranges = OrderedDict([
('omega', [0.76, 0.82]), ('phi', [0.98, 1.06]), ('jpsi', [2.97, 3.22])
])
masses = OrderedDict([
('omega', 0.78265), ('phi', 1.019455), ('jpsi', 3.096916)
])
def getMeeLabel(s):
if s == 'pi0': return '{/Symbol \160}^0'
if s == 'omega': return '{/Symbol \167}'
if s == 'phi': return '{/Symbol \152}'
if s == 'jpsi': return 'J/{/Symbol \171}'
return s
def gp_background():
""" plot background methods and S/B vs energy """
inDir, outDir = getWorkDirs()
data, REBIN = OrderedDict(), None
titles = [ 'SE_{+-}', 'SE@^{corr}_{/Symbol \\261\\261}', 'ME@^{N}_{+-}' ]
Apm = OrderedDict([
('19', 0.026668), ('27', 0.026554), ('39', 0.026816), ('62', 0.026726)
])
fake = np.array([[-1, 1, 0, 0, 0]])
lines = {'y=0.9': 'lc {} lt 2 lw 3'.format(default_colors[-4])}
for energy in ['19', '27', '39', '62']:
ekey = ' '.join([getEnergy4Key(energy), 'GeV'])
data[ekey] = [[], [], []]
for didx,dtype in enumerate(['epsPt', 'ngmPt_corr', 'epmPt']):
for idx,infile in enumerate(glob.glob(os.path.realpath(os.path.join(
inDir, 'rawdata', energy, 'pt-differential', '%s_*.dat' % dtype
)))):
file_url = os.path.realpath(os.path.join(inDir, infile))
data_import = np.loadtxt(open(file_url, 'rb'))
if REBIN is None: REBIN = int(data_import[-1][2]*2*1000) # MeV
data_import[:,4] = data_import[:,3]
data_import[:,(2,3)] = 0
if dtype == 'ngmPt_corr':
data_import = data_import[data_import[:,0] <= 0.9]
if dtype == 'epmPt':
data_import = data_import[data_import[:,0] > 0.9]
data_import[:,(1,4)] *= Apm[energy]
col = colorscale(default_colors[didx], 1.+idx*0.2)
momrange = os.path.basename(infile).split('_')[-1][:-4]
if idx < 1:
data[ekey][0].append(fake)
data[ekey][1].append('with lines lt 1 lw 5 lc %s' % col)
data[ekey][2].append(titles[didx])
data[ekey][0].append(data_import)
data[ekey][1].append('lt 1 lw 3 lc %s pt 0' % col)
data[ekey][2].append('')
# unsubtracted background
make_panel(
name = '%s/methods' % outDir, dpt_dict = data,
xr = [0,3.35], yr = [0.9,2e5], ylog = True, lines = lines,
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})',
ylabel = 'counts / %d MeV/c^{2}' % REBIN, layout = '2x2',
key = ['spacing 1.6', 'nobox'], gpcalls = ['boxwidth 0.002'],
labels = {
'{/=20 the lighter the color, the higher the p_{T}}': (1.5, 1e5)
}, key_subplot_id = 1,
)
return 'done'
# background ratio and acc.corr.
make_plot(
name = '%s/ratios%s' % (outDir, energy),
xr = [0,1.6], yr = [0.95,1.2],
data = graph_data[3:],
properties = [
'with filledcurves lt 1 lw 3 lc %s pt 0' % default_colors[i]
for i in xrange(2)
],
titles = [
'SE_{/Symbol \\261\\261} / ME_{/Symbol \\261\\261}',
'f_{acc} = ME_{+-} / ME_{/Symbol \\261\\261}'
],
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})',
ylabel = '', key = [ 'width -2' ],
labels = { '%s GeV' % energy: (0.4, 0.97) }
)
# signal-to-background ratio in rho/omega region vs. energy
graph_data_sn = []
for infile in os.listdir(os.path.join(inDir, 'sn')):
energy = re.compile('\d+').search(infile).group()
file_url = os.path.join(inDir, 'sn', infile)
data_import = np.loadtxt(open(file_url, 'rb'))
mask = (data_import[:,0] > 0.3) & (data_import[:,0] < 0.75)
data_import = data_import[mask]
weights = 1./data_import[:,3]
sn = np.average(data_import[:,1], weights = weights)
sn_err = np.average((data_import[:,1]-sn)**2, weights = weights)
graph_data_sn.append([float(getEnergy4Key(energy)), sn, 0, sn_err, 0])
graph_data_sn = np.array(graph_data_sn)
make_plot(
name = '%s/SNvsEnergy' % (outDir), xr = [15,210],
yr = [1e-3, .11], xlog = True, ylog = True,
data = [ np.array([[15,0.1,0,0,0],[210,0.1,0,0,0]]), graph_data_sn ],
properties = [
'with lines lt 2 lw 4 lc 0',
'lt 1 lw 3 lc %s pt 18 ps 2' % default_colors[0]
],
titles = ['']*2,
xlabel = '{/Symbol \326}s_{NN} (GeV)',
ylabel = 'S/B for 0.3 < M_{ee} < 0.75 GeV/c^{2}',
lmargin = 0.1, gpcalls = [
'nokey', 'format x "%g"',
'xtics (20,"" 30, 40,"" 50, 60,"" 70,"" 80,"" 90, 100, 200)',
],
labels = { 'p+p': (100, 0.09) }
)
return 'done'
def gp_rebin():
inDir, outDir = getWorkDirs()
data = OrderedDict()
titles = [ 'raw signal', 'rebinned raw signal', 'sigRbPtTotRaw']
lines = {'y=0.9': 'lc {} lt 2 lw 3'.format(default_colors[-4])}
min_content, max_content = 1e20, -1e20
colors = [default_colors[-9], default_colors[0], default_colors[1]]
points = [1,6,4]
for eidx,energy in enumerate(['19', '27', '19', '27', '39', '62', '39', '62']):
sgn_idx = (eidx%4)/2
ekey = ' '.join([getEnergy4Key(energy), 'GeV'])
if sgn_idx == 0: ekey += ' {}'.format(sgn_idx)
data[ekey] = [[], [], []]
for didx,dtype in enumerate(['sig', 'sigRb']):#, '../sigRbPtTotRaw']):
for idx,infile in enumerate(glob.glob(os.path.realpath(os.path.join(
inDir, 'rawdata', energy, 'pt-integrated', '%s.dat' % dtype
)))):
if sgn_idx == 1 and fnmatch(dtype, '*sigRb*'): continue
file_url = os.path.realpath(os.path.join(inDir, infile))
data_import = np.loadtxt(open(file_url, 'rb'))
data_import = data_import[data_import[:,0]>0.1]
for i in [1,3,4]:
data_import[:,i] /= NEVTS[energy]
if dtype == 'sig': data_import[:,i] /= 2*data_import[:,2]
if dtype == 'sig':
data_import[:,4] = data_import[:,3]
data_import[:,(2,3)] = 0
if sgn_idx == 0: data_import = data_import[data_import[:,1]>0]
else: data_import = np.abs(data_import[data_import[:,1]<0])
cur_min, cur_max = min(data_import[:,1]), max(data_import[:,1])
if ( cur_min < min_content ): min_content = cur_min
if ( cur_max > max_content ): max_content = cur_max
data[ekey][0].append(data_import)
data[ekey][1].append('lt 1 lw %d lc %s pt %d' % (
3+didx, colors[didx], points[didx]))
data[ekey][2].append(titles[didx])
make_panel(
name = '%s/rebin' % outDir, dpt_dict = data, ylog = True,
xr = [0.1,3.2], yr = [2e-6, 7e-3], lines = lines, size = '7in,8in',
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})',
ylabel = '1/N@_{mb}^{evt} dN@_{ee}^{acc.}/dM_{ee} [ (GeV/c^2)^{-1} ]',
layout = '2x4', key = ['width -5', 'nobox'], rmargin = 0.999, tmargin = 0.999,
gpcalls = ['boxwidth 0.002', 'bars small', 'xtics (0.1,0.5,1,1.5,2,2.5,3)'],
)
return 'done'
def gp_peaks():
inDir, outDir = getWorkDirs()
data = OrderedDict()
for particle,mee_range in mee_ranges.iteritems():
for eidx,energy in enumerate(['19', '27', '39', '62']):
key = '{/=20 '+getEnergy4Key(energy)+' GeV: '+getMeeLabel(particle)
if particle == 'jpsi': key += ' {/Symbol \264}50}'
data[key] = [[], [], []]
file_url = os.path.realpath(os.path.join(
inDir, 'rawdata', energy, 'sigRbPtTotRaw.dat'))
data_import = np.loadtxt(open(file_url, 'rb'))
#mask = (data_import[:,0] > mee_range[0]) & (data_import[:,0] < mee_range[1])
#data_import = data_import[mask]
for i in [1,3,4]: data_import[:,i] /= NEVTS[energy]
data_import[:,0] -= masses[particle]
if particle == 'jpsi': data_import[:,(1,3.4)] *= 50
data_import[:,(1,3,4)] *= 1000
data_import[:,4] = 0
data[key][0].append(data_import)
data[key][1].append('with boxerrorbars lt 1 lw 3 lc %s' % default_colors[eidx])
data[key][2].append('')
make_panel(
name = '%s/peaks' % outDir, dpt_dict = data,
xr = [-0.13,0.13], yr = [0,5.5], size = '5.5in,8.5in',
xlabel = 'M_{ee} - M_{%s,%s,%s} (GeV/c^{2})' % (
getMeeLabel('omega'), getMeeLabel('phi'), getMeeLabel('jpsi')
), lmargin = 0.065,
ylabel = '1/N@_{mb}^{evt} dN@_{ee}^{acc.}/dM_{ee} [ 10^{-3} (GeV/c^2)^{-1} ]',
layout = '4x3', key = ['nobox'], gpcalls = ['bars small', 'boxwidth 0.002']
)
return 'done'
def gp_acc():
"""acceptance correction"""
inDir, outDir = getWorkDirs()
for energy in ['19', '27', '39', '62']:
data, titles = [], []
for idx,infile in enumerate(glob.glob(os.path.realpath(os.path.join(
inDir, 'rawdata', energy, 'pt-differential', 'acPt_*.dat'
)))):
data_import = np.loadtxt(open(infile, 'rb'))
data_import[:,1] += idx * 0.2
data_import[:,4] = data_import[:,3]
data_import[:,(2,3)] = 0
data.append(data_import)
titles.append(os.path.basename(infile).split('_')[-1][:-4])
nData = len(data)
lines = dict(
('x={}'.format(1+i*0.2), 'lc {} lt 2 lw 4'.format(default_colors[-2]))
for i in range(nData)
)
lines.update(dict(
('x={}'.format(1+i*0.2+0.02), 'lc {} lt 3 lw 4'.format(default_colors[-5]))
for i in range(nData)
))
lines.update(dict(
('x={}'.format(1+i*0.2-0.02), 'lc {} lt 3 lw 4'.format(default_colors[-5]))
for i in range(nData)
))
make_plot(
name = '%s/accfac%s' % (outDir,energy), xr = [0,2], yr = [0.8,2],
data = data, properties = [
'lt 1 lw 3 lc %s pt 1' % (default_colors[i])
for i in range(nData)
], titles = titles, size = '8in,8in',
lmargin = 0.05, rmargin = 0.98, tmargin = 0.93, bmargin = 0.14,
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})',
lines = lines, key = [
'maxrows 1', 'nobox', 'samplen 0.1', 'width -2', 'at graph 1,1.1'
], labels = {
'ME_{+-} / 2{/Symbol \326}ME_{++}ME_{--}': (0.3, 0.85),
' '.join([getEnergy4Key(energy), 'GeV']): (1.3, 0.85)
},
gpcalls = [ 'ytics (1,"1" 1.2, "1" 1.4, "1" 1.6, "1" 1.8)', 'boxwidth 0.002', ],
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--log", help="show log output", action="store_true")
args = parser.parse_args()
loglevel = 'DEBUG' if args.log else 'WARNING'
logging.basicConfig(
format='%(message)s', level=getattr(logging, loglevel)
)
#gp_background()
gp_norm('rmm')
gp_norm('rpp')
gp_acc()
#gp_rebin()
#gp_peaks()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.