repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
| partition
stringclasses 1
value |
|---|---|---|---|---|---|---|---|---|---|---|---|
linuxsoftware/ls.joyous
|
ls/joyous/models/events.py
|
RecurringEventPage._getMyFirstDatetimeFrom
|
def _getMyFirstDatetimeFrom(self):
"""
The datetime this event first started, or None if it never did.
"""
myStartDt = getAwareDatetime(self.repeat.dtstart, None,
self.tz, dt.time.min)
return self.__after(myStartDt, excludeCancellations=False)
|
python
|
def _getMyFirstDatetimeFrom(self):
"""
The datetime this event first started, or None if it never did.
"""
myStartDt = getAwareDatetime(self.repeat.dtstart, None,
self.tz, dt.time.min)
return self.__after(myStartDt, excludeCancellations=False)
|
[
"def",
"_getMyFirstDatetimeFrom",
"(",
"self",
")",
":",
"myStartDt",
"=",
"getAwareDatetime",
"(",
"self",
".",
"repeat",
".",
"dtstart",
",",
"None",
",",
"self",
".",
"tz",
",",
"dt",
".",
"time",
".",
"min",
")",
"return",
"self",
".",
"__after",
"(",
"myStartDt",
",",
"excludeCancellations",
"=",
"False",
")"
] |
The datetime this event first started, or None if it never did.
|
[
"The",
"datetime",
"this",
"event",
"first",
"started",
"or",
"None",
"if",
"it",
"never",
"did",
"."
] |
316283140ca5171a68ad3170a5964fdc89be0b56
|
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L1145-L1151
|
train
|
linuxsoftware/ls.joyous
|
ls/joyous/models/events.py
|
RecurringEventPage._getMyFirstDatetimeTo
|
def _getMyFirstDatetimeTo(self):
"""
The datetime this event first finished, or None if it never did.
"""
myFirstDt = self._getMyFirstDatetimeFrom()
if myFirstDt is not None:
daysDelta = dt.timedelta(days=self.num_days - 1)
return getAwareDatetime(myFirstDt.date() + daysDelta,
self.time_to,
self.tz, dt.time.max)
|
python
|
def _getMyFirstDatetimeTo(self):
"""
The datetime this event first finished, or None if it never did.
"""
myFirstDt = self._getMyFirstDatetimeFrom()
if myFirstDt is not None:
daysDelta = dt.timedelta(days=self.num_days - 1)
return getAwareDatetime(myFirstDt.date() + daysDelta,
self.time_to,
self.tz, dt.time.max)
|
[
"def",
"_getMyFirstDatetimeTo",
"(",
"self",
")",
":",
"myFirstDt",
"=",
"self",
".",
"_getMyFirstDatetimeFrom",
"(",
")",
"if",
"myFirstDt",
"is",
"not",
"None",
":",
"daysDelta",
"=",
"dt",
".",
"timedelta",
"(",
"days",
"=",
"self",
".",
"num_days",
"-",
"1",
")",
"return",
"getAwareDatetime",
"(",
"myFirstDt",
".",
"date",
"(",
")",
"+",
"daysDelta",
",",
"self",
".",
"time_to",
",",
"self",
".",
"tz",
",",
"dt",
".",
"time",
".",
"max",
")"
] |
The datetime this event first finished, or None if it never did.
|
[
"The",
"datetime",
"this",
"event",
"first",
"finished",
"or",
"None",
"if",
"it",
"never",
"did",
"."
] |
316283140ca5171a68ad3170a5964fdc89be0b56
|
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L1153-L1162
|
train
|
linuxsoftware/ls.joyous
|
ls/joyous/models/events.py
|
EventExceptionBase.local_title
|
def local_title(self):
"""
Localised version of the human-readable title of the page.
"""
name = self.title.partition(" for ")[0]
exceptDate = getLocalDate(self.except_date, self.time_from, self.tz)
title = _("{exception} for {date}").format(exception=_(name),
date=dateFormat(exceptDate))
return title
|
python
|
def local_title(self):
"""
Localised version of the human-readable title of the page.
"""
name = self.title.partition(" for ")[0]
exceptDate = getLocalDate(self.except_date, self.time_from, self.tz)
title = _("{exception} for {date}").format(exception=_(name),
date=dateFormat(exceptDate))
return title
|
[
"def",
"local_title",
"(",
"self",
")",
":",
"name",
"=",
"self",
".",
"title",
".",
"partition",
"(",
"\" for \"",
")",
"[",
"0",
"]",
"exceptDate",
"=",
"getLocalDate",
"(",
"self",
".",
"except_date",
",",
"self",
".",
"time_from",
",",
"self",
".",
"tz",
")",
"title",
"=",
"_",
"(",
"\"{exception} for {date}\"",
")",
".",
"format",
"(",
"exception",
"=",
"_",
"(",
"name",
")",
",",
"date",
"=",
"dateFormat",
"(",
"exceptDate",
")",
")",
"return",
"title"
] |
Localised version of the human-readable title of the page.
|
[
"Localised",
"version",
"of",
"the",
"human",
"-",
"readable",
"title",
"of",
"the",
"page",
"."
] |
316283140ca5171a68ad3170a5964fdc89be0b56
|
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L1338-L1346
|
train
|
linuxsoftware/ls.joyous
|
ls/joyous/models/events.py
|
EventExceptionBase.full_clean
|
def full_clean(self, *args, **kwargs):
"""
Apply fixups that need to happen before per-field validation occurs.
Sets the page's title.
"""
name = getattr(self, 'name', self.slugName.title())
self.title = "{} for {}".format(name, dateFormat(self.except_date))
self.slug = "{}-{}".format(self.except_date, self.slugName)
super().full_clean(*args, **kwargs)
|
python
|
def full_clean(self, *args, **kwargs):
"""
Apply fixups that need to happen before per-field validation occurs.
Sets the page's title.
"""
name = getattr(self, 'name', self.slugName.title())
self.title = "{} for {}".format(name, dateFormat(self.except_date))
self.slug = "{}-{}".format(self.except_date, self.slugName)
super().full_clean(*args, **kwargs)
|
[
"def",
"full_clean",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
"=",
"getattr",
"(",
"self",
",",
"'name'",
",",
"self",
".",
"slugName",
".",
"title",
"(",
")",
")",
"self",
".",
"title",
"=",
"\"{} for {}\"",
".",
"format",
"(",
"name",
",",
"dateFormat",
"(",
"self",
".",
"except_date",
")",
")",
"self",
".",
"slug",
"=",
"\"{}-{}\"",
".",
"format",
"(",
"self",
".",
"except_date",
",",
"self",
".",
"slugName",
")",
"super",
"(",
")",
".",
"full_clean",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Apply fixups that need to happen before per-field validation occurs.
Sets the page's title.
|
[
"Apply",
"fixups",
"that",
"need",
"to",
"happen",
"before",
"per",
"-",
"field",
"validation",
"occurs",
".",
"Sets",
"the",
"page",
"s",
"title",
"."
] |
316283140ca5171a68ad3170a5964fdc89be0b56
|
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L1403-L1411
|
train
|
linuxsoftware/ls.joyous
|
ls/joyous/models/events.py
|
PostponementPage.what
|
def what(self):
"""
May return a 'postponed' or 'rescheduled' string depending what
the start and finish time of the event has been changed to.
"""
originalFromDt = dt.datetime.combine(self.except_date,
timeFrom(self.overrides.time_from))
changedFromDt = dt.datetime.combine(self.date, timeFrom(self.time_from))
originalDaysDelta = dt.timedelta(days=self.overrides.num_days - 1)
originalToDt = getAwareDatetime(self.except_date + originalDaysDelta,
self.overrides.time_to, self.tz)
changedDaysDelta = dt.timedelta(days=self.num_days - 1)
changedToDt = getAwareDatetime(self.except_date + changedDaysDelta,
self.time_to, self.tz)
if originalFromDt < changedFromDt:
return _("Postponed")
elif originalFromDt > changedFromDt or originalToDt != changedToDt:
return _("Rescheduled")
else:
return None
|
python
|
def what(self):
"""
May return a 'postponed' or 'rescheduled' string depending what
the start and finish time of the event has been changed to.
"""
originalFromDt = dt.datetime.combine(self.except_date,
timeFrom(self.overrides.time_from))
changedFromDt = dt.datetime.combine(self.date, timeFrom(self.time_from))
originalDaysDelta = dt.timedelta(days=self.overrides.num_days - 1)
originalToDt = getAwareDatetime(self.except_date + originalDaysDelta,
self.overrides.time_to, self.tz)
changedDaysDelta = dt.timedelta(days=self.num_days - 1)
changedToDt = getAwareDatetime(self.except_date + changedDaysDelta,
self.time_to, self.tz)
if originalFromDt < changedFromDt:
return _("Postponed")
elif originalFromDt > changedFromDt or originalToDt != changedToDt:
return _("Rescheduled")
else:
return None
|
[
"def",
"what",
"(",
"self",
")",
":",
"originalFromDt",
"=",
"dt",
".",
"datetime",
".",
"combine",
"(",
"self",
".",
"except_date",
",",
"timeFrom",
"(",
"self",
".",
"overrides",
".",
"time_from",
")",
")",
"changedFromDt",
"=",
"dt",
".",
"datetime",
".",
"combine",
"(",
"self",
".",
"date",
",",
"timeFrom",
"(",
"self",
".",
"time_from",
")",
")",
"originalDaysDelta",
"=",
"dt",
".",
"timedelta",
"(",
"days",
"=",
"self",
".",
"overrides",
".",
"num_days",
"-",
"1",
")",
"originalToDt",
"=",
"getAwareDatetime",
"(",
"self",
".",
"except_date",
"+",
"originalDaysDelta",
",",
"self",
".",
"overrides",
".",
"time_to",
",",
"self",
".",
"tz",
")",
"changedDaysDelta",
"=",
"dt",
".",
"timedelta",
"(",
"days",
"=",
"self",
".",
"num_days",
"-",
"1",
")",
"changedToDt",
"=",
"getAwareDatetime",
"(",
"self",
".",
"except_date",
"+",
"changedDaysDelta",
",",
"self",
".",
"time_to",
",",
"self",
".",
"tz",
")",
"if",
"originalFromDt",
"<",
"changedFromDt",
":",
"return",
"_",
"(",
"\"Postponed\"",
")",
"elif",
"originalFromDt",
">",
"changedFromDt",
"or",
"originalToDt",
"!=",
"changedToDt",
":",
"return",
"_",
"(",
"\"Rescheduled\"",
")",
"else",
":",
"return",
"None"
] |
May return a 'postponed' or 'rescheduled' string depending what
the start and finish time of the event has been changed to.
|
[
"May",
"return",
"a",
"postponed",
"or",
"rescheduled",
"string",
"depending",
"what",
"the",
"start",
"and",
"finish",
"time",
"of",
"the",
"event",
"has",
"been",
"changed",
"to",
"."
] |
316283140ca5171a68ad3170a5964fdc89be0b56
|
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L1750-L1769
|
train
|
linuxsoftware/ls.joyous
|
ls/joyous/formats/ical.py
|
VEvent._convertTZ
|
def _convertTZ(self):
"""Will convert UTC datetimes to the current local timezone"""
tz = timezone.get_current_timezone()
dtstart = self['DTSTART']
dtend = self['DTEND']
if dtstart.zone() == "UTC":
dtstart.dt = dtstart.dt.astimezone(tz)
if dtend.zone() == "UTC":
dtend.dt = dtend.dt.astimezone(tz)
|
python
|
def _convertTZ(self):
"""Will convert UTC datetimes to the current local timezone"""
tz = timezone.get_current_timezone()
dtstart = self['DTSTART']
dtend = self['DTEND']
if dtstart.zone() == "UTC":
dtstart.dt = dtstart.dt.astimezone(tz)
if dtend.zone() == "UTC":
dtend.dt = dtend.dt.astimezone(tz)
|
[
"def",
"_convertTZ",
"(",
"self",
")",
":",
"tz",
"=",
"timezone",
".",
"get_current_timezone",
"(",
")",
"dtstart",
"=",
"self",
"[",
"'DTSTART'",
"]",
"dtend",
"=",
"self",
"[",
"'DTEND'",
"]",
"if",
"dtstart",
".",
"zone",
"(",
")",
"==",
"\"UTC\"",
":",
"dtstart",
".",
"dt",
"=",
"dtstart",
".",
"dt",
".",
"astimezone",
"(",
"tz",
")",
"if",
"dtend",
".",
"zone",
"(",
")",
"==",
"\"UTC\"",
":",
"dtend",
".",
"dt",
"=",
"dtend",
".",
"dt",
".",
"astimezone",
"(",
"tz",
")"
] |
Will convert UTC datetimes to the current local timezone
|
[
"Will",
"convert",
"UTC",
"datetimes",
"to",
"the",
"current",
"local",
"timezone"
] |
316283140ca5171a68ad3170a5964fdc89be0b56
|
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/formats/ical.py#L518-L526
|
train
|
linuxsoftware/ls.joyous
|
ls/joyous/formats/vtimezone.py
|
to_naive_utc
|
def to_naive_utc(dtime):
"""convert a datetime object to UTC and than remove the tzinfo, if
datetime is naive already, return it
"""
if not hasattr(dtime, 'tzinfo') or dtime.tzinfo is None:
return dtime
dtime_utc = dtime.astimezone(pytz.UTC)
dtime_naive = dtime_utc.replace(tzinfo=None)
return dtime_naive
|
python
|
def to_naive_utc(dtime):
"""convert a datetime object to UTC and than remove the tzinfo, if
datetime is naive already, return it
"""
if not hasattr(dtime, 'tzinfo') or dtime.tzinfo is None:
return dtime
dtime_utc = dtime.astimezone(pytz.UTC)
dtime_naive = dtime_utc.replace(tzinfo=None)
return dtime_naive
|
[
"def",
"to_naive_utc",
"(",
"dtime",
")",
":",
"if",
"not",
"hasattr",
"(",
"dtime",
",",
"'tzinfo'",
")",
"or",
"dtime",
".",
"tzinfo",
"is",
"None",
":",
"return",
"dtime",
"dtime_utc",
"=",
"dtime",
".",
"astimezone",
"(",
"pytz",
".",
"UTC",
")",
"dtime_naive",
"=",
"dtime_utc",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"return",
"dtime_naive"
] |
convert a datetime object to UTC and than remove the tzinfo, if
datetime is naive already, return it
|
[
"convert",
"a",
"datetime",
"object",
"to",
"UTC",
"and",
"than",
"remove",
"the",
"tzinfo",
"if",
"datetime",
"is",
"naive",
"already",
"return",
"it"
] |
316283140ca5171a68ad3170a5964fdc89be0b56
|
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/formats/vtimezone.py#L29-L38
|
train
|
linuxsoftware/ls.joyous
|
ls/joyous/formats/vtimezone.py
|
create_timezone
|
def create_timezone(tz, first_date=None, last_date=None):
"""
create an icalendar vtimezone from a pytz.tzinfo object
:param tz: the timezone
:type tz: pytz.tzinfo
:param first_date: the very first datetime that needs to be included in the
transition times, typically the DTSTART value of the (first recurring)
event
:type first_date: datetime.datetime
:param last_date: the last datetime that needs to included, typically the
end of the (very last) event (of a recursion set)
:returns: timezone information
:rtype: icalendar.Timezone()
we currently have a problem here:
pytz.timezones only carry the absolute dates of time zone transitions,
not their RRULEs. This will a) make for rather bloated VTIMEZONE
components, especially for long recurring events, b) we'll need to
specify for which time range this VTIMEZONE should be generated and c)
will not be valid for recurring events that go into eternity.
Possible Solutions:
As this information is not provided by pytz at all, there is no
easy solution, we'd really need to ship another version of the OLSON DB.
"""
if isinstance(tz, pytz.tzinfo.StaticTzInfo):
return _create_timezone_static(tz)
# TODO last_date = None, recurring to infinity
first_date = dt.datetime.today() if not first_date else to_naive_utc(first_date)
last_date = dt.datetime.today() if not last_date else to_naive_utc(last_date)
timezone = icalendar.Timezone()
timezone.add('TZID', tz)
# This is not a reliable way of determining if a transition is for
# daylight savings.
# From 1927 to 1941 New Zealand had GMT+11:30 (NZ Mean Time) as standard
# and GMT+12:00 (NZ Summer Time) as daylight savings time.
# From 1941 GMT+12:00 (NZ Standard Time) became standard time.
# So NZST (NZ Summer/Standard Time) can refer to standard or daylight
# savings time. And this code depends on the random order the _tzinfos
# are returned.
# dst = {
# one[2]: 'DST' in two.__repr__()
# for one, two in iter(tz._tzinfos.items())
# }
# bst = {
# one[2]: 'BST' in two.__repr__()
# for one, two in iter(tz._tzinfos.items())
# }
# ...
# if dst[name] or bst[name]:
# looking for the first and last transition time we need to include
first_num, last_num = 0, len(tz._utc_transition_times) - 1
first_tt = tz._utc_transition_times[0]
last_tt = tz._utc_transition_times[-1]
for num, transtime in enumerate(tz._utc_transition_times):
if transtime > first_tt and transtime < first_date:
first_num = num
first_tt = transtime
if transtime < last_tt and transtime > last_date:
last_num = num
last_tt = transtime
timezones = dict()
for num in range(first_num, last_num + 1):
name = tz._transition_info[num][2]
if name in timezones:
ttime = tz.fromutc(tz._utc_transition_times[num]).replace(tzinfo=None)
if 'RDATE' in timezones[name]:
timezones[name]['RDATE'].dts.append(
icalendar.prop.vDDDTypes(ttime))
else:
timezones[name].add('RDATE', ttime)
continue
if tz._transition_info[num][1]:
subcomp = icalendar.TimezoneDaylight()
else:
subcomp = icalendar.TimezoneStandard()
subcomp.add('TZNAME', tz._transition_info[num][2])
subcomp.add(
'DTSTART',
tz.fromutc(tz._utc_transition_times[num]).replace(tzinfo=None))
subcomp.add('TZOFFSETTO', tz._transition_info[num][0])
subcomp.add('TZOFFSETFROM', tz._transition_info[num - 1][0])
timezones[name] = subcomp
for subcomp in timezones.values():
timezone.add_component(subcomp)
return timezone
|
python
|
def create_timezone(tz, first_date=None, last_date=None):
"""
create an icalendar vtimezone from a pytz.tzinfo object
:param tz: the timezone
:type tz: pytz.tzinfo
:param first_date: the very first datetime that needs to be included in the
transition times, typically the DTSTART value of the (first recurring)
event
:type first_date: datetime.datetime
:param last_date: the last datetime that needs to included, typically the
end of the (very last) event (of a recursion set)
:returns: timezone information
:rtype: icalendar.Timezone()
we currently have a problem here:
pytz.timezones only carry the absolute dates of time zone transitions,
not their RRULEs. This will a) make for rather bloated VTIMEZONE
components, especially for long recurring events, b) we'll need to
specify for which time range this VTIMEZONE should be generated and c)
will not be valid for recurring events that go into eternity.
Possible Solutions:
As this information is not provided by pytz at all, there is no
easy solution, we'd really need to ship another version of the OLSON DB.
"""
if isinstance(tz, pytz.tzinfo.StaticTzInfo):
return _create_timezone_static(tz)
# TODO last_date = None, recurring to infinity
first_date = dt.datetime.today() if not first_date else to_naive_utc(first_date)
last_date = dt.datetime.today() if not last_date else to_naive_utc(last_date)
timezone = icalendar.Timezone()
timezone.add('TZID', tz)
# This is not a reliable way of determining if a transition is for
# daylight savings.
# From 1927 to 1941 New Zealand had GMT+11:30 (NZ Mean Time) as standard
# and GMT+12:00 (NZ Summer Time) as daylight savings time.
# From 1941 GMT+12:00 (NZ Standard Time) became standard time.
# So NZST (NZ Summer/Standard Time) can refer to standard or daylight
# savings time. And this code depends on the random order the _tzinfos
# are returned.
# dst = {
# one[2]: 'DST' in two.__repr__()
# for one, two in iter(tz._tzinfos.items())
# }
# bst = {
# one[2]: 'BST' in two.__repr__()
# for one, two in iter(tz._tzinfos.items())
# }
# ...
# if dst[name] or bst[name]:
# looking for the first and last transition time we need to include
first_num, last_num = 0, len(tz._utc_transition_times) - 1
first_tt = tz._utc_transition_times[0]
last_tt = tz._utc_transition_times[-1]
for num, transtime in enumerate(tz._utc_transition_times):
if transtime > first_tt and transtime < first_date:
first_num = num
first_tt = transtime
if transtime < last_tt and transtime > last_date:
last_num = num
last_tt = transtime
timezones = dict()
for num in range(first_num, last_num + 1):
name = tz._transition_info[num][2]
if name in timezones:
ttime = tz.fromutc(tz._utc_transition_times[num]).replace(tzinfo=None)
if 'RDATE' in timezones[name]:
timezones[name]['RDATE'].dts.append(
icalendar.prop.vDDDTypes(ttime))
else:
timezones[name].add('RDATE', ttime)
continue
if tz._transition_info[num][1]:
subcomp = icalendar.TimezoneDaylight()
else:
subcomp = icalendar.TimezoneStandard()
subcomp.add('TZNAME', tz._transition_info[num][2])
subcomp.add(
'DTSTART',
tz.fromutc(tz._utc_transition_times[num]).replace(tzinfo=None))
subcomp.add('TZOFFSETTO', tz._transition_info[num][0])
subcomp.add('TZOFFSETFROM', tz._transition_info[num - 1][0])
timezones[name] = subcomp
for subcomp in timezones.values():
timezone.add_component(subcomp)
return timezone
|
[
"def",
"create_timezone",
"(",
"tz",
",",
"first_date",
"=",
"None",
",",
"last_date",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"tz",
",",
"pytz",
".",
"tzinfo",
".",
"StaticTzInfo",
")",
":",
"return",
"_create_timezone_static",
"(",
"tz",
")",
"# TODO last_date = None, recurring to infinity",
"first_date",
"=",
"dt",
".",
"datetime",
".",
"today",
"(",
")",
"if",
"not",
"first_date",
"else",
"to_naive_utc",
"(",
"first_date",
")",
"last_date",
"=",
"dt",
".",
"datetime",
".",
"today",
"(",
")",
"if",
"not",
"last_date",
"else",
"to_naive_utc",
"(",
"last_date",
")",
"timezone",
"=",
"icalendar",
".",
"Timezone",
"(",
")",
"timezone",
".",
"add",
"(",
"'TZID'",
",",
"tz",
")",
"# This is not a reliable way of determining if a transition is for",
"# daylight savings.",
"# From 1927 to 1941 New Zealand had GMT+11:30 (NZ Mean Time) as standard",
"# and GMT+12:00 (NZ Summer Time) as daylight savings time.",
"# From 1941 GMT+12:00 (NZ Standard Time) became standard time.",
"# So NZST (NZ Summer/Standard Time) can refer to standard or daylight",
"# savings time. And this code depends on the random order the _tzinfos",
"# are returned.",
"# dst = {",
"# one[2]: 'DST' in two.__repr__()",
"# for one, two in iter(tz._tzinfos.items())",
"# }",
"# bst = {",
"# one[2]: 'BST' in two.__repr__()",
"# for one, two in iter(tz._tzinfos.items())",
"# }",
"# ...",
"# if dst[name] or bst[name]:",
"# looking for the first and last transition time we need to include",
"first_num",
",",
"last_num",
"=",
"0",
",",
"len",
"(",
"tz",
".",
"_utc_transition_times",
")",
"-",
"1",
"first_tt",
"=",
"tz",
".",
"_utc_transition_times",
"[",
"0",
"]",
"last_tt",
"=",
"tz",
".",
"_utc_transition_times",
"[",
"-",
"1",
"]",
"for",
"num",
",",
"transtime",
"in",
"enumerate",
"(",
"tz",
".",
"_utc_transition_times",
")",
":",
"if",
"transtime",
">",
"first_tt",
"and",
"transtime",
"<",
"first_date",
":",
"first_num",
"=",
"num",
"first_tt",
"=",
"transtime",
"if",
"transtime",
"<",
"last_tt",
"and",
"transtime",
">",
"last_date",
":",
"last_num",
"=",
"num",
"last_tt",
"=",
"transtime",
"timezones",
"=",
"dict",
"(",
")",
"for",
"num",
"in",
"range",
"(",
"first_num",
",",
"last_num",
"+",
"1",
")",
":",
"name",
"=",
"tz",
".",
"_transition_info",
"[",
"num",
"]",
"[",
"2",
"]",
"if",
"name",
"in",
"timezones",
":",
"ttime",
"=",
"tz",
".",
"fromutc",
"(",
"tz",
".",
"_utc_transition_times",
"[",
"num",
"]",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"if",
"'RDATE'",
"in",
"timezones",
"[",
"name",
"]",
":",
"timezones",
"[",
"name",
"]",
"[",
"'RDATE'",
"]",
".",
"dts",
".",
"append",
"(",
"icalendar",
".",
"prop",
".",
"vDDDTypes",
"(",
"ttime",
")",
")",
"else",
":",
"timezones",
"[",
"name",
"]",
".",
"add",
"(",
"'RDATE'",
",",
"ttime",
")",
"continue",
"if",
"tz",
".",
"_transition_info",
"[",
"num",
"]",
"[",
"1",
"]",
":",
"subcomp",
"=",
"icalendar",
".",
"TimezoneDaylight",
"(",
")",
"else",
":",
"subcomp",
"=",
"icalendar",
".",
"TimezoneStandard",
"(",
")",
"subcomp",
".",
"add",
"(",
"'TZNAME'",
",",
"tz",
".",
"_transition_info",
"[",
"num",
"]",
"[",
"2",
"]",
")",
"subcomp",
".",
"add",
"(",
"'DTSTART'",
",",
"tz",
".",
"fromutc",
"(",
"tz",
".",
"_utc_transition_times",
"[",
"num",
"]",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
")",
"subcomp",
".",
"add",
"(",
"'TZOFFSETTO'",
",",
"tz",
".",
"_transition_info",
"[",
"num",
"]",
"[",
"0",
"]",
")",
"subcomp",
".",
"add",
"(",
"'TZOFFSETFROM'",
",",
"tz",
".",
"_transition_info",
"[",
"num",
"-",
"1",
"]",
"[",
"0",
"]",
")",
"timezones",
"[",
"name",
"]",
"=",
"subcomp",
"for",
"subcomp",
"in",
"timezones",
".",
"values",
"(",
")",
":",
"timezone",
".",
"add_component",
"(",
"subcomp",
")",
"return",
"timezone"
] |
create an icalendar vtimezone from a pytz.tzinfo object
:param tz: the timezone
:type tz: pytz.tzinfo
:param first_date: the very first datetime that needs to be included in the
transition times, typically the DTSTART value of the (first recurring)
event
:type first_date: datetime.datetime
:param last_date: the last datetime that needs to included, typically the
end of the (very last) event (of a recursion set)
:returns: timezone information
:rtype: icalendar.Timezone()
we currently have a problem here:
pytz.timezones only carry the absolute dates of time zone transitions,
not their RRULEs. This will a) make for rather bloated VTIMEZONE
components, especially for long recurring events, b) we'll need to
specify for which time range this VTIMEZONE should be generated and c)
will not be valid for recurring events that go into eternity.
Possible Solutions:
As this information is not provided by pytz at all, there is no
easy solution, we'd really need to ship another version of the OLSON DB.
|
[
"create",
"an",
"icalendar",
"vtimezone",
"from",
"a",
"pytz",
".",
"tzinfo",
"object"
] |
316283140ca5171a68ad3170a5964fdc89be0b56
|
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/formats/vtimezone.py#L40-L138
|
train
|
linuxsoftware/ls.joyous
|
ls/joyous/formats/vtimezone.py
|
_create_timezone_static
|
def _create_timezone_static(tz):
"""create an icalendar vtimezone from a pytz.tzinfo.StaticTzInfo
:param tz: the timezone
:type tz: pytz.tzinfo.StaticTzInfo
:returns: timezone information
:rtype: icalendar.Timezone()
"""
timezone = icalendar.Timezone()
timezone.add('TZID', tz)
subcomp = icalendar.TimezoneStandard()
subcomp.add('TZNAME', tz)
subcomp.add('DTSTART', dt.datetime(1601, 1, 1))
subcomp.add('RDATE', dt.datetime(1601, 1, 1))
subcomp.add('TZOFFSETTO', tz._utcoffset)
subcomp.add('TZOFFSETFROM', tz._utcoffset)
timezone.add_component(subcomp)
return timezone
|
python
|
def _create_timezone_static(tz):
"""create an icalendar vtimezone from a pytz.tzinfo.StaticTzInfo
:param tz: the timezone
:type tz: pytz.tzinfo.StaticTzInfo
:returns: timezone information
:rtype: icalendar.Timezone()
"""
timezone = icalendar.Timezone()
timezone.add('TZID', tz)
subcomp = icalendar.TimezoneStandard()
subcomp.add('TZNAME', tz)
subcomp.add('DTSTART', dt.datetime(1601, 1, 1))
subcomp.add('RDATE', dt.datetime(1601, 1, 1))
subcomp.add('TZOFFSETTO', tz._utcoffset)
subcomp.add('TZOFFSETFROM', tz._utcoffset)
timezone.add_component(subcomp)
return timezone
|
[
"def",
"_create_timezone_static",
"(",
"tz",
")",
":",
"timezone",
"=",
"icalendar",
".",
"Timezone",
"(",
")",
"timezone",
".",
"add",
"(",
"'TZID'",
",",
"tz",
")",
"subcomp",
"=",
"icalendar",
".",
"TimezoneStandard",
"(",
")",
"subcomp",
".",
"add",
"(",
"'TZNAME'",
",",
"tz",
")",
"subcomp",
".",
"add",
"(",
"'DTSTART'",
",",
"dt",
".",
"datetime",
"(",
"1601",
",",
"1",
",",
"1",
")",
")",
"subcomp",
".",
"add",
"(",
"'RDATE'",
",",
"dt",
".",
"datetime",
"(",
"1601",
",",
"1",
",",
"1",
")",
")",
"subcomp",
".",
"add",
"(",
"'TZOFFSETTO'",
",",
"tz",
".",
"_utcoffset",
")",
"subcomp",
".",
"add",
"(",
"'TZOFFSETFROM'",
",",
"tz",
".",
"_utcoffset",
")",
"timezone",
".",
"add_component",
"(",
"subcomp",
")",
"return",
"timezone"
] |
create an icalendar vtimezone from a pytz.tzinfo.StaticTzInfo
:param tz: the timezone
:type tz: pytz.tzinfo.StaticTzInfo
:returns: timezone information
:rtype: icalendar.Timezone()
|
[
"create",
"an",
"icalendar",
"vtimezone",
"from",
"a",
"pytz",
".",
"tzinfo",
".",
"StaticTzInfo"
] |
316283140ca5171a68ad3170a5964fdc89be0b56
|
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/formats/vtimezone.py#L141-L158
|
train
|
Onyo/jsonbender
|
jsonbender/core.py
|
bend
|
def bend(mapping, source, context=None):
"""
The main bending function.
mapping: the map of benders
source: a dict to be bent
returns a new dict according to the provided map.
"""
context = {} if context is None else context
transport = Transport(source, context)
return _bend(mapping, transport)
|
python
|
def bend(mapping, source, context=None):
"""
The main bending function.
mapping: the map of benders
source: a dict to be bent
returns a new dict according to the provided map.
"""
context = {} if context is None else context
transport = Transport(source, context)
return _bend(mapping, transport)
|
[
"def",
"bend",
"(",
"mapping",
",",
"source",
",",
"context",
"=",
"None",
")",
":",
"context",
"=",
"{",
"}",
"if",
"context",
"is",
"None",
"else",
"context",
"transport",
"=",
"Transport",
"(",
"source",
",",
"context",
")",
"return",
"_bend",
"(",
"mapping",
",",
"transport",
")"
] |
The main bending function.
mapping: the map of benders
source: a dict to be bent
returns a new dict according to the provided map.
|
[
"The",
"main",
"bending",
"function",
"."
] |
df85ec444be3185d346db894402ca6eaa38dd947
|
https://github.com/Onyo/jsonbender/blob/df85ec444be3185d346db894402ca6eaa38dd947/jsonbender/core.py#L216-L227
|
train
|
Onyo/jsonbender
|
jsonbender/selectors.py
|
F.protect
|
def protect(self, protect_against=None):
"""
Return a ProtectedF with the same parameters and with the given
`protect_against`.
"""
return ProtectedF(self._func,
*self._args,
protect_against=protect_against,
**self._kwargs)
|
python
|
def protect(self, protect_against=None):
"""
Return a ProtectedF with the same parameters and with the given
`protect_against`.
"""
return ProtectedF(self._func,
*self._args,
protect_against=protect_against,
**self._kwargs)
|
[
"def",
"protect",
"(",
"self",
",",
"protect_against",
"=",
"None",
")",
":",
"return",
"ProtectedF",
"(",
"self",
".",
"_func",
",",
"*",
"self",
".",
"_args",
",",
"protect_against",
"=",
"protect_against",
",",
"*",
"*",
"self",
".",
"_kwargs",
")"
] |
Return a ProtectedF with the same parameters and with the given
`protect_against`.
|
[
"Return",
"a",
"ProtectedF",
"with",
"the",
"same",
"parameters",
"and",
"with",
"the",
"given",
"protect_against",
"."
] |
df85ec444be3185d346db894402ca6eaa38dd947
|
https://github.com/Onyo/jsonbender/blob/df85ec444be3185d346db894402ca6eaa38dd947/jsonbender/selectors.py#L83-L91
|
train
|
wdecoster/NanoPlot
|
nanoplot/utils.py
|
init_logs
|
def init_logs(args, tool="NanoPlot"):
"""Initiate log file and log arguments."""
start_time = dt.fromtimestamp(time()).strftime('%Y%m%d_%H%M')
logname = os.path.join(args.outdir, args.prefix + tool + "_" + start_time + ".log")
handlers = [logging.FileHandler(logname)]
if args.verbose:
handlers.append(logging.StreamHandler())
logging.basicConfig(
format='%(asctime)s %(message)s',
handlers=handlers,
level=logging.INFO)
logging.info('{} {} started with arguments {}'.format(tool, __version__, args))
logging.info('Python version is: {}'.format(sys.version.replace('\n', ' ')))
return logname
|
python
|
def init_logs(args, tool="NanoPlot"):
"""Initiate log file and log arguments."""
start_time = dt.fromtimestamp(time()).strftime('%Y%m%d_%H%M')
logname = os.path.join(args.outdir, args.prefix + tool + "_" + start_time + ".log")
handlers = [logging.FileHandler(logname)]
if args.verbose:
handlers.append(logging.StreamHandler())
logging.basicConfig(
format='%(asctime)s %(message)s',
handlers=handlers,
level=logging.INFO)
logging.info('{} {} started with arguments {}'.format(tool, __version__, args))
logging.info('Python version is: {}'.format(sys.version.replace('\n', ' ')))
return logname
|
[
"def",
"init_logs",
"(",
"args",
",",
"tool",
"=",
"\"NanoPlot\"",
")",
":",
"start_time",
"=",
"dt",
".",
"fromtimestamp",
"(",
"time",
"(",
")",
")",
".",
"strftime",
"(",
"'%Y%m%d_%H%M'",
")",
"logname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"outdir",
",",
"args",
".",
"prefix",
"+",
"tool",
"+",
"\"_\"",
"+",
"start_time",
"+",
"\".log\"",
")",
"handlers",
"=",
"[",
"logging",
".",
"FileHandler",
"(",
"logname",
")",
"]",
"if",
"args",
".",
"verbose",
":",
"handlers",
".",
"append",
"(",
"logging",
".",
"StreamHandler",
"(",
")",
")",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"'%(asctime)s %(message)s'",
",",
"handlers",
"=",
"handlers",
",",
"level",
"=",
"logging",
".",
"INFO",
")",
"logging",
".",
"info",
"(",
"'{} {} started with arguments {}'",
".",
"format",
"(",
"tool",
",",
"__version__",
",",
"args",
")",
")",
"logging",
".",
"info",
"(",
"'Python version is: {}'",
".",
"format",
"(",
"sys",
".",
"version",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
")",
")",
"return",
"logname"
] |
Initiate log file and log arguments.
|
[
"Initiate",
"log",
"file",
"and",
"log",
"arguments",
"."
] |
d1601076731df2a07020316bd159b544f497a606
|
https://github.com/wdecoster/NanoPlot/blob/d1601076731df2a07020316bd159b544f497a606/nanoplot/utils.py#L61-L74
|
train
|
wdecoster/NanoPlot
|
nanoplot/filteroptions.py
|
flag_length_outliers
|
def flag_length_outliers(df, columnname):
"""Return index of records with length-outliers above 3 standard deviations from the median."""
return df[columnname] > (np.median(df[columnname]) + 3 * np.std(df[columnname]))
|
python
|
def flag_length_outliers(df, columnname):
"""Return index of records with length-outliers above 3 standard deviations from the median."""
return df[columnname] > (np.median(df[columnname]) + 3 * np.std(df[columnname]))
|
[
"def",
"flag_length_outliers",
"(",
"df",
",",
"columnname",
")",
":",
"return",
"df",
"[",
"columnname",
"]",
">",
"(",
"np",
".",
"median",
"(",
"df",
"[",
"columnname",
"]",
")",
"+",
"3",
"*",
"np",
".",
"std",
"(",
"df",
"[",
"columnname",
"]",
")",
")"
] |
Return index of records with length-outliers above 3 standard deviations from the median.
|
[
"Return",
"index",
"of",
"records",
"with",
"length",
"-",
"outliers",
"above",
"3",
"standard",
"deviations",
"from",
"the",
"median",
"."
] |
d1601076731df2a07020316bd159b544f497a606
|
https://github.com/wdecoster/NanoPlot/blob/d1601076731df2a07020316bd159b544f497a606/nanoplot/filteroptions.py#L6-L8
|
train
|
xmunoz/sodapy
|
sodapy/__init__.py
|
_raise_for_status
|
def _raise_for_status(response):
'''
Custom raise_for_status with more appropriate error message.
'''
http_error_msg = ""
if 400 <= response.status_code < 500:
http_error_msg = "{0} Client Error: {1}".format(response.status_code,
response.reason)
elif 500 <= response.status_code < 600:
http_error_msg = "{0} Server Error: {1}".format(response.status_code,
response.reason)
if http_error_msg:
try:
more_info = response.json().get("message")
except ValueError:
more_info = None
if more_info and more_info.lower() != response.reason.lower():
http_error_msg += ".\n\t{0}".format(more_info)
raise requests.exceptions.HTTPError(http_error_msg, response=response)
|
python
|
def _raise_for_status(response):
'''
Custom raise_for_status with more appropriate error message.
'''
http_error_msg = ""
if 400 <= response.status_code < 500:
http_error_msg = "{0} Client Error: {1}".format(response.status_code,
response.reason)
elif 500 <= response.status_code < 600:
http_error_msg = "{0} Server Error: {1}".format(response.status_code,
response.reason)
if http_error_msg:
try:
more_info = response.json().get("message")
except ValueError:
more_info = None
if more_info and more_info.lower() != response.reason.lower():
http_error_msg += ".\n\t{0}".format(more_info)
raise requests.exceptions.HTTPError(http_error_msg, response=response)
|
[
"def",
"_raise_for_status",
"(",
"response",
")",
":",
"http_error_msg",
"=",
"\"\"",
"if",
"400",
"<=",
"response",
".",
"status_code",
"<",
"500",
":",
"http_error_msg",
"=",
"\"{0} Client Error: {1}\"",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"reason",
")",
"elif",
"500",
"<=",
"response",
".",
"status_code",
"<",
"600",
":",
"http_error_msg",
"=",
"\"{0} Server Error: {1}\"",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"reason",
")",
"if",
"http_error_msg",
":",
"try",
":",
"more_info",
"=",
"response",
".",
"json",
"(",
")",
".",
"get",
"(",
"\"message\"",
")",
"except",
"ValueError",
":",
"more_info",
"=",
"None",
"if",
"more_info",
"and",
"more_info",
".",
"lower",
"(",
")",
"!=",
"response",
".",
"reason",
".",
"lower",
"(",
")",
":",
"http_error_msg",
"+=",
"\".\\n\\t{0}\"",
".",
"format",
"(",
"more_info",
")",
"raise",
"requests",
".",
"exceptions",
".",
"HTTPError",
"(",
"http_error_msg",
",",
"response",
"=",
"response",
")"
] |
Custom raise_for_status with more appropriate error message.
|
[
"Custom",
"raise_for_status",
"with",
"more",
"appropriate",
"error",
"message",
"."
] |
dad2ca9560cde0acb03bdb4423260e891ca40d7b
|
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L510-L531
|
train
|
xmunoz/sodapy
|
sodapy/__init__.py
|
_clear_empty_values
|
def _clear_empty_values(args):
'''
Scrap junk data from a dict.
'''
result = {}
for param in args:
if args[param] is not None:
result[param] = args[param]
return result
|
python
|
def _clear_empty_values(args):
'''
Scrap junk data from a dict.
'''
result = {}
for param in args:
if args[param] is not None:
result[param] = args[param]
return result
|
[
"def",
"_clear_empty_values",
"(",
"args",
")",
":",
"result",
"=",
"{",
"}",
"for",
"param",
"in",
"args",
":",
"if",
"args",
"[",
"param",
"]",
"is",
"not",
"None",
":",
"result",
"[",
"param",
"]",
"=",
"args",
"[",
"param",
"]",
"return",
"result"
] |
Scrap junk data from a dict.
|
[
"Scrap",
"junk",
"data",
"from",
"a",
"dict",
"."
] |
dad2ca9560cde0acb03bdb4423260e891ca40d7b
|
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L534-L542
|
train
|
xmunoz/sodapy
|
sodapy/__init__.py
|
authentication_validation
|
def authentication_validation(username, password, access_token):
'''
Only accept one form of authentication.
'''
if bool(username) is not bool(password):
raise Exception("Basic authentication requires a username AND"
" password.")
if (username and access_token) or (password and access_token):
raise Exception("Cannot use both Basic Authentication and"
" OAuth2.0. Please use only one authentication"
" method.")
|
python
|
def authentication_validation(username, password, access_token):
'''
Only accept one form of authentication.
'''
if bool(username) is not bool(password):
raise Exception("Basic authentication requires a username AND"
" password.")
if (username and access_token) or (password and access_token):
raise Exception("Cannot use both Basic Authentication and"
" OAuth2.0. Please use only one authentication"
" method.")
|
[
"def",
"authentication_validation",
"(",
"username",
",",
"password",
",",
"access_token",
")",
":",
"if",
"bool",
"(",
"username",
")",
"is",
"not",
"bool",
"(",
"password",
")",
":",
"raise",
"Exception",
"(",
"\"Basic authentication requires a username AND\"",
"\" password.\"",
")",
"if",
"(",
"username",
"and",
"access_token",
")",
"or",
"(",
"password",
"and",
"access_token",
")",
":",
"raise",
"Exception",
"(",
"\"Cannot use both Basic Authentication and\"",
"\" OAuth2.0. Please use only one authentication\"",
"\" method.\"",
")"
] |
Only accept one form of authentication.
|
[
"Only",
"accept",
"one",
"form",
"of",
"authentication",
"."
] |
dad2ca9560cde0acb03bdb4423260e891ca40d7b
|
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L570-L580
|
train
|
xmunoz/sodapy
|
sodapy/__init__.py
|
_download_file
|
def _download_file(url, local_filename):
'''
Utility function that downloads a chunked response from the specified url to a local path.
This method is suitable for larger downloads.
'''
response = requests.get(url, stream=True)
with open(local_filename, 'wb') as outfile:
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
outfile.write(chunk)
|
python
|
def _download_file(url, local_filename):
'''
Utility function that downloads a chunked response from the specified url to a local path.
This method is suitable for larger downloads.
'''
response = requests.get(url, stream=True)
with open(local_filename, 'wb') as outfile:
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
outfile.write(chunk)
|
[
"def",
"_download_file",
"(",
"url",
",",
"local_filename",
")",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"with",
"open",
"(",
"local_filename",
",",
"'wb'",
")",
"as",
"outfile",
":",
"for",
"chunk",
"in",
"response",
".",
"iter_content",
"(",
"chunk_size",
"=",
"1024",
")",
":",
"if",
"chunk",
":",
"# filter out keep-alive new chunks",
"outfile",
".",
"write",
"(",
"chunk",
")"
] |
Utility function that downloads a chunked response from the specified url to a local path.
This method is suitable for larger downloads.
|
[
"Utility",
"function",
"that",
"downloads",
"a",
"chunked",
"response",
"from",
"the",
"specified",
"url",
"to",
"a",
"local",
"path",
".",
"This",
"method",
"is",
"suitable",
"for",
"larger",
"downloads",
"."
] |
dad2ca9560cde0acb03bdb4423260e891ca40d7b
|
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L583-L592
|
train
|
xmunoz/sodapy
|
sodapy/__init__.py
|
Socrata.set_permission
|
def set_permission(self, dataset_identifier, permission="private", content_type="json"):
'''
Set a dataset's permissions to private or public
Options are private, public
'''
resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type)
params = {
"method": "setPermission",
"value": "public.read" if permission == "public" else permission
}
return self._perform_request("put", resource, params=params)
|
python
|
def set_permission(self, dataset_identifier, permission="private", content_type="json"):
'''
Set a dataset's permissions to private or public
Options are private, public
'''
resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type)
params = {
"method": "setPermission",
"value": "public.read" if permission == "public" else permission
}
return self._perform_request("put", resource, params=params)
|
[
"def",
"set_permission",
"(",
"self",
",",
"dataset_identifier",
",",
"permission",
"=",
"\"private\"",
",",
"content_type",
"=",
"\"json\"",
")",
":",
"resource",
"=",
"_format_old_api_request",
"(",
"dataid",
"=",
"dataset_identifier",
",",
"content_type",
"=",
"content_type",
")",
"params",
"=",
"{",
"\"method\"",
":",
"\"setPermission\"",
",",
"\"value\"",
":",
"\"public.read\"",
"if",
"permission",
"==",
"\"public\"",
"else",
"permission",
"}",
"return",
"self",
".",
"_perform_request",
"(",
"\"put\"",
",",
"resource",
",",
"params",
"=",
"params",
")"
] |
Set a dataset's permissions to private or public
Options are private, public
|
[
"Set",
"a",
"dataset",
"s",
"permissions",
"to",
"private",
"or",
"public",
"Options",
"are",
"private",
"public"
] |
dad2ca9560cde0acb03bdb4423260e891ca40d7b
|
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L236-L249
|
train
|
xmunoz/sodapy
|
sodapy/__init__.py
|
Socrata.get_metadata
|
def get_metadata(self, dataset_identifier, content_type="json"):
'''
Retrieve the metadata for a particular dataset.
'''
resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type)
return self._perform_request("get", resource)
|
python
|
def get_metadata(self, dataset_identifier, content_type="json"):
'''
Retrieve the metadata for a particular dataset.
'''
resource = _format_old_api_request(dataid=dataset_identifier, content_type=content_type)
return self._perform_request("get", resource)
|
[
"def",
"get_metadata",
"(",
"self",
",",
"dataset_identifier",
",",
"content_type",
"=",
"\"json\"",
")",
":",
"resource",
"=",
"_format_old_api_request",
"(",
"dataid",
"=",
"dataset_identifier",
",",
"content_type",
"=",
"content_type",
")",
"return",
"self",
".",
"_perform_request",
"(",
"\"get\"",
",",
"resource",
")"
] |
Retrieve the metadata for a particular dataset.
|
[
"Retrieve",
"the",
"metadata",
"for",
"a",
"particular",
"dataset",
"."
] |
dad2ca9560cde0acb03bdb4423260e891ca40d7b
|
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L251-L256
|
train
|
xmunoz/sodapy
|
sodapy/__init__.py
|
Socrata.download_attachments
|
def download_attachments(self, dataset_identifier, content_type="json",
download_dir="~/sodapy_downloads"):
'''
Download all of the attachments associated with a dataset. Return the paths of downloaded
files.
'''
metadata = self.get_metadata(dataset_identifier, content_type=content_type)
files = []
attachments = metadata['metadata'].get("attachments")
if not attachments:
logging.info("No attachments were found or downloaded.")
return files
download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier)
if not os.path.exists(download_dir):
os.makedirs(download_dir)
for attachment in attachments:
file_path = os.path.join(download_dir, attachment["filename"])
has_assetid = attachment.get("assetId", False)
if has_assetid:
base = _format_old_api_request(dataid=dataset_identifier)
assetid = attachment["assetId"]
resource = "{0}/files/{1}?download=true&filename={2}"\
.format(base, assetid, attachment["filename"])
else:
base = "/api/assets"
assetid = attachment["blobId"]
resource = "{0}/{1}?download=true".format(base, assetid)
uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource)
_download_file(uri, file_path)
files.append(file_path)
logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files)))
return files
|
python
|
def download_attachments(self, dataset_identifier, content_type="json",
download_dir="~/sodapy_downloads"):
'''
Download all of the attachments associated with a dataset. Return the paths of downloaded
files.
'''
metadata = self.get_metadata(dataset_identifier, content_type=content_type)
files = []
attachments = metadata['metadata'].get("attachments")
if not attachments:
logging.info("No attachments were found or downloaded.")
return files
download_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier)
if not os.path.exists(download_dir):
os.makedirs(download_dir)
for attachment in attachments:
file_path = os.path.join(download_dir, attachment["filename"])
has_assetid = attachment.get("assetId", False)
if has_assetid:
base = _format_old_api_request(dataid=dataset_identifier)
assetid = attachment["assetId"]
resource = "{0}/files/{1}?download=true&filename={2}"\
.format(base, assetid, attachment["filename"])
else:
base = "/api/assets"
assetid = attachment["blobId"]
resource = "{0}/{1}?download=true".format(base, assetid)
uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource)
_download_file(uri, file_path)
files.append(file_path)
logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(files)))
return files
|
[
"def",
"download_attachments",
"(",
"self",
",",
"dataset_identifier",
",",
"content_type",
"=",
"\"json\"",
",",
"download_dir",
"=",
"\"~/sodapy_downloads\"",
")",
":",
"metadata",
"=",
"self",
".",
"get_metadata",
"(",
"dataset_identifier",
",",
"content_type",
"=",
"content_type",
")",
"files",
"=",
"[",
"]",
"attachments",
"=",
"metadata",
"[",
"'metadata'",
"]",
".",
"get",
"(",
"\"attachments\"",
")",
"if",
"not",
"attachments",
":",
"logging",
".",
"info",
"(",
"\"No attachments were found or downloaded.\"",
")",
"return",
"files",
"download_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"download_dir",
")",
",",
"dataset_identifier",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"download_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"download_dir",
")",
"for",
"attachment",
"in",
"attachments",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"download_dir",
",",
"attachment",
"[",
"\"filename\"",
"]",
")",
"has_assetid",
"=",
"attachment",
".",
"get",
"(",
"\"assetId\"",
",",
"False",
")",
"if",
"has_assetid",
":",
"base",
"=",
"_format_old_api_request",
"(",
"dataid",
"=",
"dataset_identifier",
")",
"assetid",
"=",
"attachment",
"[",
"\"assetId\"",
"]",
"resource",
"=",
"\"{0}/files/{1}?download=true&filename={2}\"",
".",
"format",
"(",
"base",
",",
"assetid",
",",
"attachment",
"[",
"\"filename\"",
"]",
")",
"else",
":",
"base",
"=",
"\"/api/assets\"",
"assetid",
"=",
"attachment",
"[",
"\"blobId\"",
"]",
"resource",
"=",
"\"{0}/{1}?download=true\"",
".",
"format",
"(",
"base",
",",
"assetid",
")",
"uri",
"=",
"\"{0}{1}{2}\"",
".",
"format",
"(",
"self",
".",
"uri_prefix",
",",
"self",
".",
"domain",
",",
"resource",
")",
"_download_file",
"(",
"uri",
",",
"file_path",
")",
"files",
".",
"append",
"(",
"file_path",
")",
"logging",
".",
"info",
"(",
"\"The following files were downloaded:\\n\\t{0}\"",
".",
"format",
"(",
"\"\\n\\t\"",
".",
"join",
"(",
"files",
")",
")",
")",
"return",
"files"
] |
Download all of the attachments associated with a dataset. Return the paths of downloaded
files.
|
[
"Download",
"all",
"of",
"the",
"attachments",
"associated",
"with",
"a",
"dataset",
".",
"Return",
"the",
"paths",
"of",
"downloaded",
"files",
"."
] |
dad2ca9560cde0acb03bdb4423260e891ca40d7b
|
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L269-L304
|
train
|
xmunoz/sodapy
|
sodapy/__init__.py
|
Socrata.replace_non_data_file
|
def replace_non_data_file(self, dataset_identifier, params, file_data):
'''
Same as create_non_data_file, but replaces a file that already exists in a
file-based dataset.
WARNING: a table-based dataset cannot be replaced by a file-based dataset.
Use create_non_data_file in order to replace.
'''
resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt")
if not params.get('method', None):
params['method'] = 'replaceBlob'
params['id'] = dataset_identifier
return self._perform_request("post", resource, params=params, files=file_data)
|
python
|
def replace_non_data_file(self, dataset_identifier, params, file_data):
'''
Same as create_non_data_file, but replaces a file that already exists in a
file-based dataset.
WARNING: a table-based dataset cannot be replaced by a file-based dataset.
Use create_non_data_file in order to replace.
'''
resource = _format_old_api_request(dataid=dataset_identifier, content_type="txt")
if not params.get('method', None):
params['method'] = 'replaceBlob'
params['id'] = dataset_identifier
return self._perform_request("post", resource, params=params, files=file_data)
|
[
"def",
"replace_non_data_file",
"(",
"self",
",",
"dataset_identifier",
",",
"params",
",",
"file_data",
")",
":",
"resource",
"=",
"_format_old_api_request",
"(",
"dataid",
"=",
"dataset_identifier",
",",
"content_type",
"=",
"\"txt\"",
")",
"if",
"not",
"params",
".",
"get",
"(",
"'method'",
",",
"None",
")",
":",
"params",
"[",
"'method'",
"]",
"=",
"'replaceBlob'",
"params",
"[",
"'id'",
"]",
"=",
"dataset_identifier",
"return",
"self",
".",
"_perform_request",
"(",
"\"post\"",
",",
"resource",
",",
"params",
"=",
"params",
",",
"files",
"=",
"file_data",
")"
] |
Same as create_non_data_file, but replaces a file that already exists in a
file-based dataset.
WARNING: a table-based dataset cannot be replaced by a file-based dataset.
Use create_non_data_file in order to replace.
|
[
"Same",
"as",
"create_non_data_file",
"but",
"replaces",
"a",
"file",
"that",
"already",
"exists",
"in",
"a",
"file",
"-",
"based",
"dataset",
"."
] |
dad2ca9560cde0acb03bdb4423260e891ca40d7b
|
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L400-L415
|
train
|
xmunoz/sodapy
|
sodapy/__init__.py
|
Socrata._perform_update
|
def _perform_update(self, method, resource, payload):
'''
Execute the update task.
'''
# python2/3 compatibility wizardry
try:
file_type = file
except NameError:
file_type = IOBase
if isinstance(payload, (dict, list)):
response = self._perform_request(method, resource,
data=json.dumps(payload))
elif isinstance(payload, file_type):
headers = {
"content-type": "text/csv",
}
response = self._perform_request(method, resource, data=payload,
headers=headers)
else:
raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-,"
" and file-types are supported.".format(type(payload)))
return response
|
python
|
def _perform_update(self, method, resource, payload):
'''
Execute the update task.
'''
# python2/3 compatibility wizardry
try:
file_type = file
except NameError:
file_type = IOBase
if isinstance(payload, (dict, list)):
response = self._perform_request(method, resource,
data=json.dumps(payload))
elif isinstance(payload, file_type):
headers = {
"content-type": "text/csv",
}
response = self._perform_request(method, resource, data=payload,
headers=headers)
else:
raise Exception("Unrecognized payload {0}. Currently only list-, dictionary-,"
" and file-types are supported.".format(type(payload)))
return response
|
[
"def",
"_perform_update",
"(",
"self",
",",
"method",
",",
"resource",
",",
"payload",
")",
":",
"# python2/3 compatibility wizardry",
"try",
":",
"file_type",
"=",
"file",
"except",
"NameError",
":",
"file_type",
"=",
"IOBase",
"if",
"isinstance",
"(",
"payload",
",",
"(",
"dict",
",",
"list",
")",
")",
":",
"response",
"=",
"self",
".",
"_perform_request",
"(",
"method",
",",
"resource",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"payload",
")",
")",
"elif",
"isinstance",
"(",
"payload",
",",
"file_type",
")",
":",
"headers",
"=",
"{",
"\"content-type\"",
":",
"\"text/csv\"",
",",
"}",
"response",
"=",
"self",
".",
"_perform_request",
"(",
"method",
",",
"resource",
",",
"data",
"=",
"payload",
",",
"headers",
"=",
"headers",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Unrecognized payload {0}. Currently only list-, dictionary-,\"",
"\" and file-types are supported.\"",
".",
"format",
"(",
"type",
"(",
"payload",
")",
")",
")",
"return",
"response"
] |
Execute the update task.
|
[
"Execute",
"the",
"update",
"task",
"."
] |
dad2ca9560cde0acb03bdb4423260e891ca40d7b
|
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L417-L441
|
train
|
xmunoz/sodapy
|
sodapy/__init__.py
|
Socrata._perform_request
|
def _perform_request(self, request_type, resource, **kwargs):
'''
Utility method that performs all requests.
'''
request_type_methods = set(["get", "post", "put", "delete"])
if request_type not in request_type_methods:
raise Exception("Unknown request type. Supported request types are"
": {0}".format(", ".join(request_type_methods)))
uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource)
# set a timeout, just to be safe
kwargs["timeout"] = self.timeout
response = getattr(self.session, request_type)(uri, **kwargs)
# handle errors
if response.status_code not in (200, 202):
_raise_for_status(response)
# when responses have no content body (ie. delete, set_permission),
# simply return the whole response
if not response.text:
return response
# for other request types, return most useful data
content_type = response.headers.get('content-type').strip().lower()
if re.match(r'application\/json', content_type):
return response.json()
elif re.match(r'text\/csv', content_type):
csv_stream = StringIO(response.text)
return [line for line in csv.reader(csv_stream)]
elif re.match(r'application\/rdf\+xml', content_type):
return response.content
elif re.match(r'text\/plain', content_type):
try:
return json.loads(response.text)
except ValueError:
return response.text
else:
raise Exception("Unknown response format: {0}"
.format(content_type))
|
python
|
def _perform_request(self, request_type, resource, **kwargs):
'''
Utility method that performs all requests.
'''
request_type_methods = set(["get", "post", "put", "delete"])
if request_type not in request_type_methods:
raise Exception("Unknown request type. Supported request types are"
": {0}".format(", ".join(request_type_methods)))
uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource)
# set a timeout, just to be safe
kwargs["timeout"] = self.timeout
response = getattr(self.session, request_type)(uri, **kwargs)
# handle errors
if response.status_code not in (200, 202):
_raise_for_status(response)
# when responses have no content body (ie. delete, set_permission),
# simply return the whole response
if not response.text:
return response
# for other request types, return most useful data
content_type = response.headers.get('content-type').strip().lower()
if re.match(r'application\/json', content_type):
return response.json()
elif re.match(r'text\/csv', content_type):
csv_stream = StringIO(response.text)
return [line for line in csv.reader(csv_stream)]
elif re.match(r'application\/rdf\+xml', content_type):
return response.content
elif re.match(r'text\/plain', content_type):
try:
return json.loads(response.text)
except ValueError:
return response.text
else:
raise Exception("Unknown response format: {0}"
.format(content_type))
|
[
"def",
"_perform_request",
"(",
"self",
",",
"request_type",
",",
"resource",
",",
"*",
"*",
"kwargs",
")",
":",
"request_type_methods",
"=",
"set",
"(",
"[",
"\"get\"",
",",
"\"post\"",
",",
"\"put\"",
",",
"\"delete\"",
"]",
")",
"if",
"request_type",
"not",
"in",
"request_type_methods",
":",
"raise",
"Exception",
"(",
"\"Unknown request type. Supported request types are\"",
"\": {0}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"request_type_methods",
")",
")",
")",
"uri",
"=",
"\"{0}{1}{2}\"",
".",
"format",
"(",
"self",
".",
"uri_prefix",
",",
"self",
".",
"domain",
",",
"resource",
")",
"# set a timeout, just to be safe",
"kwargs",
"[",
"\"timeout\"",
"]",
"=",
"self",
".",
"timeout",
"response",
"=",
"getattr",
"(",
"self",
".",
"session",
",",
"request_type",
")",
"(",
"uri",
",",
"*",
"*",
"kwargs",
")",
"# handle errors",
"if",
"response",
".",
"status_code",
"not",
"in",
"(",
"200",
",",
"202",
")",
":",
"_raise_for_status",
"(",
"response",
")",
"# when responses have no content body (ie. delete, set_permission),",
"# simply return the whole response",
"if",
"not",
"response",
".",
"text",
":",
"return",
"response",
"# for other request types, return most useful data",
"content_type",
"=",
"response",
".",
"headers",
".",
"get",
"(",
"'content-type'",
")",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"re",
".",
"match",
"(",
"r'application\\/json'",
",",
"content_type",
")",
":",
"return",
"response",
".",
"json",
"(",
")",
"elif",
"re",
".",
"match",
"(",
"r'text\\/csv'",
",",
"content_type",
")",
":",
"csv_stream",
"=",
"StringIO",
"(",
"response",
".",
"text",
")",
"return",
"[",
"line",
"for",
"line",
"in",
"csv",
".",
"reader",
"(",
"csv_stream",
")",
"]",
"elif",
"re",
".",
"match",
"(",
"r'application\\/rdf\\+xml'",
",",
"content_type",
")",
":",
"return",
"response",
".",
"content",
"elif",
"re",
".",
"match",
"(",
"r'text\\/plain'",
",",
"content_type",
")",
":",
"try",
":",
"return",
"json",
".",
"loads",
"(",
"response",
".",
"text",
")",
"except",
"ValueError",
":",
"return",
"response",
".",
"text",
"else",
":",
"raise",
"Exception",
"(",
"\"Unknown response format: {0}\"",
".",
"format",
"(",
"content_type",
")",
")"
] |
Utility method that performs all requests.
|
[
"Utility",
"method",
"that",
"performs",
"all",
"requests",
"."
] |
dad2ca9560cde0acb03bdb4423260e891ca40d7b
|
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L459-L500
|
train
|
ElsevierDev/elsapy
|
elsapy/elsclient.py
|
ElsClient.exec_request
|
def exec_request(self, URL):
"""Sends the actual request; returns response."""
## Throttle request, if need be
interval = time.time() - self.__ts_last_req
if (interval < self.__min_req_interval):
time.sleep( self.__min_req_interval - interval )
## Construct and execute request
headers = {
"X-ELS-APIKey" : self.api_key,
"User-Agent" : self.__user_agent,
"Accept" : 'application/json'
}
if self.inst_token:
headers["X-ELS-Insttoken"] = self.inst_token
logger.info('Sending GET request to ' + URL)
r = requests.get(
URL,
headers = headers
)
self.__ts_last_req = time.time()
self._status_code=r.status_code
if r.status_code == 200:
self._status_msg='data retrieved'
return json.loads(r.text)
else:
self._status_msg="HTTP " + str(r.status_code) + " Error from " + URL + " and using headers " + str(headers) + ": " + r.text
raise requests.HTTPError("HTTP " + str(r.status_code) + " Error from " + URL + "\nand using headers " + str(headers) + ":\n" + r.text)
|
python
|
def exec_request(self, URL):
"""Sends the actual request; returns response."""
## Throttle request, if need be
interval = time.time() - self.__ts_last_req
if (interval < self.__min_req_interval):
time.sleep( self.__min_req_interval - interval )
## Construct and execute request
headers = {
"X-ELS-APIKey" : self.api_key,
"User-Agent" : self.__user_agent,
"Accept" : 'application/json'
}
if self.inst_token:
headers["X-ELS-Insttoken"] = self.inst_token
logger.info('Sending GET request to ' + URL)
r = requests.get(
URL,
headers = headers
)
self.__ts_last_req = time.time()
self._status_code=r.status_code
if r.status_code == 200:
self._status_msg='data retrieved'
return json.loads(r.text)
else:
self._status_msg="HTTP " + str(r.status_code) + " Error from " + URL + " and using headers " + str(headers) + ": " + r.text
raise requests.HTTPError("HTTP " + str(r.status_code) + " Error from " + URL + "\nand using headers " + str(headers) + ":\n" + r.text)
|
[
"def",
"exec_request",
"(",
"self",
",",
"URL",
")",
":",
"## Throttle request, if need be",
"interval",
"=",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"__ts_last_req",
"if",
"(",
"interval",
"<",
"self",
".",
"__min_req_interval",
")",
":",
"time",
".",
"sleep",
"(",
"self",
".",
"__min_req_interval",
"-",
"interval",
")",
"## Construct and execute request",
"headers",
"=",
"{",
"\"X-ELS-APIKey\"",
":",
"self",
".",
"api_key",
",",
"\"User-Agent\"",
":",
"self",
".",
"__user_agent",
",",
"\"Accept\"",
":",
"'application/json'",
"}",
"if",
"self",
".",
"inst_token",
":",
"headers",
"[",
"\"X-ELS-Insttoken\"",
"]",
"=",
"self",
".",
"inst_token",
"logger",
".",
"info",
"(",
"'Sending GET request to '",
"+",
"URL",
")",
"r",
"=",
"requests",
".",
"get",
"(",
"URL",
",",
"headers",
"=",
"headers",
")",
"self",
".",
"__ts_last_req",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"_status_code",
"=",
"r",
".",
"status_code",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"self",
".",
"_status_msg",
"=",
"'data retrieved'",
"return",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"else",
":",
"self",
".",
"_status_msg",
"=",
"\"HTTP \"",
"+",
"str",
"(",
"r",
".",
"status_code",
")",
"+",
"\" Error from \"",
"+",
"URL",
"+",
"\" and using headers \"",
"+",
"str",
"(",
"headers",
")",
"+",
"\": \"",
"+",
"r",
".",
"text",
"raise",
"requests",
".",
"HTTPError",
"(",
"\"HTTP \"",
"+",
"str",
"(",
"r",
".",
"status_code",
")",
"+",
"\" Error from \"",
"+",
"URL",
"+",
"\"\\nand using headers \"",
"+",
"str",
"(",
"headers",
")",
"+",
"\":\\n\"",
"+",
"r",
".",
"text",
")"
] |
Sends the actual request; returns response.
|
[
"Sends",
"the",
"actual",
"request",
";",
"returns",
"response",
"."
] |
a8a8b043816441e3ed0c834e792f7089231092da
|
https://github.com/ElsevierDev/elsapy/blob/a8a8b043816441e3ed0c834e792f7089231092da/elsapy/elsclient.py#L91-L119
|
train
|
ElsevierDev/elsapy
|
elsapy/elsentity.py
|
ElsEntity.write
|
def write(self):
"""If data exists for the entity, writes it to disk as a .JSON file with
the url-encoded URI as the filename and returns True. Else, returns
False."""
if (self.data):
dataPath = self.client.local_dir / (urllib.parse.quote_plus(self.uri)+'.json')
with dataPath.open(mode='w') as dump_file:
json.dump(self.data, dump_file)
dump_file.close()
logger.info('Wrote ' + self.uri + ' to file')
return True
else:
logger.warning('No data to write for ' + self.uri)
return False
|
python
|
def write(self):
"""If data exists for the entity, writes it to disk as a .JSON file with
the url-encoded URI as the filename and returns True. Else, returns
False."""
if (self.data):
dataPath = self.client.local_dir / (urllib.parse.quote_plus(self.uri)+'.json')
with dataPath.open(mode='w') as dump_file:
json.dump(self.data, dump_file)
dump_file.close()
logger.info('Wrote ' + self.uri + ' to file')
return True
else:
logger.warning('No data to write for ' + self.uri)
return False
|
[
"def",
"write",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"data",
")",
":",
"dataPath",
"=",
"self",
".",
"client",
".",
"local_dir",
"/",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"self",
".",
"uri",
")",
"+",
"'.json'",
")",
"with",
"dataPath",
".",
"open",
"(",
"mode",
"=",
"'w'",
")",
"as",
"dump_file",
":",
"json",
".",
"dump",
"(",
"self",
".",
"data",
",",
"dump_file",
")",
"dump_file",
".",
"close",
"(",
")",
"logger",
".",
"info",
"(",
"'Wrote '",
"+",
"self",
".",
"uri",
"+",
"' to file'",
")",
"return",
"True",
"else",
":",
"logger",
".",
"warning",
"(",
"'No data to write for '",
"+",
"self",
".",
"uri",
")",
"return",
"False"
] |
If data exists for the entity, writes it to disk as a .JSON file with
the url-encoded URI as the filename and returns True. Else, returns
False.
|
[
"If",
"data",
"exists",
"for",
"the",
"entity",
"writes",
"it",
"to",
"disk",
"as",
"a",
".",
"JSON",
"file",
"with",
"the",
"url",
"-",
"encoded",
"URI",
"as",
"the",
"filename",
"and",
"returns",
"True",
".",
"Else",
"returns",
"False",
"."
] |
a8a8b043816441e3ed0c834e792f7089231092da
|
https://github.com/ElsevierDev/elsapy/blob/a8a8b043816441e3ed0c834e792f7089231092da/elsapy/elsentity.py#L84-L97
|
train
|
ElsevierDev/elsapy
|
elsapy/elsprofile.py
|
ElsProfile.write_docs
|
def write_docs(self):
"""If a doclist exists for the entity, writes it to disk as a .JSON file
with the url-encoded URI as the filename and returns True. Else,
returns False."""
if self.doc_list:
dataPath = self.client.local_dir
dump_file = open('data/'
+ urllib.parse.quote_plus(self.uri+'?view=documents')
+ '.json', mode='w'
)
dump_file.write('[' + json.dumps(self.doc_list[0]))
for i in range (1, len(self.doc_list)):
dump_file.write(',' + json.dumps(self.doc_list[i]))
dump_file.write(']')
dump_file.close()
logger.info('Wrote ' + self.uri + '?view=documents to file')
return True
else:
logger.warning('No doclist to write for ' + self.uri)
return False
|
python
|
def write_docs(self):
"""If a doclist exists for the entity, writes it to disk as a .JSON file
with the url-encoded URI as the filename and returns True. Else,
returns False."""
if self.doc_list:
dataPath = self.client.local_dir
dump_file = open('data/'
+ urllib.parse.quote_plus(self.uri+'?view=documents')
+ '.json', mode='w'
)
dump_file.write('[' + json.dumps(self.doc_list[0]))
for i in range (1, len(self.doc_list)):
dump_file.write(',' + json.dumps(self.doc_list[i]))
dump_file.write(']')
dump_file.close()
logger.info('Wrote ' + self.uri + '?view=documents to file')
return True
else:
logger.warning('No doclist to write for ' + self.uri)
return False
|
[
"def",
"write_docs",
"(",
"self",
")",
":",
"if",
"self",
".",
"doc_list",
":",
"dataPath",
"=",
"self",
".",
"client",
".",
"local_dir",
"dump_file",
"=",
"open",
"(",
"'data/'",
"+",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"self",
".",
"uri",
"+",
"'?view=documents'",
")",
"+",
"'.json'",
",",
"mode",
"=",
"'w'",
")",
"dump_file",
".",
"write",
"(",
"'['",
"+",
"json",
".",
"dumps",
"(",
"self",
".",
"doc_list",
"[",
"0",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"self",
".",
"doc_list",
")",
")",
":",
"dump_file",
".",
"write",
"(",
"','",
"+",
"json",
".",
"dumps",
"(",
"self",
".",
"doc_list",
"[",
"i",
"]",
")",
")",
"dump_file",
".",
"write",
"(",
"']'",
")",
"dump_file",
".",
"close",
"(",
")",
"logger",
".",
"info",
"(",
"'Wrote '",
"+",
"self",
".",
"uri",
"+",
"'?view=documents to file'",
")",
"return",
"True",
"else",
":",
"logger",
".",
"warning",
"(",
"'No doclist to write for '",
"+",
"self",
".",
"uri",
")",
"return",
"False"
] |
If a doclist exists for the entity, writes it to disk as a .JSON file
with the url-encoded URI as the filename and returns True. Else,
returns False.
|
[
"If",
"a",
"doclist",
"exists",
"for",
"the",
"entity",
"writes",
"it",
"to",
"disk",
"as",
"a",
".",
"JSON",
"file",
"with",
"the",
"url",
"-",
"encoded",
"URI",
"as",
"the",
"filename",
"and",
"returns",
"True",
".",
"Else",
"returns",
"False",
"."
] |
a8a8b043816441e3ed0c834e792f7089231092da
|
https://github.com/ElsevierDev/elsapy/blob/a8a8b043816441e3ed0c834e792f7089231092da/elsapy/elsprofile.py#L66-L85
|
train
|
ElsevierDev/elsapy
|
elsapy/elsprofile.py
|
ElsAuthor.read
|
def read(self, els_client = None):
"""Reads the JSON representation of the author from ELSAPI.
Returns True if successful; else, False."""
if ElsProfile.read(self, self.__payload_type, els_client):
return True
else:
return False
|
python
|
def read(self, els_client = None):
"""Reads the JSON representation of the author from ELSAPI.
Returns True if successful; else, False."""
if ElsProfile.read(self, self.__payload_type, els_client):
return True
else:
return False
|
[
"def",
"read",
"(",
"self",
",",
"els_client",
"=",
"None",
")",
":",
"if",
"ElsProfile",
".",
"read",
"(",
"self",
",",
"self",
".",
"__payload_type",
",",
"els_client",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
Reads the JSON representation of the author from ELSAPI.
Returns True if successful; else, False.
|
[
"Reads",
"the",
"JSON",
"representation",
"of",
"the",
"author",
"from",
"ELSAPI",
".",
"Returns",
"True",
"if",
"successful",
";",
"else",
"False",
"."
] |
a8a8b043816441e3ed0c834e792f7089231092da
|
https://github.com/ElsevierDev/elsapy/blob/a8a8b043816441e3ed0c834e792f7089231092da/elsapy/elsprofile.py#L124-L130
|
train
|
ElsevierDev/elsapy
|
elsapy/elsdoc.py
|
FullDoc.read
|
def read(self, els_client = None):
"""Reads the JSON representation of the document from ELSAPI.
Returns True if successful; else, False."""
if super().read(self.__payload_type, els_client):
return True
else:
return False
|
python
|
def read(self, els_client = None):
"""Reads the JSON representation of the document from ELSAPI.
Returns True if successful; else, False."""
if super().read(self.__payload_type, els_client):
return True
else:
return False
|
[
"def",
"read",
"(",
"self",
",",
"els_client",
"=",
"None",
")",
":",
"if",
"super",
"(",
")",
".",
"read",
"(",
"self",
".",
"__payload_type",
",",
"els_client",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
Reads the JSON representation of the document from ELSAPI.
Returns True if successful; else, False.
|
[
"Reads",
"the",
"JSON",
"representation",
"of",
"the",
"document",
"from",
"ELSAPI",
".",
"Returns",
"True",
"if",
"successful",
";",
"else",
"False",
"."
] |
a8a8b043816441e3ed0c834e792f7089231092da
|
https://github.com/ElsevierDev/elsapy/blob/a8a8b043816441e3ed0c834e792f7089231092da/elsapy/elsdoc.py#L44-L50
|
train
|
althonos/pronto
|
pronto/parser/owl.py
|
OwlXMLParser._extract_obo_synonyms
|
def _extract_obo_synonyms(rawterm):
"""Extract the synonyms defined in the rawterm.
"""
synonyms = set()
# keys in rawterm that define a synonym
keys = set(owl_synonyms).intersection(rawterm.keys())
for k in keys:
for s in rawterm[k]:
synonyms.add(Synonym(s, owl_synonyms[k]))
return synonyms
|
python
|
def _extract_obo_synonyms(rawterm):
"""Extract the synonyms defined in the rawterm.
"""
synonyms = set()
# keys in rawterm that define a synonym
keys = set(owl_synonyms).intersection(rawterm.keys())
for k in keys:
for s in rawterm[k]:
synonyms.add(Synonym(s, owl_synonyms[k]))
return synonyms
|
[
"def",
"_extract_obo_synonyms",
"(",
"rawterm",
")",
":",
"synonyms",
"=",
"set",
"(",
")",
"# keys in rawterm that define a synonym",
"keys",
"=",
"set",
"(",
"owl_synonyms",
")",
".",
"intersection",
"(",
"rawterm",
".",
"keys",
"(",
")",
")",
"for",
"k",
"in",
"keys",
":",
"for",
"s",
"in",
"rawterm",
"[",
"k",
"]",
":",
"synonyms",
".",
"add",
"(",
"Synonym",
"(",
"s",
",",
"owl_synonyms",
"[",
"k",
"]",
")",
")",
"return",
"synonyms"
] |
Extract the synonyms defined in the rawterm.
|
[
"Extract",
"the",
"synonyms",
"defined",
"in",
"the",
"rawterm",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/parser/owl.py#L152-L161
|
train
|
althonos/pronto
|
pronto/parser/owl.py
|
OwlXMLParser._extract_obo_relation
|
def _extract_obo_relation(cls, rawterm):
"""Extract the relationships defined in the rawterm.
"""
relations = {}
if 'subClassOf' in rawterm:
relations[Relationship('is_a')] = l = []
l.extend(map(cls._get_id_from_url, rawterm.pop('subClassOf')))
return relations
|
python
|
def _extract_obo_relation(cls, rawterm):
"""Extract the relationships defined in the rawterm.
"""
relations = {}
if 'subClassOf' in rawterm:
relations[Relationship('is_a')] = l = []
l.extend(map(cls._get_id_from_url, rawterm.pop('subClassOf')))
return relations
|
[
"def",
"_extract_obo_relation",
"(",
"cls",
",",
"rawterm",
")",
":",
"relations",
"=",
"{",
"}",
"if",
"'subClassOf'",
"in",
"rawterm",
":",
"relations",
"[",
"Relationship",
"(",
"'is_a'",
")",
"]",
"=",
"l",
"=",
"[",
"]",
"l",
".",
"extend",
"(",
"map",
"(",
"cls",
".",
"_get_id_from_url",
",",
"rawterm",
".",
"pop",
"(",
"'subClassOf'",
")",
")",
")",
"return",
"relations"
] |
Extract the relationships defined in the rawterm.
|
[
"Extract",
"the",
"relationships",
"defined",
"in",
"the",
"rawterm",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/parser/owl.py#L164-L171
|
train
|
althonos/pronto
|
pronto/parser/owl.py
|
OwlXMLParser._relabel_to_obo
|
def _relabel_to_obo(d):
"""Change the keys of ``d`` to use Obo labels.
"""
return {
owl_to_obo.get(old_k, old_k): old_v
for old_k, old_v in six.iteritems(d)
}
|
python
|
def _relabel_to_obo(d):
"""Change the keys of ``d`` to use Obo labels.
"""
return {
owl_to_obo.get(old_k, old_k): old_v
for old_k, old_v in six.iteritems(d)
}
|
[
"def",
"_relabel_to_obo",
"(",
"d",
")",
":",
"return",
"{",
"owl_to_obo",
".",
"get",
"(",
"old_k",
",",
"old_k",
")",
":",
"old_v",
"for",
"old_k",
",",
"old_v",
"in",
"six",
".",
"iteritems",
"(",
"d",
")",
"}"
] |
Change the keys of ``d`` to use Obo labels.
|
[
"Change",
"the",
"keys",
"of",
"d",
"to",
"use",
"Obo",
"labels",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/parser/owl.py#L174-L180
|
train
|
althonos/pronto
|
pronto/relationship.py
|
Relationship.complement
|
def complement(self):
"""Return the complementary relationship of self.
Raises:
ValueError: if the relationship has a complementary
which was not defined.
Returns:
complementary (Relationship): the complementary relationship.
Example:
>>> from pronto.relationship import Relationship
>>> print(Relationship('has_part').complement())
Relationship('part_of')
>>> print(Relationship('has_units').complement())
None
"""
if self.complementary:
#if self.complementary in self._instances.keys():
try:
return self._instances[self.complementary]
except KeyError:
raise ValueError('{} has a complementary but it was not defined !')
else:
return None
|
python
|
def complement(self):
"""Return the complementary relationship of self.
Raises:
ValueError: if the relationship has a complementary
which was not defined.
Returns:
complementary (Relationship): the complementary relationship.
Example:
>>> from pronto.relationship import Relationship
>>> print(Relationship('has_part').complement())
Relationship('part_of')
>>> print(Relationship('has_units').complement())
None
"""
if self.complementary:
#if self.complementary in self._instances.keys():
try:
return self._instances[self.complementary]
except KeyError:
raise ValueError('{} has a complementary but it was not defined !')
else:
return None
|
[
"def",
"complement",
"(",
"self",
")",
":",
"if",
"self",
".",
"complementary",
":",
"#if self.complementary in self._instances.keys():",
"try",
":",
"return",
"self",
".",
"_instances",
"[",
"self",
".",
"complementary",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'{} has a complementary but it was not defined !'",
")",
"else",
":",
"return",
"None"
] |
Return the complementary relationship of self.
Raises:
ValueError: if the relationship has a complementary
which was not defined.
Returns:
complementary (Relationship): the complementary relationship.
Example:
>>> from pronto.relationship import Relationship
>>> print(Relationship('has_part').complement())
Relationship('part_of')
>>> print(Relationship('has_units').complement())
None
|
[
"Return",
"the",
"complementary",
"relationship",
"of",
"self",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/relationship.py#L113-L141
|
train
|
althonos/pronto
|
pronto/relationship.py
|
Relationship.topdown
|
def topdown(cls):
"""Get all topdown `Relationship` instances.
Returns:
:obj:`generator`
Example:
>>> from pronto import Relationship
>>> for r in Relationship.topdown():
... print(r)
Relationship('can_be')
Relationship('has_part')
"""
return tuple(unique_everseen(r for r in cls._instances.values() if r.direction=='topdown'))
|
python
|
def topdown(cls):
"""Get all topdown `Relationship` instances.
Returns:
:obj:`generator`
Example:
>>> from pronto import Relationship
>>> for r in Relationship.topdown():
... print(r)
Relationship('can_be')
Relationship('has_part')
"""
return tuple(unique_everseen(r for r in cls._instances.values() if r.direction=='topdown'))
|
[
"def",
"topdown",
"(",
"cls",
")",
":",
"return",
"tuple",
"(",
"unique_everseen",
"(",
"r",
"for",
"r",
"in",
"cls",
".",
"_instances",
".",
"values",
"(",
")",
"if",
"r",
".",
"direction",
"==",
"'topdown'",
")",
")"
] |
Get all topdown `Relationship` instances.
Returns:
:obj:`generator`
Example:
>>> from pronto import Relationship
>>> for r in Relationship.topdown():
... print(r)
Relationship('can_be')
Relationship('has_part')
|
[
"Get",
"all",
"topdown",
"Relationship",
"instances",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/relationship.py#L174-L189
|
train
|
althonos/pronto
|
pronto/relationship.py
|
Relationship.bottomup
|
def bottomup(cls):
"""Get all bottomup `Relationship` instances.
Example:
>>> from pronto import Relationship
>>> for r in Relationship.bottomup():
... print(r)
Relationship('is_a')
Relationship('part_of')
Relationship('develops_from')
"""
return tuple(unique_everseen(r for r in cls._instances.values() if r.direction=='bottomup'))
|
python
|
def bottomup(cls):
"""Get all bottomup `Relationship` instances.
Example:
>>> from pronto import Relationship
>>> for r in Relationship.bottomup():
... print(r)
Relationship('is_a')
Relationship('part_of')
Relationship('develops_from')
"""
return tuple(unique_everseen(r for r in cls._instances.values() if r.direction=='bottomup'))
|
[
"def",
"bottomup",
"(",
"cls",
")",
":",
"return",
"tuple",
"(",
"unique_everseen",
"(",
"r",
"for",
"r",
"in",
"cls",
".",
"_instances",
".",
"values",
"(",
")",
"if",
"r",
".",
"direction",
"==",
"'bottomup'",
")",
")"
] |
Get all bottomup `Relationship` instances.
Example:
>>> from pronto import Relationship
>>> for r in Relationship.bottomup():
... print(r)
Relationship('is_a')
Relationship('part_of')
Relationship('develops_from')
|
[
"Get",
"all",
"bottomup",
"Relationship",
"instances",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/relationship.py#L192-L205
|
train
|
althonos/pronto
|
pronto/utils.py
|
unique_everseen
|
def unique_everseen(iterable):
"""List unique elements, preserving order. Remember all elements ever seen."""
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
seen = set()
seen_add = seen.add
for element in six.moves.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
|
python
|
def unique_everseen(iterable):
"""List unique elements, preserving order. Remember all elements ever seen."""
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
seen = set()
seen_add = seen.add
for element in six.moves.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
|
[
"def",
"unique_everseen",
"(",
"iterable",
")",
":",
"# unique_everseen('AAAABBBCCDAABBB') --> A B C D",
"seen",
"=",
"set",
"(",
")",
"seen_add",
"=",
"seen",
".",
"add",
"for",
"element",
"in",
"six",
".",
"moves",
".",
"filterfalse",
"(",
"seen",
".",
"__contains__",
",",
"iterable",
")",
":",
"seen_add",
"(",
"element",
")",
"yield",
"element"
] |
List unique elements, preserving order. Remember all elements ever seen.
|
[
"List",
"unique",
"elements",
"preserving",
"order",
".",
"Remember",
"all",
"elements",
"ever",
"seen",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/utils.py#L30-L38
|
train
|
althonos/pronto
|
pronto/utils.py
|
output_str
|
def output_str(f):
"""Create a function that always return instances of `str`.
This decorator is useful when the returned string is to be used
with libraries that do not support ̀`unicode` in Python 2, but work
fine with Python 3 `str` objects.
"""
if six.PY2:
#@functools.wraps(f)
def new_f(*args, **kwargs):
return f(*args, **kwargs).encode("utf-8")
else:
new_f = f
return new_f
|
python
|
def output_str(f):
"""Create a function that always return instances of `str`.
This decorator is useful when the returned string is to be used
with libraries that do not support ̀`unicode` in Python 2, but work
fine with Python 3 `str` objects.
"""
if six.PY2:
#@functools.wraps(f)
def new_f(*args, **kwargs):
return f(*args, **kwargs).encode("utf-8")
else:
new_f = f
return new_f
|
[
"def",
"output_str",
"(",
"f",
")",
":",
"if",
"six",
".",
"PY2",
":",
"#@functools.wraps(f)",
"def",
"new_f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
"else",
":",
"new_f",
"=",
"f",
"return",
"new_f"
] |
Create a function that always return instances of `str`.
This decorator is useful when the returned string is to be used
with libraries that do not support ̀`unicode` in Python 2, but work
fine with Python 3 `str` objects.
|
[
"Create",
"a",
"function",
"that",
"always",
"return",
"instances",
"of",
"str",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/utils.py#L40-L53
|
train
|
althonos/pronto
|
pronto/utils.py
|
nowarnings
|
def nowarnings(func):
"""Create a function wrapped in a context that ignores warnings.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return func(*args, **kwargs)
return new_func
|
python
|
def nowarnings(func):
"""Create a function wrapped in a context that ignores warnings.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return func(*args, **kwargs)
return new_func
|
[
"def",
"nowarnings",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"new_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"'ignore'",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"new_func"
] |
Create a function wrapped in a context that ignores warnings.
|
[
"Create",
"a",
"function",
"wrapped",
"in",
"a",
"context",
"that",
"ignores",
"warnings",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/utils.py#L55-L63
|
train
|
althonos/pronto
|
pronto/ontology.py
|
Ontology.parse
|
def parse(self, stream, parser=None):
"""Parse the given file using available `BaseParser` instances.
Raises:
TypeError: when the parser argument is not a string or None.
ValueError: when the parser argument is a string that does
not name a `BaseParser`.
"""
force, parsers = self._get_parsers(parser)
try:
stream.seek(0)
lookup = stream.read(1024)
stream.seek(0)
except (io.UnsupportedOperation, AttributeError):
lookup = None
for p in parsers:
if p.hook(path=self.path, force=force, lookup=lookup):
self.meta, self.terms, self.imports, self.typedefs = p.parse(stream)
self._parsed_by = p.__name__
break
|
python
|
def parse(self, stream, parser=None):
"""Parse the given file using available `BaseParser` instances.
Raises:
TypeError: when the parser argument is not a string or None.
ValueError: when the parser argument is a string that does
not name a `BaseParser`.
"""
force, parsers = self._get_parsers(parser)
try:
stream.seek(0)
lookup = stream.read(1024)
stream.seek(0)
except (io.UnsupportedOperation, AttributeError):
lookup = None
for p in parsers:
if p.hook(path=self.path, force=force, lookup=lookup):
self.meta, self.terms, self.imports, self.typedefs = p.parse(stream)
self._parsed_by = p.__name__
break
|
[
"def",
"parse",
"(",
"self",
",",
"stream",
",",
"parser",
"=",
"None",
")",
":",
"force",
",",
"parsers",
"=",
"self",
".",
"_get_parsers",
"(",
"parser",
")",
"try",
":",
"stream",
".",
"seek",
"(",
"0",
")",
"lookup",
"=",
"stream",
".",
"read",
"(",
"1024",
")",
"stream",
".",
"seek",
"(",
"0",
")",
"except",
"(",
"io",
".",
"UnsupportedOperation",
",",
"AttributeError",
")",
":",
"lookup",
"=",
"None",
"for",
"p",
"in",
"parsers",
":",
"if",
"p",
".",
"hook",
"(",
"path",
"=",
"self",
".",
"path",
",",
"force",
"=",
"force",
",",
"lookup",
"=",
"lookup",
")",
":",
"self",
".",
"meta",
",",
"self",
".",
"terms",
",",
"self",
".",
"imports",
",",
"self",
".",
"typedefs",
"=",
"p",
".",
"parse",
"(",
"stream",
")",
"self",
".",
"_parsed_by",
"=",
"p",
".",
"__name__",
"break"
] |
Parse the given file using available `BaseParser` instances.
Raises:
TypeError: when the parser argument is not a string or None.
ValueError: when the parser argument is a string that does
not name a `BaseParser`.
|
[
"Parse",
"the",
"given",
"file",
"using",
"available",
"BaseParser",
"instances",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L204-L226
|
train
|
althonos/pronto
|
pronto/ontology.py
|
Ontology._get_parsers
|
def _get_parsers(self, name):
"""Return the appropriate parser asked by the user.
Todo:
Change `Ontology._get_parsers` behaviour to look for parsers
through a setuptools entrypoint instead of mere subclasses.
"""
parserlist = BaseParser.__subclasses__()
forced = name is None
if isinstance(name, (six.text_type, six.binary_type)):
parserlist = [p for p in parserlist if p.__name__ == name]
if not parserlist:
raise ValueError("could not find parser: {}".format(name))
elif name is not None:
raise TypeError("parser must be {types} or None, not {actual}".format(
types=" or ".join([six.text_type.__name__, six.binary_type.__name__]),
actual=type(parser).__name__,
))
return not forced, parserlist
|
python
|
def _get_parsers(self, name):
"""Return the appropriate parser asked by the user.
Todo:
Change `Ontology._get_parsers` behaviour to look for parsers
through a setuptools entrypoint instead of mere subclasses.
"""
parserlist = BaseParser.__subclasses__()
forced = name is None
if isinstance(name, (six.text_type, six.binary_type)):
parserlist = [p for p in parserlist if p.__name__ == name]
if not parserlist:
raise ValueError("could not find parser: {}".format(name))
elif name is not None:
raise TypeError("parser must be {types} or None, not {actual}".format(
types=" or ".join([six.text_type.__name__, six.binary_type.__name__]),
actual=type(parser).__name__,
))
return not forced, parserlist
|
[
"def",
"_get_parsers",
"(",
"self",
",",
"name",
")",
":",
"parserlist",
"=",
"BaseParser",
".",
"__subclasses__",
"(",
")",
"forced",
"=",
"name",
"is",
"None",
"if",
"isinstance",
"(",
"name",
",",
"(",
"six",
".",
"text_type",
",",
"six",
".",
"binary_type",
")",
")",
":",
"parserlist",
"=",
"[",
"p",
"for",
"p",
"in",
"parserlist",
"if",
"p",
".",
"__name__",
"==",
"name",
"]",
"if",
"not",
"parserlist",
":",
"raise",
"ValueError",
"(",
"\"could not find parser: {}\"",
".",
"format",
"(",
"name",
")",
")",
"elif",
"name",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"\"parser must be {types} or None, not {actual}\"",
".",
"format",
"(",
"types",
"=",
"\" or \"",
".",
"join",
"(",
"[",
"six",
".",
"text_type",
".",
"__name__",
",",
"six",
".",
"binary_type",
".",
"__name__",
"]",
")",
",",
"actual",
"=",
"type",
"(",
"parser",
")",
".",
"__name__",
",",
")",
")",
"return",
"not",
"forced",
",",
"parserlist"
] |
Return the appropriate parser asked by the user.
Todo:
Change `Ontology._get_parsers` behaviour to look for parsers
through a setuptools entrypoint instead of mere subclasses.
|
[
"Return",
"the",
"appropriate",
"parser",
"asked",
"by",
"the",
"user",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L228-L250
|
train
|
althonos/pronto
|
pronto/ontology.py
|
Ontology.adopt
|
def adopt(self):
"""Make terms aware of their children.
This is done automatically when using the `~Ontology.merge` and
`~Ontology.include` methods as well as the `~Ontology.__init__`
method, but it should be called in case of manual editing of the
parents or children of a `Term`.
"""
valid_relationships = set(Relationship._instances.keys())
relationships = [
(parent, relation.complement(), term.id)
for term in six.itervalues(self.terms)
for relation in term.relations
for parent in term.relations[relation]
if relation.complementary
and relation.complementary in valid_relationships
]
relationships.sort(key=operator.itemgetter(2))
for parent, rel, child in relationships:
if rel is None:
break
try:
parent = parent.id
except AttributeError:
pass
if parent in self.terms:
try:
if child not in self.terms[parent].relations[rel]:
self.terms[parent].relations[rel].append(child)
except KeyError:
self[parent].relations[rel] = [child]
del relationships
|
python
|
def adopt(self):
"""Make terms aware of their children.
This is done automatically when using the `~Ontology.merge` and
`~Ontology.include` methods as well as the `~Ontology.__init__`
method, but it should be called in case of manual editing of the
parents or children of a `Term`.
"""
valid_relationships = set(Relationship._instances.keys())
relationships = [
(parent, relation.complement(), term.id)
for term in six.itervalues(self.terms)
for relation in term.relations
for parent in term.relations[relation]
if relation.complementary
and relation.complementary in valid_relationships
]
relationships.sort(key=operator.itemgetter(2))
for parent, rel, child in relationships:
if rel is None:
break
try:
parent = parent.id
except AttributeError:
pass
if parent in self.terms:
try:
if child not in self.terms[parent].relations[rel]:
self.terms[parent].relations[rel].append(child)
except KeyError:
self[parent].relations[rel] = [child]
del relationships
|
[
"def",
"adopt",
"(",
"self",
")",
":",
"valid_relationships",
"=",
"set",
"(",
"Relationship",
".",
"_instances",
".",
"keys",
"(",
")",
")",
"relationships",
"=",
"[",
"(",
"parent",
",",
"relation",
".",
"complement",
"(",
")",
",",
"term",
".",
"id",
")",
"for",
"term",
"in",
"six",
".",
"itervalues",
"(",
"self",
".",
"terms",
")",
"for",
"relation",
"in",
"term",
".",
"relations",
"for",
"parent",
"in",
"term",
".",
"relations",
"[",
"relation",
"]",
"if",
"relation",
".",
"complementary",
"and",
"relation",
".",
"complementary",
"in",
"valid_relationships",
"]",
"relationships",
".",
"sort",
"(",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"2",
")",
")",
"for",
"parent",
",",
"rel",
",",
"child",
"in",
"relationships",
":",
"if",
"rel",
"is",
"None",
":",
"break",
"try",
":",
"parent",
"=",
"parent",
".",
"id",
"except",
"AttributeError",
":",
"pass",
"if",
"parent",
"in",
"self",
".",
"terms",
":",
"try",
":",
"if",
"child",
"not",
"in",
"self",
".",
"terms",
"[",
"parent",
"]",
".",
"relations",
"[",
"rel",
"]",
":",
"self",
".",
"terms",
"[",
"parent",
"]",
".",
"relations",
"[",
"rel",
"]",
".",
"append",
"(",
"child",
")",
"except",
"KeyError",
":",
"self",
"[",
"parent",
"]",
".",
"relations",
"[",
"rel",
"]",
"=",
"[",
"child",
"]",
"del",
"relationships"
] |
Make terms aware of their children.
This is done automatically when using the `~Ontology.merge` and
`~Ontology.include` methods as well as the `~Ontology.__init__`
method, but it should be called in case of manual editing of the
parents or children of a `Term`.
|
[
"Make",
"terms",
"aware",
"of",
"their",
"children",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L253-L292
|
train
|
althonos/pronto
|
pronto/ontology.py
|
Ontology.reference
|
def reference(self):
"""Make relations point to ontology terms instead of term ids.
This is done automatically when using the :obj:`merge` and :obj:`include`
methods as well as the :obj:`__init__` method, but it should be called in
case of manual changes of the relationships of a Term.
"""
for termkey, termval in six.iteritems(self.terms):
termval.relations.update(
(relkey, TermList(
(self.terms.get(x) or Term(x, '', '')
if not isinstance(x, Term) else x) for x in relval
)) for relkey, relval in six.iteritems(termval.relations)
)
|
python
|
def reference(self):
"""Make relations point to ontology terms instead of term ids.
This is done automatically when using the :obj:`merge` and :obj:`include`
methods as well as the :obj:`__init__` method, but it should be called in
case of manual changes of the relationships of a Term.
"""
for termkey, termval in six.iteritems(self.terms):
termval.relations.update(
(relkey, TermList(
(self.terms.get(x) or Term(x, '', '')
if not isinstance(x, Term) else x) for x in relval
)) for relkey, relval in six.iteritems(termval.relations)
)
|
[
"def",
"reference",
"(",
"self",
")",
":",
"for",
"termkey",
",",
"termval",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"terms",
")",
":",
"termval",
".",
"relations",
".",
"update",
"(",
"(",
"relkey",
",",
"TermList",
"(",
"(",
"self",
".",
"terms",
".",
"get",
"(",
"x",
")",
"or",
"Term",
"(",
"x",
",",
"''",
",",
"''",
")",
"if",
"not",
"isinstance",
"(",
"x",
",",
"Term",
")",
"else",
"x",
")",
"for",
"x",
"in",
"relval",
")",
")",
"for",
"relkey",
",",
"relval",
"in",
"six",
".",
"iteritems",
"(",
"termval",
".",
"relations",
")",
")"
] |
Make relations point to ontology terms instead of term ids.
This is done automatically when using the :obj:`merge` and :obj:`include`
methods as well as the :obj:`__init__` method, but it should be called in
case of manual changes of the relationships of a Term.
|
[
"Make",
"relations",
"point",
"to",
"ontology",
"terms",
"instead",
"of",
"term",
"ids",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L294-L307
|
train
|
althonos/pronto
|
pronto/ontology.py
|
Ontology.resolve_imports
|
def resolve_imports(self, imports, import_depth, parser=None):
"""Import required ontologies.
"""
if imports and import_depth:
for i in list(self.imports):
try:
if os.path.exists(i) or i.startswith(('http', 'ftp')):
self.merge(Ontology(i, import_depth=import_depth-1, parser=parser))
else: # try to look at neighbouring ontologies
self.merge(Ontology( os.path.join(os.path.dirname(self.path), i),
import_depth=import_depth-1, parser=parser))
except (IOError, OSError, URLError, HTTPError, _etree.ParseError) as e:
warnings.warn("{} occured during import of "
"{}".format(type(e).__name__, i),
ProntoWarning)
|
python
|
def resolve_imports(self, imports, import_depth, parser=None):
"""Import required ontologies.
"""
if imports and import_depth:
for i in list(self.imports):
try:
if os.path.exists(i) or i.startswith(('http', 'ftp')):
self.merge(Ontology(i, import_depth=import_depth-1, parser=parser))
else: # try to look at neighbouring ontologies
self.merge(Ontology( os.path.join(os.path.dirname(self.path), i),
import_depth=import_depth-1, parser=parser))
except (IOError, OSError, URLError, HTTPError, _etree.ParseError) as e:
warnings.warn("{} occured during import of "
"{}".format(type(e).__name__, i),
ProntoWarning)
|
[
"def",
"resolve_imports",
"(",
"self",
",",
"imports",
",",
"import_depth",
",",
"parser",
"=",
"None",
")",
":",
"if",
"imports",
"and",
"import_depth",
":",
"for",
"i",
"in",
"list",
"(",
"self",
".",
"imports",
")",
":",
"try",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"i",
")",
"or",
"i",
".",
"startswith",
"(",
"(",
"'http'",
",",
"'ftp'",
")",
")",
":",
"self",
".",
"merge",
"(",
"Ontology",
"(",
"i",
",",
"import_depth",
"=",
"import_depth",
"-",
"1",
",",
"parser",
"=",
"parser",
")",
")",
"else",
":",
"# try to look at neighbouring ontologies",
"self",
".",
"merge",
"(",
"Ontology",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"path",
")",
",",
"i",
")",
",",
"import_depth",
"=",
"import_depth",
"-",
"1",
",",
"parser",
"=",
"parser",
")",
")",
"except",
"(",
"IOError",
",",
"OSError",
",",
"URLError",
",",
"HTTPError",
",",
"_etree",
".",
"ParseError",
")",
"as",
"e",
":",
"warnings",
".",
"warn",
"(",
"\"{} occured during import of \"",
"\"{}\"",
".",
"format",
"(",
"type",
"(",
"e",
")",
".",
"__name__",
",",
"i",
")",
",",
"ProntoWarning",
")"
] |
Import required ontologies.
|
[
"Import",
"required",
"ontologies",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L309-L326
|
train
|
althonos/pronto
|
pronto/ontology.py
|
Ontology.include
|
def include(self, *terms):
"""Add new terms to the current ontology.
Raises:
TypeError: when the arguments is (are) neither a TermList nor a Term.
Note:
This will also recursively include terms in the term's relations
dictionnary, but it is considered bad practice to do so. If you
want to create your own ontology, you should only add an ID (such
as 'ONT:001') to your terms relations, and let the Ontology link
terms with each other.
Examples:
Create a new ontology from scratch
>>> from pronto import Term, Relationship
>>> t1 = Term('ONT:001','my 1st term',
... 'this is my first term')
>>> t2 = Term('ONT:002', 'my 2nd term',
... 'this is my second term',
... {Relationship('part_of'): ['ONT:001']})
>>> ont = Ontology()
>>> ont.include(t1, t2)
>>>
>>> 'ONT:002' in ont
True
>>> ont['ONT:001'].children
[<ONT:002: my 2nd term>]
"""
ref_needed = False
for term in terms:
if isinstance(term, TermList):
ref_needed = ref_needed or self._include_term_list(term)
elif isinstance(term, Term):
ref_needed = ref_needed or self._include_term(term)
else:
raise TypeError('include only accepts <Term> or <TermList> as arguments')
self.adopt()
self.reference()
|
python
|
def include(self, *terms):
"""Add new terms to the current ontology.
Raises:
TypeError: when the arguments is (are) neither a TermList nor a Term.
Note:
This will also recursively include terms in the term's relations
dictionnary, but it is considered bad practice to do so. If you
want to create your own ontology, you should only add an ID (such
as 'ONT:001') to your terms relations, and let the Ontology link
terms with each other.
Examples:
Create a new ontology from scratch
>>> from pronto import Term, Relationship
>>> t1 = Term('ONT:001','my 1st term',
... 'this is my first term')
>>> t2 = Term('ONT:002', 'my 2nd term',
... 'this is my second term',
... {Relationship('part_of'): ['ONT:001']})
>>> ont = Ontology()
>>> ont.include(t1, t2)
>>>
>>> 'ONT:002' in ont
True
>>> ont['ONT:001'].children
[<ONT:002: my 2nd term>]
"""
ref_needed = False
for term in terms:
if isinstance(term, TermList):
ref_needed = ref_needed or self._include_term_list(term)
elif isinstance(term, Term):
ref_needed = ref_needed or self._include_term(term)
else:
raise TypeError('include only accepts <Term> or <TermList> as arguments')
self.adopt()
self.reference()
|
[
"def",
"include",
"(",
"self",
",",
"*",
"terms",
")",
":",
"ref_needed",
"=",
"False",
"for",
"term",
"in",
"terms",
":",
"if",
"isinstance",
"(",
"term",
",",
"TermList",
")",
":",
"ref_needed",
"=",
"ref_needed",
"or",
"self",
".",
"_include_term_list",
"(",
"term",
")",
"elif",
"isinstance",
"(",
"term",
",",
"Term",
")",
":",
"ref_needed",
"=",
"ref_needed",
"or",
"self",
".",
"_include_term",
"(",
"term",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'include only accepts <Term> or <TermList> as arguments'",
")",
"self",
".",
"adopt",
"(",
")",
"self",
".",
"reference",
"(",
")"
] |
Add new terms to the current ontology.
Raises:
TypeError: when the arguments is (are) neither a TermList nor a Term.
Note:
This will also recursively include terms in the term's relations
dictionnary, but it is considered bad practice to do so. If you
want to create your own ontology, you should only add an ID (such
as 'ONT:001') to your terms relations, and let the Ontology link
terms with each other.
Examples:
Create a new ontology from scratch
>>> from pronto import Term, Relationship
>>> t1 = Term('ONT:001','my 1st term',
... 'this is my first term')
>>> t2 = Term('ONT:002', 'my 2nd term',
... 'this is my second term',
... {Relationship('part_of'): ['ONT:001']})
>>> ont = Ontology()
>>> ont.include(t1, t2)
>>>
>>> 'ONT:002' in ont
True
>>> ont['ONT:001'].children
[<ONT:002: my 2nd term>]
|
[
"Add",
"new",
"terms",
"to",
"the",
"current",
"ontology",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L328-L371
|
train
|
althonos/pronto
|
pronto/ontology.py
|
Ontology.merge
|
def merge(self, other):
"""Merge another ontology into the current one.
Raises:
TypeError: When argument is not an Ontology object.
Example:
>>> from pronto import Ontology
>>> nmr = Ontology('tests/resources/nmrCV.owl', False)
>>> po = Ontology('tests/resources/po.obo.gz', False)
>>> 'NMR:1000271' in nmr
True
>>> 'NMR:1000271' in po
False
>>> po.merge(nmr)
>>> 'NMR:1000271' in po
True
"""
if not isinstance(other, Ontology):
raise TypeError("'merge' requires an Ontology as argument,"
" not {}".format(type(other)))
self.terms.update(other.terms)
self._empty_cache()
self.adopt()
self.reference()
|
python
|
def merge(self, other):
"""Merge another ontology into the current one.
Raises:
TypeError: When argument is not an Ontology object.
Example:
>>> from pronto import Ontology
>>> nmr = Ontology('tests/resources/nmrCV.owl', False)
>>> po = Ontology('tests/resources/po.obo.gz', False)
>>> 'NMR:1000271' in nmr
True
>>> 'NMR:1000271' in po
False
>>> po.merge(nmr)
>>> 'NMR:1000271' in po
True
"""
if not isinstance(other, Ontology):
raise TypeError("'merge' requires an Ontology as argument,"
" not {}".format(type(other)))
self.terms.update(other.terms)
self._empty_cache()
self.adopt()
self.reference()
|
[
"def",
"merge",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"Ontology",
")",
":",
"raise",
"TypeError",
"(",
"\"'merge' requires an Ontology as argument,\"",
"\" not {}\"",
".",
"format",
"(",
"type",
"(",
"other",
")",
")",
")",
"self",
".",
"terms",
".",
"update",
"(",
"other",
".",
"terms",
")",
"self",
".",
"_empty_cache",
"(",
")",
"self",
".",
"adopt",
"(",
")",
"self",
".",
"reference",
"(",
")"
] |
Merge another ontology into the current one.
Raises:
TypeError: When argument is not an Ontology object.
Example:
>>> from pronto import Ontology
>>> nmr = Ontology('tests/resources/nmrCV.owl', False)
>>> po = Ontology('tests/resources/po.obo.gz', False)
>>> 'NMR:1000271' in nmr
True
>>> 'NMR:1000271' in po
False
>>> po.merge(nmr)
>>> 'NMR:1000271' in po
True
|
[
"Merge",
"another",
"ontology",
"into",
"the",
"current",
"one",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L373-L399
|
train
|
althonos/pronto
|
pronto/ontology.py
|
Ontology._include_term_list
|
def _include_term_list(self, termlist):
"""Add terms from a TermList to the ontology.
"""
ref_needed = False
for term in termlist:
ref_needed = ref_needed or self._include_term(term)
return ref_needed
|
python
|
def _include_term_list(self, termlist):
"""Add terms from a TermList to the ontology.
"""
ref_needed = False
for term in termlist:
ref_needed = ref_needed or self._include_term(term)
return ref_needed
|
[
"def",
"_include_term_list",
"(",
"self",
",",
"termlist",
")",
":",
"ref_needed",
"=",
"False",
"for",
"term",
"in",
"termlist",
":",
"ref_needed",
"=",
"ref_needed",
"or",
"self",
".",
"_include_term",
"(",
"term",
")",
"return",
"ref_needed"
] |
Add terms from a TermList to the ontology.
|
[
"Add",
"terms",
"from",
"a",
"TermList",
"to",
"the",
"ontology",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L427-L433
|
train
|
althonos/pronto
|
pronto/ontology.py
|
Ontology._include_term
|
def _include_term(self, term):
"""Add a single term to the current ontology.
It is needed to dereference any term in the term's relationship
and then to build the reference again to make sure the other
terms referenced in the term's relations are the one contained
in the ontology (to make sure changes to one term in the ontology
will be applied to every other term related to that term).
"""
ref_needed = False
if term.relations:
for k,v in six.iteritems(term.relations):
for i,t in enumerate(v):
#if isinstance(t, Term):
try:
if t.id not in self:
self._include_term(t)
v[i] = t.id
except AttributeError:
pass
ref_needed = True
self.terms[term.id] = term
return ref_needed
|
python
|
def _include_term(self, term):
"""Add a single term to the current ontology.
It is needed to dereference any term in the term's relationship
and then to build the reference again to make sure the other
terms referenced in the term's relations are the one contained
in the ontology (to make sure changes to one term in the ontology
will be applied to every other term related to that term).
"""
ref_needed = False
if term.relations:
for k,v in six.iteritems(term.relations):
for i,t in enumerate(v):
#if isinstance(t, Term):
try:
if t.id not in self:
self._include_term(t)
v[i] = t.id
except AttributeError:
pass
ref_needed = True
self.terms[term.id] = term
return ref_needed
|
[
"def",
"_include_term",
"(",
"self",
",",
"term",
")",
":",
"ref_needed",
"=",
"False",
"if",
"term",
".",
"relations",
":",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"term",
".",
"relations",
")",
":",
"for",
"i",
",",
"t",
"in",
"enumerate",
"(",
"v",
")",
":",
"#if isinstance(t, Term):",
"try",
":",
"if",
"t",
".",
"id",
"not",
"in",
"self",
":",
"self",
".",
"_include_term",
"(",
"t",
")",
"v",
"[",
"i",
"]",
"=",
"t",
".",
"id",
"except",
"AttributeError",
":",
"pass",
"ref_needed",
"=",
"True",
"self",
".",
"terms",
"[",
"term",
".",
"id",
"]",
"=",
"term",
"return",
"ref_needed"
] |
Add a single term to the current ontology.
It is needed to dereference any term in the term's relationship
and then to build the reference again to make sure the other
terms referenced in the term's relations are the one contained
in the ontology (to make sure changes to one term in the ontology
will be applied to every other term related to that term).
|
[
"Add",
"a",
"single",
"term",
"to",
"the",
"current",
"ontology",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L435-L465
|
train
|
althonos/pronto
|
pronto/ontology.py
|
Ontology._empty_cache
|
def _empty_cache(self, termlist=None):
"""Empty the cache associated with each `Term` instance.
This method is called when merging Ontologies or including
new terms in the Ontology to make sure the cache of each
term is cleaned and avoid returning wrong memoized values
(such as Term.rchildren() TermLists, which get memoized for
performance concerns)
"""
if termlist is None:
for term in six.itervalues(self.terms):
term._empty_cache()
else:
for term in termlist:
try:
self.terms[term.id]._empty_cache()
except AttributeError:
self.terms[term]._empty_cache()
|
python
|
def _empty_cache(self, termlist=None):
"""Empty the cache associated with each `Term` instance.
This method is called when merging Ontologies or including
new terms in the Ontology to make sure the cache of each
term is cleaned and avoid returning wrong memoized values
(such as Term.rchildren() TermLists, which get memoized for
performance concerns)
"""
if termlist is None:
for term in six.itervalues(self.terms):
term._empty_cache()
else:
for term in termlist:
try:
self.terms[term.id]._empty_cache()
except AttributeError:
self.terms[term]._empty_cache()
|
[
"def",
"_empty_cache",
"(",
"self",
",",
"termlist",
"=",
"None",
")",
":",
"if",
"termlist",
"is",
"None",
":",
"for",
"term",
"in",
"six",
".",
"itervalues",
"(",
"self",
".",
"terms",
")",
":",
"term",
".",
"_empty_cache",
"(",
")",
"else",
":",
"for",
"term",
"in",
"termlist",
":",
"try",
":",
"self",
".",
"terms",
"[",
"term",
".",
"id",
"]",
".",
"_empty_cache",
"(",
")",
"except",
"AttributeError",
":",
"self",
".",
"terms",
"[",
"term",
"]",
".",
"_empty_cache",
"(",
")"
] |
Empty the cache associated with each `Term` instance.
This method is called when merging Ontologies or including
new terms in the Ontology to make sure the cache of each
term is cleaned and avoid returning wrong memoized values
(such as Term.rchildren() TermLists, which get memoized for
performance concerns)
|
[
"Empty",
"the",
"cache",
"associated",
"with",
"each",
"Term",
"instance",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L467-L484
|
train
|
althonos/pronto
|
pronto/ontology.py
|
Ontology._obo_meta
|
def _obo_meta(self):
"""Generate the obo metadata header and updates metadata.
When called, this method will create appropriate values for the
``auto-generated-by`` and ``date`` fields.
Note:
Generated following specs of the unofficial format guide:
ftp://ftp.geneontology.org/pub/go/www/GO.format.obo-1_4.shtml
"""
metatags = (
"format-version", "data-version", "date", "saved-by",
"auto-generated-by", "import", "subsetdef", "synonymtypedef",
"default-namespace", "namespace-id-rule", "idspace",
"treat-xrefs-as-equivalent", "treat-xrefs-as-genus-differentia",
"treat-xrefs-as-is_a", "remark", "ontology"
)
meta = self.meta.copy()
meta['auto-generated-by'] = ['pronto v{}'.format(__version__)]
meta['date'] = [datetime.datetime.now().strftime('%d:%m:%Y %H:%M')]
obo_meta = "\n".join(
[ # official obo tags
x.obo if hasattr(x, 'obo') \
else "{}: {}".format(k,x)
for k in metatags[:-1]
for x in meta.get(k, ())
] + [ # eventual other metadata added to remarksmock.patch in production code
"remark: {}: {}".format(k, x)
for k,v in sorted(six.iteritems(meta), key=operator.itemgetter(0))
for x in v
if k not in metatags
] + ( ["ontology: {}".format(x) for x in meta["ontology"]]
if "ontology" in meta
else ["ontology: {}".format(meta["namespace"][0].lower())]
if "namespace" in meta
else [])
)
return obo_meta
|
python
|
def _obo_meta(self):
"""Generate the obo metadata header and updates metadata.
When called, this method will create appropriate values for the
``auto-generated-by`` and ``date`` fields.
Note:
Generated following specs of the unofficial format guide:
ftp://ftp.geneontology.org/pub/go/www/GO.format.obo-1_4.shtml
"""
metatags = (
"format-version", "data-version", "date", "saved-by",
"auto-generated-by", "import", "subsetdef", "synonymtypedef",
"default-namespace", "namespace-id-rule", "idspace",
"treat-xrefs-as-equivalent", "treat-xrefs-as-genus-differentia",
"treat-xrefs-as-is_a", "remark", "ontology"
)
meta = self.meta.copy()
meta['auto-generated-by'] = ['pronto v{}'.format(__version__)]
meta['date'] = [datetime.datetime.now().strftime('%d:%m:%Y %H:%M')]
obo_meta = "\n".join(
[ # official obo tags
x.obo if hasattr(x, 'obo') \
else "{}: {}".format(k,x)
for k in metatags[:-1]
for x in meta.get(k, ())
] + [ # eventual other metadata added to remarksmock.patch in production code
"remark: {}: {}".format(k, x)
for k,v in sorted(six.iteritems(meta), key=operator.itemgetter(0))
for x in v
if k not in metatags
] + ( ["ontology: {}".format(x) for x in meta["ontology"]]
if "ontology" in meta
else ["ontology: {}".format(meta["namespace"][0].lower())]
if "namespace" in meta
else [])
)
return obo_meta
|
[
"def",
"_obo_meta",
"(",
"self",
")",
":",
"metatags",
"=",
"(",
"\"format-version\"",
",",
"\"data-version\"",
",",
"\"date\"",
",",
"\"saved-by\"",
",",
"\"auto-generated-by\"",
",",
"\"import\"",
",",
"\"subsetdef\"",
",",
"\"synonymtypedef\"",
",",
"\"default-namespace\"",
",",
"\"namespace-id-rule\"",
",",
"\"idspace\"",
",",
"\"treat-xrefs-as-equivalent\"",
",",
"\"treat-xrefs-as-genus-differentia\"",
",",
"\"treat-xrefs-as-is_a\"",
",",
"\"remark\"",
",",
"\"ontology\"",
")",
"meta",
"=",
"self",
".",
"meta",
".",
"copy",
"(",
")",
"meta",
"[",
"'auto-generated-by'",
"]",
"=",
"[",
"'pronto v{}'",
".",
"format",
"(",
"__version__",
")",
"]",
"meta",
"[",
"'date'",
"]",
"=",
"[",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%d:%m:%Y %H:%M'",
")",
"]",
"obo_meta",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"# official obo tags",
"x",
".",
"obo",
"if",
"hasattr",
"(",
"x",
",",
"'obo'",
")",
"else",
"\"{}: {}\"",
".",
"format",
"(",
"k",
",",
"x",
")",
"for",
"k",
"in",
"metatags",
"[",
":",
"-",
"1",
"]",
"for",
"x",
"in",
"meta",
".",
"get",
"(",
"k",
",",
"(",
")",
")",
"]",
"+",
"[",
"# eventual other metadata added to remarksmock.patch in production code",
"\"remark: {}: {}\"",
".",
"format",
"(",
"k",
",",
"x",
")",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"six",
".",
"iteritems",
"(",
"meta",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"0",
")",
")",
"for",
"x",
"in",
"v",
"if",
"k",
"not",
"in",
"metatags",
"]",
"+",
"(",
"[",
"\"ontology: {}\"",
".",
"format",
"(",
"x",
")",
"for",
"x",
"in",
"meta",
"[",
"\"ontology\"",
"]",
"]",
"if",
"\"ontology\"",
"in",
"meta",
"else",
"[",
"\"ontology: {}\"",
".",
"format",
"(",
"meta",
"[",
"\"namespace\"",
"]",
"[",
"0",
"]",
".",
"lower",
"(",
")",
")",
"]",
"if",
"\"namespace\"",
"in",
"meta",
"else",
"[",
"]",
")",
")",
"return",
"obo_meta"
] |
Generate the obo metadata header and updates metadata.
When called, this method will create appropriate values for the
``auto-generated-by`` and ``date`` fields.
Note:
Generated following specs of the unofficial format guide:
ftp://ftp.geneontology.org/pub/go/www/GO.format.obo-1_4.shtml
|
[
"Generate",
"the",
"obo",
"metadata",
"header",
"and",
"updates",
"metadata",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L487-L529
|
train
|
althonos/pronto
|
pronto/term.py
|
Term._empty_cache
|
def _empty_cache(self):
"""Empty the cache of the Term's memoized functions.
"""
self._children, self._parents = None, None
self._rchildren, self._rparents = {}, {}
|
python
|
def _empty_cache(self):
"""Empty the cache of the Term's memoized functions.
"""
self._children, self._parents = None, None
self._rchildren, self._rparents = {}, {}
|
[
"def",
"_empty_cache",
"(",
"self",
")",
":",
"self",
".",
"_children",
",",
"self",
".",
"_parents",
"=",
"None",
",",
"None",
"self",
".",
"_rchildren",
",",
"self",
".",
"_rparents",
"=",
"{",
"}",
",",
"{",
"}"
] |
Empty the cache of the Term's memoized functions.
|
[
"Empty",
"the",
"cache",
"of",
"the",
"Term",
"s",
"memoized",
"functions",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/term.py#L250-L254
|
train
|
althonos/pronto
|
pronto/parser/obo.py
|
OboParser._check_section
|
def _check_section(line, section):
"""Update the section being parsed.
The parser starts in the `OboSection.meta` section but once
it reaches the first ``[Typedef]``, it will enter the
`OboSection.typedef` section, and/or when it reaches the first
``[Term]``, it will enter the `OboSection.term` section.
"""
if "[Term]" in line:
section = OboSection.term
elif "[Typedef]" in line:
section = OboSection.typedef
return section
|
python
|
def _check_section(line, section):
"""Update the section being parsed.
The parser starts in the `OboSection.meta` section but once
it reaches the first ``[Typedef]``, it will enter the
`OboSection.typedef` section, and/or when it reaches the first
``[Term]``, it will enter the `OboSection.term` section.
"""
if "[Term]" in line:
section = OboSection.term
elif "[Typedef]" in line:
section = OboSection.typedef
return section
|
[
"def",
"_check_section",
"(",
"line",
",",
"section",
")",
":",
"if",
"\"[Term]\"",
"in",
"line",
":",
"section",
"=",
"OboSection",
".",
"term",
"elif",
"\"[Typedef]\"",
"in",
"line",
":",
"section",
"=",
"OboSection",
".",
"typedef",
"return",
"section"
] |
Update the section being parsed.
The parser starts in the `OboSection.meta` section but once
it reaches the first ``[Typedef]``, it will enter the
`OboSection.typedef` section, and/or when it reaches the first
``[Term]``, it will enter the `OboSection.term` section.
|
[
"Update",
"the",
"section",
"being",
"parsed",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/parser/obo.py#L86-L99
|
train
|
althonos/pronto
|
pronto/parser/obo.py
|
OboParser._parse_metadata
|
def _parse_metadata(cls, line, meta, parse_remarks=True):
"""Parse a metadata line.
The metadata is organized as a ``key: value`` statement which
is split into the proper key and the proper value.
Arguments:
line (str): the line containing the metadata
parse_remarks(bool, optional): set to `False` to avoid
parsing the remarks.
Note:
If the line follows the following schema:
``remark: key: value``, the function will attempt to extract
the proper key/value instead of leaving everything inside
the remark key.
This may cause issues when the line is identified as such
even though the remark is simply a sentence containing a
colon, such as ``remark: 090506 "Attribute"`` in Term
deleted and new entries: Scan Type [...]"
(found in imagingMS.obo). To prevent the splitting from
happening, the text on the left of the colon must be less
that *20 chars long*.
"""
key, value = line.split(':', 1)
key, value = key.strip(), value.strip()
if parse_remarks and "remark" in key: # Checking that the ':' is not
if 0<value.find(': ')<20: # not too far avoid parsing a sentence
try: # containing a ':' as a key: value
cls._parse_metadata(value, meta, parse_remarks) # obo statement nested in a remark
except ValueError: # (20 is arbitrary, it may require
pass # tweaking)
else:
meta[key].append(value)
try:
syn_type_def = []
for m in meta['synonymtypedef']:
if not isinstance(m, SynonymType):
x = SynonymType.from_obo(m)
syn_type_def.append(x)
else:
syn_type_def.append(m)
except KeyError:
pass
else:
meta['synonymtypedef'] = syn_type_def
|
python
|
def _parse_metadata(cls, line, meta, parse_remarks=True):
"""Parse a metadata line.
The metadata is organized as a ``key: value`` statement which
is split into the proper key and the proper value.
Arguments:
line (str): the line containing the metadata
parse_remarks(bool, optional): set to `False` to avoid
parsing the remarks.
Note:
If the line follows the following schema:
``remark: key: value``, the function will attempt to extract
the proper key/value instead of leaving everything inside
the remark key.
This may cause issues when the line is identified as such
even though the remark is simply a sentence containing a
colon, such as ``remark: 090506 "Attribute"`` in Term
deleted and new entries: Scan Type [...]"
(found in imagingMS.obo). To prevent the splitting from
happening, the text on the left of the colon must be less
that *20 chars long*.
"""
key, value = line.split(':', 1)
key, value = key.strip(), value.strip()
if parse_remarks and "remark" in key: # Checking that the ':' is not
if 0<value.find(': ')<20: # not too far avoid parsing a sentence
try: # containing a ':' as a key: value
cls._parse_metadata(value, meta, parse_remarks) # obo statement nested in a remark
except ValueError: # (20 is arbitrary, it may require
pass # tweaking)
else:
meta[key].append(value)
try:
syn_type_def = []
for m in meta['synonymtypedef']:
if not isinstance(m, SynonymType):
x = SynonymType.from_obo(m)
syn_type_def.append(x)
else:
syn_type_def.append(m)
except KeyError:
pass
else:
meta['synonymtypedef'] = syn_type_def
|
[
"def",
"_parse_metadata",
"(",
"cls",
",",
"line",
",",
"meta",
",",
"parse_remarks",
"=",
"True",
")",
":",
"key",
",",
"value",
"=",
"line",
".",
"split",
"(",
"':'",
",",
"1",
")",
"key",
",",
"value",
"=",
"key",
".",
"strip",
"(",
")",
",",
"value",
".",
"strip",
"(",
")",
"if",
"parse_remarks",
"and",
"\"remark\"",
"in",
"key",
":",
"# Checking that the ':' is not",
"if",
"0",
"<",
"value",
".",
"find",
"(",
"': '",
")",
"<",
"20",
":",
"# not too far avoid parsing a sentence",
"try",
":",
"# containing a ':' as a key: value",
"cls",
".",
"_parse_metadata",
"(",
"value",
",",
"meta",
",",
"parse_remarks",
")",
"# obo statement nested in a remark",
"except",
"ValueError",
":",
"# (20 is arbitrary, it may require",
"pass",
"# tweaking)",
"else",
":",
"meta",
"[",
"key",
"]",
".",
"append",
"(",
"value",
")",
"try",
":",
"syn_type_def",
"=",
"[",
"]",
"for",
"m",
"in",
"meta",
"[",
"'synonymtypedef'",
"]",
":",
"if",
"not",
"isinstance",
"(",
"m",
",",
"SynonymType",
")",
":",
"x",
"=",
"SynonymType",
".",
"from_obo",
"(",
"m",
")",
"syn_type_def",
".",
"append",
"(",
"x",
")",
"else",
":",
"syn_type_def",
".",
"append",
"(",
"m",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"meta",
"[",
"'synonymtypedef'",
"]",
"=",
"syn_type_def"
] |
Parse a metadata line.
The metadata is organized as a ``key: value`` statement which
is split into the proper key and the proper value.
Arguments:
line (str): the line containing the metadata
parse_remarks(bool, optional): set to `False` to avoid
parsing the remarks.
Note:
If the line follows the following schema:
``remark: key: value``, the function will attempt to extract
the proper key/value instead of leaving everything inside
the remark key.
This may cause issues when the line is identified as such
even though the remark is simply a sentence containing a
colon, such as ``remark: 090506 "Attribute"`` in Term
deleted and new entries: Scan Type [...]"
(found in imagingMS.obo). To prevent the splitting from
happening, the text on the left of the colon must be less
that *20 chars long*.
|
[
"Parse",
"a",
"metadata",
"line",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/parser/obo.py#L102-L149
|
train
|
althonos/pronto
|
pronto/parser/obo.py
|
OboParser._parse_typedef
|
def _parse_typedef(line, _rawtypedef):
"""Parse a typedef line.
The typedef is organized as a succesion of ``key:value`` pairs
that are extracted into the same dictionnary until a new
header is encountered
Arguments:
line (str): the line containing a typedef statement
"""
if "[Typedef]" in line:
_rawtypedef.append(collections.defaultdict(list))
else:
key, value = line.split(':', 1)
_rawtypedef[-1][key.strip()].append(value.strip())
|
python
|
def _parse_typedef(line, _rawtypedef):
"""Parse a typedef line.
The typedef is organized as a succesion of ``key:value`` pairs
that are extracted into the same dictionnary until a new
header is encountered
Arguments:
line (str): the line containing a typedef statement
"""
if "[Typedef]" in line:
_rawtypedef.append(collections.defaultdict(list))
else:
key, value = line.split(':', 1)
_rawtypedef[-1][key.strip()].append(value.strip())
|
[
"def",
"_parse_typedef",
"(",
"line",
",",
"_rawtypedef",
")",
":",
"if",
"\"[Typedef]\"",
"in",
"line",
":",
"_rawtypedef",
".",
"append",
"(",
"collections",
".",
"defaultdict",
"(",
"list",
")",
")",
"else",
":",
"key",
",",
"value",
"=",
"line",
".",
"split",
"(",
"':'",
",",
"1",
")",
"_rawtypedef",
"[",
"-",
"1",
"]",
"[",
"key",
".",
"strip",
"(",
")",
"]",
".",
"append",
"(",
"value",
".",
"strip",
"(",
")",
")"
] |
Parse a typedef line.
The typedef is organized as a succesion of ``key:value`` pairs
that are extracted into the same dictionnary until a new
header is encountered
Arguments:
line (str): the line containing a typedef statement
|
[
"Parse",
"a",
"typedef",
"line",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/parser/obo.py#L152-L166
|
train
|
althonos/pronto
|
pronto/parser/obo.py
|
OboParser._parse_term
|
def _parse_term(_rawterms):
"""Parse a term line.
The term is organized as a succesion of ``key:value`` pairs
that are extracted into the same dictionnary until a new
header is encountered
Arguments:
line (str): the line containing a term statement
"""
line = yield
_rawterms.append(collections.defaultdict(list))
while True:
line = yield
if "[Term]" in line:
_rawterms.append(collections.defaultdict(list))
else:
key, value = line.split(':', 1)
_rawterms[-1][key.strip()].append(value.strip())
|
python
|
def _parse_term(_rawterms):
"""Parse a term line.
The term is organized as a succesion of ``key:value`` pairs
that are extracted into the same dictionnary until a new
header is encountered
Arguments:
line (str): the line containing a term statement
"""
line = yield
_rawterms.append(collections.defaultdict(list))
while True:
line = yield
if "[Term]" in line:
_rawterms.append(collections.defaultdict(list))
else:
key, value = line.split(':', 1)
_rawterms[-1][key.strip()].append(value.strip())
|
[
"def",
"_parse_term",
"(",
"_rawterms",
")",
":",
"line",
"=",
"yield",
"_rawterms",
".",
"append",
"(",
"collections",
".",
"defaultdict",
"(",
"list",
")",
")",
"while",
"True",
":",
"line",
"=",
"yield",
"if",
"\"[Term]\"",
"in",
"line",
":",
"_rawterms",
".",
"append",
"(",
"collections",
".",
"defaultdict",
"(",
"list",
")",
")",
"else",
":",
"key",
",",
"value",
"=",
"line",
".",
"split",
"(",
"':'",
",",
"1",
")",
"_rawterms",
"[",
"-",
"1",
"]",
"[",
"key",
".",
"strip",
"(",
")",
"]",
".",
"append",
"(",
"value",
".",
"strip",
"(",
")",
")"
] |
Parse a term line.
The term is organized as a succesion of ``key:value`` pairs
that are extracted into the same dictionnary until a new
header is encountered
Arguments:
line (str): the line containing a term statement
|
[
"Parse",
"a",
"term",
"line",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/parser/obo.py#L170-L188
|
train
|
althonos/pronto
|
pronto/parser/obo.py
|
OboParser._classify
|
def _classify(_rawtypedef, _rawterms):
"""Create proper objects out of extracted dictionnaries.
New Relationship objects are instantiated with the help of
the `Relationship._from_obo_dict` alternate constructor.
New `Term` objects are instantiated by manually extracting id,
name, desc and relationships out of the ``_rawterm``
dictionnary, and then calling the default constructor.
"""
terms = collections.OrderedDict()
_cached_synonyms = {}
typedefs = [
Relationship._from_obo_dict( # instantiate a new Relationship
{k:v for k,lv in six.iteritems(_typedef) for v in lv}
)
for _typedef in _rawtypedef
]
for _term in _rawterms:
synonyms = set()
_id = _term['id'][0]
_name = _term.pop('name', ('',))[0]
_desc = _term.pop('def', ('',))[0]
_relations = collections.defaultdict(list)
try:
for other in _term.get('is_a', ()):
_relations[Relationship('is_a')].append(other.split('!')[0].strip())
except IndexError:
pass
try:
for relname, other in ( x.split(' ', 1) for x in _term.pop('relationship', ())):
_relations[Relationship(relname)].append(other.split('!')[0].strip())
except IndexError:
pass
for key, scope in six.iteritems(_obo_synonyms_map):
for obo_header in _term.pop(key, ()):
try:
s = _cached_synonyms[obo_header]
except KeyError:
s = Synonym.from_obo(obo_header, scope)
_cached_synonyms[obo_header] = s
finally:
synonyms.add(s)
desc = Description.from_obo(_desc) if _desc else Description("")
terms[_id] = Term(_id, _name, desc, dict(_relations), synonyms, dict(_term))
return terms, typedefs
|
python
|
def _classify(_rawtypedef, _rawterms):
"""Create proper objects out of extracted dictionnaries.
New Relationship objects are instantiated with the help of
the `Relationship._from_obo_dict` alternate constructor.
New `Term` objects are instantiated by manually extracting id,
name, desc and relationships out of the ``_rawterm``
dictionnary, and then calling the default constructor.
"""
terms = collections.OrderedDict()
_cached_synonyms = {}
typedefs = [
Relationship._from_obo_dict( # instantiate a new Relationship
{k:v for k,lv in six.iteritems(_typedef) for v in lv}
)
for _typedef in _rawtypedef
]
for _term in _rawterms:
synonyms = set()
_id = _term['id'][0]
_name = _term.pop('name', ('',))[0]
_desc = _term.pop('def', ('',))[0]
_relations = collections.defaultdict(list)
try:
for other in _term.get('is_a', ()):
_relations[Relationship('is_a')].append(other.split('!')[0].strip())
except IndexError:
pass
try:
for relname, other in ( x.split(' ', 1) for x in _term.pop('relationship', ())):
_relations[Relationship(relname)].append(other.split('!')[0].strip())
except IndexError:
pass
for key, scope in six.iteritems(_obo_synonyms_map):
for obo_header in _term.pop(key, ()):
try:
s = _cached_synonyms[obo_header]
except KeyError:
s = Synonym.from_obo(obo_header, scope)
_cached_synonyms[obo_header] = s
finally:
synonyms.add(s)
desc = Description.from_obo(_desc) if _desc else Description("")
terms[_id] = Term(_id, _name, desc, dict(_relations), synonyms, dict(_term))
return terms, typedefs
|
[
"def",
"_classify",
"(",
"_rawtypedef",
",",
"_rawterms",
")",
":",
"terms",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"_cached_synonyms",
"=",
"{",
"}",
"typedefs",
"=",
"[",
"Relationship",
".",
"_from_obo_dict",
"(",
"# instantiate a new Relationship",
"{",
"k",
":",
"v",
"for",
"k",
",",
"lv",
"in",
"six",
".",
"iteritems",
"(",
"_typedef",
")",
"for",
"v",
"in",
"lv",
"}",
")",
"for",
"_typedef",
"in",
"_rawtypedef",
"]",
"for",
"_term",
"in",
"_rawterms",
":",
"synonyms",
"=",
"set",
"(",
")",
"_id",
"=",
"_term",
"[",
"'id'",
"]",
"[",
"0",
"]",
"_name",
"=",
"_term",
".",
"pop",
"(",
"'name'",
",",
"(",
"''",
",",
")",
")",
"[",
"0",
"]",
"_desc",
"=",
"_term",
".",
"pop",
"(",
"'def'",
",",
"(",
"''",
",",
")",
")",
"[",
"0",
"]",
"_relations",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"try",
":",
"for",
"other",
"in",
"_term",
".",
"get",
"(",
"'is_a'",
",",
"(",
")",
")",
":",
"_relations",
"[",
"Relationship",
"(",
"'is_a'",
")",
"]",
".",
"append",
"(",
"other",
".",
"split",
"(",
"'!'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
"except",
"IndexError",
":",
"pass",
"try",
":",
"for",
"relname",
",",
"other",
"in",
"(",
"x",
".",
"split",
"(",
"' '",
",",
"1",
")",
"for",
"x",
"in",
"_term",
".",
"pop",
"(",
"'relationship'",
",",
"(",
")",
")",
")",
":",
"_relations",
"[",
"Relationship",
"(",
"relname",
")",
"]",
".",
"append",
"(",
"other",
".",
"split",
"(",
"'!'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
"except",
"IndexError",
":",
"pass",
"for",
"key",
",",
"scope",
"in",
"six",
".",
"iteritems",
"(",
"_obo_synonyms_map",
")",
":",
"for",
"obo_header",
"in",
"_term",
".",
"pop",
"(",
"key",
",",
"(",
")",
")",
":",
"try",
":",
"s",
"=",
"_cached_synonyms",
"[",
"obo_header",
"]",
"except",
"KeyError",
":",
"s",
"=",
"Synonym",
".",
"from_obo",
"(",
"obo_header",
",",
"scope",
")",
"_cached_synonyms",
"[",
"obo_header",
"]",
"=",
"s",
"finally",
":",
"synonyms",
".",
"add",
"(",
"s",
")",
"desc",
"=",
"Description",
".",
"from_obo",
"(",
"_desc",
")",
"if",
"_desc",
"else",
"Description",
"(",
"\"\"",
")",
"terms",
"[",
"_id",
"]",
"=",
"Term",
"(",
"_id",
",",
"_name",
",",
"desc",
",",
"dict",
"(",
"_relations",
")",
",",
"synonyms",
",",
"dict",
"(",
"_term",
")",
")",
"return",
"terms",
",",
"typedefs"
] |
Create proper objects out of extracted dictionnaries.
New Relationship objects are instantiated with the help of
the `Relationship._from_obo_dict` alternate constructor.
New `Term` objects are instantiated by manually extracting id,
name, desc and relationships out of the ``_rawterm``
dictionnary, and then calling the default constructor.
|
[
"Create",
"proper",
"objects",
"out",
"of",
"extracted",
"dictionnaries",
"."
] |
a768adcba19fb34f26f67cde4a03d317f932c274
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/parser/obo.py#L192-L244
|
train
|
matheuscas/pycpfcnpj
|
pycpfcnpj/calculation.py
|
calculate_first_digit
|
def calculate_first_digit(number):
""" This function calculates the first check digit of a
cpf or cnpj.
:param number: cpf (length 9) or cnpf (length 12)
string to check the first digit. Only numbers.
:type number: string
:returns: string -- the first digit
"""
sum = 0
if len(number) == 9:
weights = CPF_WEIGHTS[0]
else:
weights = CNPJ_WEIGHTS[0]
for i in range(len(number)):
sum = sum + int(number[i]) * weights[i]
rest_division = sum % DIVISOR
if rest_division < 2:
return '0'
return str(11 - rest_division)
|
python
|
def calculate_first_digit(number):
""" This function calculates the first check digit of a
cpf or cnpj.
:param number: cpf (length 9) or cnpf (length 12)
string to check the first digit. Only numbers.
:type number: string
:returns: string -- the first digit
"""
sum = 0
if len(number) == 9:
weights = CPF_WEIGHTS[0]
else:
weights = CNPJ_WEIGHTS[0]
for i in range(len(number)):
sum = sum + int(number[i]) * weights[i]
rest_division = sum % DIVISOR
if rest_division < 2:
return '0'
return str(11 - rest_division)
|
[
"def",
"calculate_first_digit",
"(",
"number",
")",
":",
"sum",
"=",
"0",
"if",
"len",
"(",
"number",
")",
"==",
"9",
":",
"weights",
"=",
"CPF_WEIGHTS",
"[",
"0",
"]",
"else",
":",
"weights",
"=",
"CNPJ_WEIGHTS",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"number",
")",
")",
":",
"sum",
"=",
"sum",
"+",
"int",
"(",
"number",
"[",
"i",
"]",
")",
"*",
"weights",
"[",
"i",
"]",
"rest_division",
"=",
"sum",
"%",
"DIVISOR",
"if",
"rest_division",
"<",
"2",
":",
"return",
"'0'",
"return",
"str",
"(",
"11",
"-",
"rest_division",
")"
] |
This function calculates the first check digit of a
cpf or cnpj.
:param number: cpf (length 9) or cnpf (length 12)
string to check the first digit. Only numbers.
:type number: string
:returns: string -- the first digit
|
[
"This",
"function",
"calculates",
"the",
"first",
"check",
"digit",
"of",
"a",
"cpf",
"or",
"cnpj",
"."
] |
42f7db466280042af10e0e555cb8d2f5bb9865ee
|
https://github.com/matheuscas/pycpfcnpj/blob/42f7db466280042af10e0e555cb8d2f5bb9865ee/pycpfcnpj/calculation.py#L10-L32
|
train
|
matheuscas/pycpfcnpj
|
pycpfcnpj/cpfcnpj.py
|
validate
|
def validate(number):
"""This functions acts like a Facade to the other modules cpf and cnpj
and validates either CPF and CNPJ numbers.
Feel free to use this or the other modules directly.
:param number: a CPF or CNPJ number. Clear number to have only numbers.
:type number: string
:return: Bool -- True if number is a valid CPF or CNPJ number.
False if it is not or do not complain
with the right size of these numbers.
"""
clean_number = clear_punctuation(number)
if len(clean_number) == 11:
return cpf.validate(clean_number)
elif len(clean_number) == 14:
return cnpj.validate(clean_number)
return False
|
python
|
def validate(number):
"""This functions acts like a Facade to the other modules cpf and cnpj
and validates either CPF and CNPJ numbers.
Feel free to use this or the other modules directly.
:param number: a CPF or CNPJ number. Clear number to have only numbers.
:type number: string
:return: Bool -- True if number is a valid CPF or CNPJ number.
False if it is not or do not complain
with the right size of these numbers.
"""
clean_number = clear_punctuation(number)
if len(clean_number) == 11:
return cpf.validate(clean_number)
elif len(clean_number) == 14:
return cnpj.validate(clean_number)
return False
|
[
"def",
"validate",
"(",
"number",
")",
":",
"clean_number",
"=",
"clear_punctuation",
"(",
"number",
")",
"if",
"len",
"(",
"clean_number",
")",
"==",
"11",
":",
"return",
"cpf",
".",
"validate",
"(",
"clean_number",
")",
"elif",
"len",
"(",
"clean_number",
")",
"==",
"14",
":",
"return",
"cnpj",
".",
"validate",
"(",
"clean_number",
")",
"return",
"False"
] |
This functions acts like a Facade to the other modules cpf and cnpj
and validates either CPF and CNPJ numbers.
Feel free to use this or the other modules directly.
:param number: a CPF or CNPJ number. Clear number to have only numbers.
:type number: string
:return: Bool -- True if number is a valid CPF or CNPJ number.
False if it is not or do not complain
with the right size of these numbers.
|
[
"This",
"functions",
"acts",
"like",
"a",
"Facade",
"to",
"the",
"other",
"modules",
"cpf",
"and",
"cnpj",
"and",
"validates",
"either",
"CPF",
"and",
"CNPJ",
"numbers",
".",
"Feel",
"free",
"to",
"use",
"this",
"or",
"the",
"other",
"modules",
"directly",
"."
] |
42f7db466280042af10e0e555cb8d2f5bb9865ee
|
https://github.com/matheuscas/pycpfcnpj/blob/42f7db466280042af10e0e555cb8d2f5bb9865ee/pycpfcnpj/cpfcnpj.py#L7-L25
|
train
|
matheuscas/pycpfcnpj
|
pycpfcnpj/cpf.py
|
validate
|
def validate(cpf_number):
"""This function validates a CPF number.
This function uses calculation package to calculate both digits
and then validates the number.
:param cpf_number: a CPF number to be validated. Only numbers.
:type cpf_number: string
:return: Bool -- True for a valid number, False otherwise.
"""
_cpf = compat.clear_punctuation(cpf_number)
if (len(_cpf) != 11 or
len(set(_cpf)) == 1):
return False
first_part = _cpf[:9]
second_part = _cpf[:10]
first_digit = _cpf[9]
second_digit = _cpf[10]
if (first_digit == calc.calculate_first_digit(first_part) and
second_digit == calc.calculate_second_digit(second_part)):
return True
return False
|
python
|
def validate(cpf_number):
"""This function validates a CPF number.
This function uses calculation package to calculate both digits
and then validates the number.
:param cpf_number: a CPF number to be validated. Only numbers.
:type cpf_number: string
:return: Bool -- True for a valid number, False otherwise.
"""
_cpf = compat.clear_punctuation(cpf_number)
if (len(_cpf) != 11 or
len(set(_cpf)) == 1):
return False
first_part = _cpf[:9]
second_part = _cpf[:10]
first_digit = _cpf[9]
second_digit = _cpf[10]
if (first_digit == calc.calculate_first_digit(first_part) and
second_digit == calc.calculate_second_digit(second_part)):
return True
return False
|
[
"def",
"validate",
"(",
"cpf_number",
")",
":",
"_cpf",
"=",
"compat",
".",
"clear_punctuation",
"(",
"cpf_number",
")",
"if",
"(",
"len",
"(",
"_cpf",
")",
"!=",
"11",
"or",
"len",
"(",
"set",
"(",
"_cpf",
")",
")",
"==",
"1",
")",
":",
"return",
"False",
"first_part",
"=",
"_cpf",
"[",
":",
"9",
"]",
"second_part",
"=",
"_cpf",
"[",
":",
"10",
"]",
"first_digit",
"=",
"_cpf",
"[",
"9",
"]",
"second_digit",
"=",
"_cpf",
"[",
"10",
"]",
"if",
"(",
"first_digit",
"==",
"calc",
".",
"calculate_first_digit",
"(",
"first_part",
")",
"and",
"second_digit",
"==",
"calc",
".",
"calculate_second_digit",
"(",
"second_part",
")",
")",
":",
"return",
"True",
"return",
"False"
] |
This function validates a CPF number.
This function uses calculation package to calculate both digits
and then validates the number.
:param cpf_number: a CPF number to be validated. Only numbers.
:type cpf_number: string
:return: Bool -- True for a valid number, False otherwise.
|
[
"This",
"function",
"validates",
"a",
"CPF",
"number",
"."
] |
42f7db466280042af10e0e555cb8d2f5bb9865ee
|
https://github.com/matheuscas/pycpfcnpj/blob/42f7db466280042af10e0e555cb8d2f5bb9865ee/pycpfcnpj/cpf.py#L5-L32
|
train
|
matheuscas/pycpfcnpj
|
pycpfcnpj/cnpj.py
|
validate
|
def validate(cnpj_number):
"""This function validates a CNPJ number.
This function uses calculation package to calculate both digits
and then validates the number.
:param cnpj_number: a CNPJ number to be validated. Only numbers.
:type cnpj_number: string
:return: Bool -- True for a valid number, False otherwise.
"""
_cnpj = compat.clear_punctuation(cnpj_number)
if (len(_cnpj) != 14 or
len(set(_cnpj)) == 1):
return False
first_part = _cnpj[:12]
second_part = _cnpj[:13]
first_digit = _cnpj[12]
second_digit = _cnpj[13]
if (first_digit == calc.calculate_first_digit(first_part) and
second_digit == calc.calculate_second_digit(second_part)):
return True
return False
|
python
|
def validate(cnpj_number):
"""This function validates a CNPJ number.
This function uses calculation package to calculate both digits
and then validates the number.
:param cnpj_number: a CNPJ number to be validated. Only numbers.
:type cnpj_number: string
:return: Bool -- True for a valid number, False otherwise.
"""
_cnpj = compat.clear_punctuation(cnpj_number)
if (len(_cnpj) != 14 or
len(set(_cnpj)) == 1):
return False
first_part = _cnpj[:12]
second_part = _cnpj[:13]
first_digit = _cnpj[12]
second_digit = _cnpj[13]
if (first_digit == calc.calculate_first_digit(first_part) and
second_digit == calc.calculate_second_digit(second_part)):
return True
return False
|
[
"def",
"validate",
"(",
"cnpj_number",
")",
":",
"_cnpj",
"=",
"compat",
".",
"clear_punctuation",
"(",
"cnpj_number",
")",
"if",
"(",
"len",
"(",
"_cnpj",
")",
"!=",
"14",
"or",
"len",
"(",
"set",
"(",
"_cnpj",
")",
")",
"==",
"1",
")",
":",
"return",
"False",
"first_part",
"=",
"_cnpj",
"[",
":",
"12",
"]",
"second_part",
"=",
"_cnpj",
"[",
":",
"13",
"]",
"first_digit",
"=",
"_cnpj",
"[",
"12",
"]",
"second_digit",
"=",
"_cnpj",
"[",
"13",
"]",
"if",
"(",
"first_digit",
"==",
"calc",
".",
"calculate_first_digit",
"(",
"first_part",
")",
"and",
"second_digit",
"==",
"calc",
".",
"calculate_second_digit",
"(",
"second_part",
")",
")",
":",
"return",
"True",
"return",
"False"
] |
This function validates a CNPJ number.
This function uses calculation package to calculate both digits
and then validates the number.
:param cnpj_number: a CNPJ number to be validated. Only numbers.
:type cnpj_number: string
:return: Bool -- True for a valid number, False otherwise.
|
[
"This",
"function",
"validates",
"a",
"CNPJ",
"number",
"."
] |
42f7db466280042af10e0e555cb8d2f5bb9865ee
|
https://github.com/matheuscas/pycpfcnpj/blob/42f7db466280042af10e0e555cb8d2f5bb9865ee/pycpfcnpj/cnpj.py#L5-L32
|
train
|
tableau/document-api-python
|
tableaudocumentapi/xfile.py
|
xml_open
|
def xml_open(filename, expected_root=None):
"""Opens the provided 'filename'. Handles detecting if the file is an archive,
detecting the document version, and validating the root tag."""
# Is the file a zip (.twbx or .tdsx)
if zipfile.is_zipfile(filename):
tree = get_xml_from_archive(filename)
else:
tree = ET.parse(filename)
# Is the file a supported version
tree_root = tree.getroot()
file_version = Version(tree_root.attrib.get('version', '0.0'))
if file_version < MIN_SUPPORTED_VERSION:
raise TableauVersionNotSupportedException(file_version)
# Does the root tag match the object type (workbook or data source)
if expected_root and (expected_root != tree_root.tag):
raise TableauInvalidFileException(
"'{}'' is not a valid '{}' file".format(filename, expected_root))
return tree
|
python
|
def xml_open(filename, expected_root=None):
"""Opens the provided 'filename'. Handles detecting if the file is an archive,
detecting the document version, and validating the root tag."""
# Is the file a zip (.twbx or .tdsx)
if zipfile.is_zipfile(filename):
tree = get_xml_from_archive(filename)
else:
tree = ET.parse(filename)
# Is the file a supported version
tree_root = tree.getroot()
file_version = Version(tree_root.attrib.get('version', '0.0'))
if file_version < MIN_SUPPORTED_VERSION:
raise TableauVersionNotSupportedException(file_version)
# Does the root tag match the object type (workbook or data source)
if expected_root and (expected_root != tree_root.tag):
raise TableauInvalidFileException(
"'{}'' is not a valid '{}' file".format(filename, expected_root))
return tree
|
[
"def",
"xml_open",
"(",
"filename",
",",
"expected_root",
"=",
"None",
")",
":",
"# Is the file a zip (.twbx or .tdsx)",
"if",
"zipfile",
".",
"is_zipfile",
"(",
"filename",
")",
":",
"tree",
"=",
"get_xml_from_archive",
"(",
"filename",
")",
"else",
":",
"tree",
"=",
"ET",
".",
"parse",
"(",
"filename",
")",
"# Is the file a supported version",
"tree_root",
"=",
"tree",
".",
"getroot",
"(",
")",
"file_version",
"=",
"Version",
"(",
"tree_root",
".",
"attrib",
".",
"get",
"(",
"'version'",
",",
"'0.0'",
")",
")",
"if",
"file_version",
"<",
"MIN_SUPPORTED_VERSION",
":",
"raise",
"TableauVersionNotSupportedException",
"(",
"file_version",
")",
"# Does the root tag match the object type (workbook or data source)",
"if",
"expected_root",
"and",
"(",
"expected_root",
"!=",
"tree_root",
".",
"tag",
")",
":",
"raise",
"TableauInvalidFileException",
"(",
"\"'{}'' is not a valid '{}' file\"",
".",
"format",
"(",
"filename",
",",
"expected_root",
")",
")",
"return",
"tree"
] |
Opens the provided 'filename'. Handles detecting if the file is an archive,
detecting the document version, and validating the root tag.
|
[
"Opens",
"the",
"provided",
"filename",
".",
"Handles",
"detecting",
"if",
"the",
"file",
"is",
"an",
"archive",
"detecting",
"the",
"document",
"version",
"and",
"validating",
"the",
"root",
"tag",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/xfile.py#L24-L46
|
train
|
tableau/document-api-python
|
tableaudocumentapi/xfile.py
|
build_archive_file
|
def build_archive_file(archive_contents, zip_file):
"""Build a Tableau-compatible archive file."""
# This is tested against Desktop and Server, and reverse engineered by lots
# of trial and error. Do not change this logic.
for root_dir, _, files in os.walk(archive_contents):
relative_dir = os.path.relpath(root_dir, archive_contents)
for f in files:
temp_file_full_path = os.path.join(
archive_contents, relative_dir, f)
zipname = os.path.join(relative_dir, f)
zip_file.write(temp_file_full_path, arcname=zipname)
|
python
|
def build_archive_file(archive_contents, zip_file):
"""Build a Tableau-compatible archive file."""
# This is tested against Desktop and Server, and reverse engineered by lots
# of trial and error. Do not change this logic.
for root_dir, _, files in os.walk(archive_contents):
relative_dir = os.path.relpath(root_dir, archive_contents)
for f in files:
temp_file_full_path = os.path.join(
archive_contents, relative_dir, f)
zipname = os.path.join(relative_dir, f)
zip_file.write(temp_file_full_path, arcname=zipname)
|
[
"def",
"build_archive_file",
"(",
"archive_contents",
",",
"zip_file",
")",
":",
"# This is tested against Desktop and Server, and reverse engineered by lots",
"# of trial and error. Do not change this logic.",
"for",
"root_dir",
",",
"_",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"archive_contents",
")",
":",
"relative_dir",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"root_dir",
",",
"archive_contents",
")",
"for",
"f",
"in",
"files",
":",
"temp_file_full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"archive_contents",
",",
"relative_dir",
",",
"f",
")",
"zipname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"relative_dir",
",",
"f",
")",
"zip_file",
".",
"write",
"(",
"temp_file_full_path",
",",
"arcname",
"=",
"zipname",
")"
] |
Build a Tableau-compatible archive file.
|
[
"Build",
"a",
"Tableau",
"-",
"compatible",
"archive",
"file",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/xfile.py#L85-L96
|
train
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.from_attributes
|
def from_attributes(cls, server, dbname, username, dbclass, port=None, query_band=None,
initial_sql=None, authentication=''):
"""Creates a new connection that can be added into a Data Source.
defaults to `''` which will be treated as 'prompt' by Tableau."""
root = ET.Element('connection', authentication=authentication)
xml = cls(root)
xml.server = server
xml.dbname = dbname
xml.username = username
xml.dbclass = dbclass
xml.port = port
xml.query_band = query_band
xml.initial_sql = initial_sql
return xml
|
python
|
def from_attributes(cls, server, dbname, username, dbclass, port=None, query_band=None,
initial_sql=None, authentication=''):
"""Creates a new connection that can be added into a Data Source.
defaults to `''` which will be treated as 'prompt' by Tableau."""
root = ET.Element('connection', authentication=authentication)
xml = cls(root)
xml.server = server
xml.dbname = dbname
xml.username = username
xml.dbclass = dbclass
xml.port = port
xml.query_band = query_band
xml.initial_sql = initial_sql
return xml
|
[
"def",
"from_attributes",
"(",
"cls",
",",
"server",
",",
"dbname",
",",
"username",
",",
"dbclass",
",",
"port",
"=",
"None",
",",
"query_band",
"=",
"None",
",",
"initial_sql",
"=",
"None",
",",
"authentication",
"=",
"''",
")",
":",
"root",
"=",
"ET",
".",
"Element",
"(",
"'connection'",
",",
"authentication",
"=",
"authentication",
")",
"xml",
"=",
"cls",
"(",
"root",
")",
"xml",
".",
"server",
"=",
"server",
"xml",
".",
"dbname",
"=",
"dbname",
"xml",
".",
"username",
"=",
"username",
"xml",
".",
"dbclass",
"=",
"dbclass",
"xml",
".",
"port",
"=",
"port",
"xml",
".",
"query_band",
"=",
"query_band",
"xml",
".",
"initial_sql",
"=",
"initial_sql",
"return",
"xml"
] |
Creates a new connection that can be added into a Data Source.
defaults to `''` which will be treated as 'prompt' by Tableau.
|
[
"Creates",
"a",
"new",
"connection",
"that",
"can",
"be",
"added",
"into",
"a",
"Data",
"Source",
".",
"defaults",
"to",
"which",
"will",
"be",
"treated",
"as",
"prompt",
"by",
"Tableau",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L28-L43
|
train
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.dbname
|
def dbname(self, value):
"""
Set the connection's database name property.
Args:
value: New name of the database. String.
Returns:
Nothing.
"""
self._dbname = value
self._connectionXML.set('dbname', value)
|
python
|
def dbname(self, value):
"""
Set the connection's database name property.
Args:
value: New name of the database. String.
Returns:
Nothing.
"""
self._dbname = value
self._connectionXML.set('dbname', value)
|
[
"def",
"dbname",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_dbname",
"=",
"value",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'dbname'",
",",
"value",
")"
] |
Set the connection's database name property.
Args:
value: New name of the database. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"database",
"name",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L51-L63
|
train
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.server
|
def server(self, value):
"""
Set the connection's server property.
Args:
value: New server. String.
Returns:
Nothing.
"""
self._server = value
self._connectionXML.set('server', value)
|
python
|
def server(self, value):
"""
Set the connection's server property.
Args:
value: New server. String.
Returns:
Nothing.
"""
self._server = value
self._connectionXML.set('server', value)
|
[
"def",
"server",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_server",
"=",
"value",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'server'",
",",
"value",
")"
] |
Set the connection's server property.
Args:
value: New server. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"server",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L71-L83
|
train
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.username
|
def username(self, value):
"""
Set the connection's username property.
Args:
value: New username value. String.
Returns:
Nothing.
"""
self._username = value
self._connectionXML.set('username', value)
|
python
|
def username(self, value):
"""
Set the connection's username property.
Args:
value: New username value. String.
Returns:
Nothing.
"""
self._username = value
self._connectionXML.set('username', value)
|
[
"def",
"username",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_username",
"=",
"value",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'username'",
",",
"value",
")"
] |
Set the connection's username property.
Args:
value: New username value. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"username",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L91-L103
|
train
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.dbclass
|
def dbclass(self, value):
"""Set the connection's dbclass property.
Args:
value: New dbclass value. String.
Returns:
Nothing.
"""
if not is_valid_dbclass(value):
raise AttributeError("'{}' is not a valid database type".format(value))
self._class = value
self._connectionXML.set('class', value)
|
python
|
def dbclass(self, value):
"""Set the connection's dbclass property.
Args:
value: New dbclass value. String.
Returns:
Nothing.
"""
if not is_valid_dbclass(value):
raise AttributeError("'{}' is not a valid database type".format(value))
self._class = value
self._connectionXML.set('class', value)
|
[
"def",
"dbclass",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"is_valid_dbclass",
"(",
"value",
")",
":",
"raise",
"AttributeError",
"(",
"\"'{}' is not a valid database type\"",
".",
"format",
"(",
"value",
")",
")",
"self",
".",
"_class",
"=",
"value",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'class'",
",",
"value",
")"
] |
Set the connection's dbclass property.
Args:
value: New dbclass value. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"dbclass",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L116-L130
|
train
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.port
|
def port(self, value):
"""Set the connection's port property.
Args:
value: New port value. String.
Returns:
Nothing.
"""
self._port = value
# If port is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['port']
except KeyError:
pass
else:
self._connectionXML.set('port', value)
|
python
|
def port(self, value):
"""Set the connection's port property.
Args:
value: New port value. String.
Returns:
Nothing.
"""
self._port = value
# If port is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['port']
except KeyError:
pass
else:
self._connectionXML.set('port', value)
|
[
"def",
"port",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_port",
"=",
"value",
"# If port is None we remove the element and don't write it to XML",
"if",
"value",
"is",
"None",
":",
"try",
":",
"del",
"self",
".",
"_connectionXML",
".",
"attrib",
"[",
"'port'",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'port'",
",",
"value",
")"
] |
Set the connection's port property.
Args:
value: New port value. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"port",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L138-L156
|
train
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.query_band
|
def query_band(self, value):
"""Set the connection's query_band property.
Args:
value: New query_band value. String.
Returns:
Nothing.
"""
self._query_band = value
# If query band is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['query-band-spec']
except KeyError:
pass
else:
self._connectionXML.set('query-band-spec', value)
|
python
|
def query_band(self, value):
"""Set the connection's query_band property.
Args:
value: New query_band value. String.
Returns:
Nothing.
"""
self._query_band = value
# If query band is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['query-band-spec']
except KeyError:
pass
else:
self._connectionXML.set('query-band-spec', value)
|
[
"def",
"query_band",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_query_band",
"=",
"value",
"# If query band is None we remove the element and don't write it to XML",
"if",
"value",
"is",
"None",
":",
"try",
":",
"del",
"self",
".",
"_connectionXML",
".",
"attrib",
"[",
"'query-band-spec'",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'query-band-spec'",
",",
"value",
")"
] |
Set the connection's query_band property.
Args:
value: New query_band value. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"query_band",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L164-L182
|
train
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.initial_sql
|
def initial_sql(self, value):
"""Set the connection's initial_sql property.
Args:
value: New initial_sql value. String.
Returns:
Nothing.
"""
self._initial_sql = value
# If initial_sql is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['one-time-sql']
except KeyError:
pass
else:
self._connectionXML.set('one-time-sql', value)
|
python
|
def initial_sql(self, value):
"""Set the connection's initial_sql property.
Args:
value: New initial_sql value. String.
Returns:
Nothing.
"""
self._initial_sql = value
# If initial_sql is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['one-time-sql']
except KeyError:
pass
else:
self._connectionXML.set('one-time-sql', value)
|
[
"def",
"initial_sql",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_initial_sql",
"=",
"value",
"# If initial_sql is None we remove the element and don't write it to XML",
"if",
"value",
"is",
"None",
":",
"try",
":",
"del",
"self",
".",
"_connectionXML",
".",
"attrib",
"[",
"'one-time-sql'",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'one-time-sql'",
",",
"value",
")"
] |
Set the connection's initial_sql property.
Args:
value: New initial_sql value. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"initial_sql",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L190-L208
|
train
|
tableau/document-api-python
|
tableaudocumentapi/datasource.py
|
base36encode
|
def base36encode(number):
"""Converts an integer into a base36 string."""
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(ALPHABET):
return sign + ALPHABET[number]
while number != 0:
number, i = divmod(number, len(ALPHABET))
base36 = ALPHABET[i] + base36
return sign + base36
|
python
|
def base36encode(number):
"""Converts an integer into a base36 string."""
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(ALPHABET):
return sign + ALPHABET[number]
while number != 0:
number, i = divmod(number, len(ALPHABET))
base36 = ALPHABET[i] + base36
return sign + base36
|
[
"def",
"base36encode",
"(",
"number",
")",
":",
"ALPHABET",
"=",
"\"0123456789abcdefghijklmnopqrstuvwxyz\"",
"base36",
"=",
"''",
"sign",
"=",
"''",
"if",
"number",
"<",
"0",
":",
"sign",
"=",
"'-'",
"number",
"=",
"-",
"number",
"if",
"0",
"<=",
"number",
"<",
"len",
"(",
"ALPHABET",
")",
":",
"return",
"sign",
"+",
"ALPHABET",
"[",
"number",
"]",
"while",
"number",
"!=",
"0",
":",
"number",
",",
"i",
"=",
"divmod",
"(",
"number",
",",
"len",
"(",
"ALPHABET",
")",
")",
"base36",
"=",
"ALPHABET",
"[",
"i",
"]",
"+",
"base36",
"return",
"sign",
"+",
"base36"
] |
Converts an integer into a base36 string.
|
[
"Converts",
"an",
"integer",
"into",
"a",
"base36",
"string",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/datasource.py#L63-L82
|
train
|
tableau/document-api-python
|
tableaudocumentapi/datasource.py
|
ConnectionParser.get_connections
|
def get_connections(self):
"""Find and return all connections based on file format version."""
if float(self._dsversion) < 10:
connections = self._extract_legacy_connection()
else:
connections = self._extract_federated_connections()
return connections
|
python
|
def get_connections(self):
"""Find and return all connections based on file format version."""
if float(self._dsversion) < 10:
connections = self._extract_legacy_connection()
else:
connections = self._extract_federated_connections()
return connections
|
[
"def",
"get_connections",
"(",
"self",
")",
":",
"if",
"float",
"(",
"self",
".",
"_dsversion",
")",
"<",
"10",
":",
"connections",
"=",
"self",
".",
"_extract_legacy_connection",
"(",
")",
"else",
":",
"connections",
"=",
"self",
".",
"_extract_federated_connections",
"(",
")",
"return",
"connections"
] |
Find and return all connections based on file format version.
|
[
"Find",
"and",
"return",
"all",
"connections",
"based",
"on",
"file",
"format",
"version",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/datasource.py#L108-L115
|
train
|
tableau/document-api-python
|
tableaudocumentapi/datasource.py
|
Datasource.from_connections
|
def from_connections(cls, caption, connections):
"""Create a new Data Source give a list of Connections."""
root = ET.Element('datasource', caption=caption, version='10.0', inline='true')
outer_connection = ET.SubElement(root, 'connection')
outer_connection.set('class', 'federated')
named_conns = ET.SubElement(outer_connection, 'named-connections')
for conn in connections:
nc = ET.SubElement(named_conns,
'named-connection',
name=_make_unique_name(conn.dbclass),
caption=conn.server)
nc.append(conn._connectionXML)
return cls(root)
|
python
|
def from_connections(cls, caption, connections):
"""Create a new Data Source give a list of Connections."""
root = ET.Element('datasource', caption=caption, version='10.0', inline='true')
outer_connection = ET.SubElement(root, 'connection')
outer_connection.set('class', 'federated')
named_conns = ET.SubElement(outer_connection, 'named-connections')
for conn in connections:
nc = ET.SubElement(named_conns,
'named-connection',
name=_make_unique_name(conn.dbclass),
caption=conn.server)
nc.append(conn._connectionXML)
return cls(root)
|
[
"def",
"from_connections",
"(",
"cls",
",",
"caption",
",",
"connections",
")",
":",
"root",
"=",
"ET",
".",
"Element",
"(",
"'datasource'",
",",
"caption",
"=",
"caption",
",",
"version",
"=",
"'10.0'",
",",
"inline",
"=",
"'true'",
")",
"outer_connection",
"=",
"ET",
".",
"SubElement",
"(",
"root",
",",
"'connection'",
")",
"outer_connection",
".",
"set",
"(",
"'class'",
",",
"'federated'",
")",
"named_conns",
"=",
"ET",
".",
"SubElement",
"(",
"outer_connection",
",",
"'named-connections'",
")",
"for",
"conn",
"in",
"connections",
":",
"nc",
"=",
"ET",
".",
"SubElement",
"(",
"named_conns",
",",
"'named-connection'",
",",
"name",
"=",
"_make_unique_name",
"(",
"conn",
".",
"dbclass",
")",
",",
"caption",
"=",
"conn",
".",
"server",
")",
"nc",
".",
"append",
"(",
"conn",
".",
"_connectionXML",
")",
"return",
"cls",
"(",
"root",
")"
] |
Create a new Data Source give a list of Connections.
|
[
"Create",
"a",
"new",
"Data",
"Source",
"give",
"a",
"list",
"of",
"Connections",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/datasource.py#L149-L162
|
train
|
tableau/document-api-python
|
tableaudocumentapi/field.py
|
Field.name
|
def name(self):
""" Provides a nice name for the field which is derived from the alias, caption, or the id.
The name resolves as either the alias if it's defined, or the caption if alias is not defined,
and finally the id which is the underlying name if neither of the fields exist. """
alias = getattr(self, 'alias', None)
if alias:
return alias
caption = getattr(self, 'caption', None)
if caption:
return caption
return self.id
|
python
|
def name(self):
""" Provides a nice name for the field which is derived from the alias, caption, or the id.
The name resolves as either the alias if it's defined, or the caption if alias is not defined,
and finally the id which is the underlying name if neither of the fields exist. """
alias = getattr(self, 'alias', None)
if alias:
return alias
caption = getattr(self, 'caption', None)
if caption:
return caption
return self.id
|
[
"def",
"name",
"(",
"self",
")",
":",
"alias",
"=",
"getattr",
"(",
"self",
",",
"'alias'",
",",
"None",
")",
"if",
"alias",
":",
"return",
"alias",
"caption",
"=",
"getattr",
"(",
"self",
",",
"'caption'",
",",
"None",
")",
"if",
"caption",
":",
"return",
"caption",
"return",
"self",
".",
"id"
] |
Provides a nice name for the field which is derived from the alias, caption, or the id.
The name resolves as either the alias if it's defined, or the caption if alias is not defined,
and finally the id which is the underlying name if neither of the fields exist.
|
[
"Provides",
"a",
"nice",
"name",
"for",
"the",
"field",
"which",
"is",
"derived",
"from",
"the",
"alias",
"caption",
"or",
"the",
"id",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/field.py#L99-L112
|
train
|
maraujop/requests-oauth2
|
requests_oauth2/oauth2.py
|
OAuth2._check_configuration
|
def _check_configuration(self, *attrs):
"""Check that each named attr has been configured
"""
for attr in attrs:
if getattr(self, attr, None) is None:
raise ConfigurationError("{} not configured".format(attr))
|
python
|
def _check_configuration(self, *attrs):
"""Check that each named attr has been configured
"""
for attr in attrs:
if getattr(self, attr, None) is None:
raise ConfigurationError("{} not configured".format(attr))
|
[
"def",
"_check_configuration",
"(",
"self",
",",
"*",
"attrs",
")",
":",
"for",
"attr",
"in",
"attrs",
":",
"if",
"getattr",
"(",
"self",
",",
"attr",
",",
"None",
")",
"is",
"None",
":",
"raise",
"ConfigurationError",
"(",
"\"{} not configured\"",
".",
"format",
"(",
"attr",
")",
")"
] |
Check that each named attr has been configured
|
[
"Check",
"that",
"each",
"named",
"attr",
"has",
"been",
"configured"
] |
191995aa571d0fbdf5bb166fb0668d5e73fe7817
|
https://github.com/maraujop/requests-oauth2/blob/191995aa571d0fbdf5bb166fb0668d5e73fe7817/requests_oauth2/oauth2.py#L41-L46
|
train
|
maraujop/requests-oauth2
|
requests_oauth2/oauth2.py
|
OAuth2._make_request
|
def _make_request(self, url, **kwargs):
"""
Make a request to an OAuth2 endpoint
"""
response = requests.post(url, **kwargs)
try:
return response.json()
except ValueError:
pass
return parse_qs(response.content)
|
python
|
def _make_request(self, url, **kwargs):
"""
Make a request to an OAuth2 endpoint
"""
response = requests.post(url, **kwargs)
try:
return response.json()
except ValueError:
pass
return parse_qs(response.content)
|
[
"def",
"_make_request",
"(",
"self",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"return",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
":",
"pass",
"return",
"parse_qs",
"(",
"response",
".",
"content",
")"
] |
Make a request to an OAuth2 endpoint
|
[
"Make",
"a",
"request",
"to",
"an",
"OAuth2",
"endpoint"
] |
191995aa571d0fbdf5bb166fb0668d5e73fe7817
|
https://github.com/maraujop/requests-oauth2/blob/191995aa571d0fbdf5bb166fb0668d5e73fe7817/requests_oauth2/oauth2.py#L48-L57
|
train
|
maraujop/requests-oauth2
|
requests_oauth2/oauth2.py
|
OAuth2.get_token
|
def get_token(self, code, headers=None, **kwargs):
"""
Requests an access token
"""
self._check_configuration("site", "token_url", "redirect_uri",
"client_id", "client_secret")
url = "%s%s" % (self.site, quote(self.token_url))
data = {
'redirect_uri': self.redirect_uri,
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
}
data.update(kwargs)
return self._make_request(url, data=data, headers=headers)
|
python
|
def get_token(self, code, headers=None, **kwargs):
"""
Requests an access token
"""
self._check_configuration("site", "token_url", "redirect_uri",
"client_id", "client_secret")
url = "%s%s" % (self.site, quote(self.token_url))
data = {
'redirect_uri': self.redirect_uri,
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
}
data.update(kwargs)
return self._make_request(url, data=data, headers=headers)
|
[
"def",
"get_token",
"(",
"self",
",",
"code",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_check_configuration",
"(",
"\"site\"",
",",
"\"token_url\"",
",",
"\"redirect_uri\"",
",",
"\"client_id\"",
",",
"\"client_secret\"",
")",
"url",
"=",
"\"%s%s\"",
"%",
"(",
"self",
".",
"site",
",",
"quote",
"(",
"self",
".",
"token_url",
")",
")",
"data",
"=",
"{",
"'redirect_uri'",
":",
"self",
".",
"redirect_uri",
",",
"'client_id'",
":",
"self",
".",
"client_id",
",",
"'client_secret'",
":",
"self",
".",
"client_secret",
",",
"'code'",
":",
"code",
",",
"}",
"data",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"_make_request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
")"
] |
Requests an access token
|
[
"Requests",
"an",
"access",
"token"
] |
191995aa571d0fbdf5bb166fb0668d5e73fe7817
|
https://github.com/maraujop/requests-oauth2/blob/191995aa571d0fbdf5bb166fb0668d5e73fe7817/requests_oauth2/oauth2.py#L77-L92
|
train
|
maraujop/requests-oauth2
|
requests_oauth2/oauth2.py
|
OAuth2.refresh_token
|
def refresh_token(self, headers=None, **kwargs):
"""
Request a refreshed token
"""
self._check_configuration("site", "token_url", "client_id",
"client_secret")
url = "%s%s" % (self.site, quote(self.token_url))
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
}
data.update(kwargs)
return self._make_request(url, data=data, headers=headers)
|
python
|
def refresh_token(self, headers=None, **kwargs):
"""
Request a refreshed token
"""
self._check_configuration("site", "token_url", "client_id",
"client_secret")
url = "%s%s" % (self.site, quote(self.token_url))
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
}
data.update(kwargs)
return self._make_request(url, data=data, headers=headers)
|
[
"def",
"refresh_token",
"(",
"self",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_check_configuration",
"(",
"\"site\"",
",",
"\"token_url\"",
",",
"\"client_id\"",
",",
"\"client_secret\"",
")",
"url",
"=",
"\"%s%s\"",
"%",
"(",
"self",
".",
"site",
",",
"quote",
"(",
"self",
".",
"token_url",
")",
")",
"data",
"=",
"{",
"'client_id'",
":",
"self",
".",
"client_id",
",",
"'client_secret'",
":",
"self",
".",
"client_secret",
",",
"}",
"data",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"_make_request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
")"
] |
Request a refreshed token
|
[
"Request",
"a",
"refreshed",
"token"
] |
191995aa571d0fbdf5bb166fb0668d5e73fe7817
|
https://github.com/maraujop/requests-oauth2/blob/191995aa571d0fbdf5bb166fb0668d5e73fe7817/requests_oauth2/oauth2.py#L94-L107
|
train
|
maraujop/requests-oauth2
|
requests_oauth2/oauth2.py
|
OAuth2.revoke_token
|
def revoke_token(self, token, headers=None, **kwargs):
"""
Revoke an access token
"""
self._check_configuration("site", "revoke_uri")
url = "%s%s" % (self.site, quote(self.revoke_url))
data = {'token': token}
data.update(kwargs)
return self._make_request(url, data=data, headers=headers)
|
python
|
def revoke_token(self, token, headers=None, **kwargs):
"""
Revoke an access token
"""
self._check_configuration("site", "revoke_uri")
url = "%s%s" % (self.site, quote(self.revoke_url))
data = {'token': token}
data.update(kwargs)
return self._make_request(url, data=data, headers=headers)
|
[
"def",
"revoke_token",
"(",
"self",
",",
"token",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_check_configuration",
"(",
"\"site\"",
",",
"\"revoke_uri\"",
")",
"url",
"=",
"\"%s%s\"",
"%",
"(",
"self",
".",
"site",
",",
"quote",
"(",
"self",
".",
"revoke_url",
")",
")",
"data",
"=",
"{",
"'token'",
":",
"token",
"}",
"data",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"_make_request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
")"
] |
Revoke an access token
|
[
"Revoke",
"an",
"access",
"token"
] |
191995aa571d0fbdf5bb166fb0668d5e73fe7817
|
https://github.com/maraujop/requests-oauth2/blob/191995aa571d0fbdf5bb166fb0668d5e73fe7817/requests_oauth2/oauth2.py#L109-L118
|
train
|
jorgenkg/python-neural-network
|
nimblenet/neuralnet.py
|
NeuralNet.save_network_to_file
|
def save_network_to_file(self, filename = "network0.pkl" ):
import cPickle, os, re
"""
This save method pickles the parameters of the current network into a
binary file for persistant storage.
"""
if filename == "network0.pkl":
while os.path.exists( os.path.join(os.getcwd(), filename )):
filename = re.sub('\d(?!\d)', lambda x: str(int(x.group(0)) + 1), filename)
with open( filename , 'wb') as file:
store_dict = {
"n_inputs" : self.n_inputs,
"layers" : self.layers,
"n_weights" : self.n_weights,
"weights" : self.weights,
}
cPickle.dump( store_dict, file, 2 )
|
python
|
def save_network_to_file(self, filename = "network0.pkl" ):
import cPickle, os, re
"""
This save method pickles the parameters of the current network into a
binary file for persistant storage.
"""
if filename == "network0.pkl":
while os.path.exists( os.path.join(os.getcwd(), filename )):
filename = re.sub('\d(?!\d)', lambda x: str(int(x.group(0)) + 1), filename)
with open( filename , 'wb') as file:
store_dict = {
"n_inputs" : self.n_inputs,
"layers" : self.layers,
"n_weights" : self.n_weights,
"weights" : self.weights,
}
cPickle.dump( store_dict, file, 2 )
|
[
"def",
"save_network_to_file",
"(",
"self",
",",
"filename",
"=",
"\"network0.pkl\"",
")",
":",
"import",
"cPickle",
",",
"os",
",",
"re",
"if",
"filename",
"==",
"\"network0.pkl\"",
":",
"while",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"filename",
")",
")",
":",
"filename",
"=",
"re",
".",
"sub",
"(",
"'\\d(?!\\d)'",
",",
"lambda",
"x",
":",
"str",
"(",
"int",
"(",
"x",
".",
"group",
"(",
"0",
")",
")",
"+",
"1",
")",
",",
"filename",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"file",
":",
"store_dict",
"=",
"{",
"\"n_inputs\"",
":",
"self",
".",
"n_inputs",
",",
"\"layers\"",
":",
"self",
".",
"layers",
",",
"\"n_weights\"",
":",
"self",
".",
"n_weights",
",",
"\"weights\"",
":",
"self",
".",
"weights",
",",
"}",
"cPickle",
".",
"dump",
"(",
"store_dict",
",",
"file",
",",
"2",
")"
] |
This save method pickles the parameters of the current network into a
binary file for persistant storage.
|
[
"This",
"save",
"method",
"pickles",
"the",
"parameters",
"of",
"the",
"current",
"network",
"into",
"a",
"binary",
"file",
"for",
"persistant",
"storage",
"."
] |
617b9940fa157d54d7831c42c0f7ba6857239b9a
|
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/neuralnet.py#L194-L212
|
train
|
jorgenkg/python-neural-network
|
nimblenet/neuralnet.py
|
NeuralNet.load_network_from_file
|
def load_network_from_file( filename ):
import cPickle
"""
Load the complete configuration of a previously stored network.
"""
network = NeuralNet( {"n_inputs":1, "layers":[[0,None]]} )
with open( filename , 'rb') as file:
store_dict = cPickle.load(file)
network.n_inputs = store_dict["n_inputs"]
network.n_weights = store_dict["n_weights"]
network.layers = store_dict["layers"]
network.weights = store_dict["weights"]
return network
|
python
|
def load_network_from_file( filename ):
import cPickle
"""
Load the complete configuration of a previously stored network.
"""
network = NeuralNet( {"n_inputs":1, "layers":[[0,None]]} )
with open( filename , 'rb') as file:
store_dict = cPickle.load(file)
network.n_inputs = store_dict["n_inputs"]
network.n_weights = store_dict["n_weights"]
network.layers = store_dict["layers"]
network.weights = store_dict["weights"]
return network
|
[
"def",
"load_network_from_file",
"(",
"filename",
")",
":",
"import",
"cPickle",
"network",
"=",
"NeuralNet",
"(",
"{",
"\"n_inputs\"",
":",
"1",
",",
"\"layers\"",
":",
"[",
"[",
"0",
",",
"None",
"]",
"]",
"}",
")",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"file",
":",
"store_dict",
"=",
"cPickle",
".",
"load",
"(",
"file",
")",
"network",
".",
"n_inputs",
"=",
"store_dict",
"[",
"\"n_inputs\"",
"]",
"network",
".",
"n_weights",
"=",
"store_dict",
"[",
"\"n_weights\"",
"]",
"network",
".",
"layers",
"=",
"store_dict",
"[",
"\"layers\"",
"]",
"network",
".",
"weights",
"=",
"store_dict",
"[",
"\"weights\"",
"]",
"return",
"network"
] |
Load the complete configuration of a previously stored network.
|
[
"Load",
"the",
"complete",
"configuration",
"of",
"a",
"previously",
"stored",
"network",
"."
] |
617b9940fa157d54d7831c42c0f7ba6857239b9a
|
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/neuralnet.py#L216-L231
|
train
|
jorgenkg/python-neural-network
|
nimblenet/preprocessing.py
|
replace_nan
|
def replace_nan( trainingset, replace_with = None ): # if replace_with = None, replaces with mean value
"""
Replace instanced of "not a number" with either the mean of the signal feature
or a specific value assigned by `replace_nan_with`
"""
training_data = np.array( [instance.features for instance in trainingset ] ).astype( np.float64 )
def encoder( dataset ):
for instance in dataset:
instance.features = instance.features.astype( np.float64 )
if np.sum(np.isnan( instance.features )):
if replace_with == None:
instance.features[ np.isnan( instance.features ) ] = means[ np.isnan( instance.features ) ]
else:
instance.features[ np.isnan( instance.features ) ] = replace_with
return dataset
#end
if replace_nan_with == None:
means = np.mean( np.nan_to_num(training_data), axis=0 )
return encoder
|
python
|
def replace_nan( trainingset, replace_with = None ): # if replace_with = None, replaces with mean value
"""
Replace instanced of "not a number" with either the mean of the signal feature
or a specific value assigned by `replace_nan_with`
"""
training_data = np.array( [instance.features for instance in trainingset ] ).astype( np.float64 )
def encoder( dataset ):
for instance in dataset:
instance.features = instance.features.astype( np.float64 )
if np.sum(np.isnan( instance.features )):
if replace_with == None:
instance.features[ np.isnan( instance.features ) ] = means[ np.isnan( instance.features ) ]
else:
instance.features[ np.isnan( instance.features ) ] = replace_with
return dataset
#end
if replace_nan_with == None:
means = np.mean( np.nan_to_num(training_data), axis=0 )
return encoder
|
[
"def",
"replace_nan",
"(",
"trainingset",
",",
"replace_with",
"=",
"None",
")",
":",
"# if replace_with = None, replaces with mean value",
"training_data",
"=",
"np",
".",
"array",
"(",
"[",
"instance",
".",
"features",
"for",
"instance",
"in",
"trainingset",
"]",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"def",
"encoder",
"(",
"dataset",
")",
":",
"for",
"instance",
"in",
"dataset",
":",
"instance",
".",
"features",
"=",
"instance",
".",
"features",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"if",
"np",
".",
"sum",
"(",
"np",
".",
"isnan",
"(",
"instance",
".",
"features",
")",
")",
":",
"if",
"replace_with",
"==",
"None",
":",
"instance",
".",
"features",
"[",
"np",
".",
"isnan",
"(",
"instance",
".",
"features",
")",
"]",
"=",
"means",
"[",
"np",
".",
"isnan",
"(",
"instance",
".",
"features",
")",
"]",
"else",
":",
"instance",
".",
"features",
"[",
"np",
".",
"isnan",
"(",
"instance",
".",
"features",
")",
"]",
"=",
"replace_with",
"return",
"dataset",
"#end",
"if",
"replace_nan_with",
"==",
"None",
":",
"means",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"nan_to_num",
"(",
"training_data",
")",
",",
"axis",
"=",
"0",
")",
"return",
"encoder"
] |
Replace instanced of "not a number" with either the mean of the signal feature
or a specific value assigned by `replace_nan_with`
|
[
"Replace",
"instanced",
"of",
"not",
"a",
"number",
"with",
"either",
"the",
"mean",
"of",
"the",
"signal",
"feature",
"or",
"a",
"specific",
"value",
"assigned",
"by",
"replace_nan_with"
] |
617b9940fa157d54d7831c42c0f7ba6857239b9a
|
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/preprocessing.py#L47-L69
|
train
|
jorgenkg/python-neural-network
|
nimblenet/activation_functions.py
|
elliot_function
|
def elliot_function( signal, derivative=False ):
""" A fast approximation of sigmoid """
s = 1 # steepness
abs_signal = (1 + np.abs(signal * s))
if derivative:
return 0.5 * s / abs_signal**2
else:
# Return the activation signal
return 0.5*(signal * s) / abs_signal + 0.5
|
python
|
def elliot_function( signal, derivative=False ):
""" A fast approximation of sigmoid """
s = 1 # steepness
abs_signal = (1 + np.abs(signal * s))
if derivative:
return 0.5 * s / abs_signal**2
else:
# Return the activation signal
return 0.5*(signal * s) / abs_signal + 0.5
|
[
"def",
"elliot_function",
"(",
"signal",
",",
"derivative",
"=",
"False",
")",
":",
"s",
"=",
"1",
"# steepness",
"abs_signal",
"=",
"(",
"1",
"+",
"np",
".",
"abs",
"(",
"signal",
"*",
"s",
")",
")",
"if",
"derivative",
":",
"return",
"0.5",
"*",
"s",
"/",
"abs_signal",
"**",
"2",
"else",
":",
"# Return the activation signal",
"return",
"0.5",
"*",
"(",
"signal",
"*",
"s",
")",
"/",
"abs_signal",
"+",
"0.5"
] |
A fast approximation of sigmoid
|
[
"A",
"fast",
"approximation",
"of",
"sigmoid"
] |
617b9940fa157d54d7831c42c0f7ba6857239b9a
|
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/activation_functions.py#L39-L48
|
train
|
jorgenkg/python-neural-network
|
nimblenet/activation_functions.py
|
symmetric_elliot_function
|
def symmetric_elliot_function( signal, derivative=False ):
""" A fast approximation of tanh """
s = 1.0 # steepness
abs_signal = (1 + np.abs(signal * s))
if derivative:
return s / abs_signal**2
else:
# Return the activation signal
return (signal * s) / abs_signal
|
python
|
def symmetric_elliot_function( signal, derivative=False ):
""" A fast approximation of tanh """
s = 1.0 # steepness
abs_signal = (1 + np.abs(signal * s))
if derivative:
return s / abs_signal**2
else:
# Return the activation signal
return (signal * s) / abs_signal
|
[
"def",
"symmetric_elliot_function",
"(",
"signal",
",",
"derivative",
"=",
"False",
")",
":",
"s",
"=",
"1.0",
"# steepness",
"abs_signal",
"=",
"(",
"1",
"+",
"np",
".",
"abs",
"(",
"signal",
"*",
"s",
")",
")",
"if",
"derivative",
":",
"return",
"s",
"/",
"abs_signal",
"**",
"2",
"else",
":",
"# Return the activation signal",
"return",
"(",
"signal",
"*",
"s",
")",
"/",
"abs_signal"
] |
A fast approximation of tanh
|
[
"A",
"fast",
"approximation",
"of",
"tanh"
] |
617b9940fa157d54d7831c42c0f7ba6857239b9a
|
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/activation_functions.py#L52-L61
|
train
|
jorgenkg/python-neural-network
|
nimblenet/activation_functions.py
|
LReLU_function
|
def LReLU_function( signal, derivative=False, leakage = 0.01 ):
"""
Leaky Rectified Linear Unit
"""
if derivative:
# Return the partial derivation of the activation function
return np.clip(signal > 0, leakage, 1.0)
else:
# Return the activation signal
output = np.copy( signal )
output[ output < 0 ] *= leakage
return output
|
python
|
def LReLU_function( signal, derivative=False, leakage = 0.01 ):
"""
Leaky Rectified Linear Unit
"""
if derivative:
# Return the partial derivation of the activation function
return np.clip(signal > 0, leakage, 1.0)
else:
# Return the activation signal
output = np.copy( signal )
output[ output < 0 ] *= leakage
return output
|
[
"def",
"LReLU_function",
"(",
"signal",
",",
"derivative",
"=",
"False",
",",
"leakage",
"=",
"0.01",
")",
":",
"if",
"derivative",
":",
"# Return the partial derivation of the activation function",
"return",
"np",
".",
"clip",
"(",
"signal",
">",
"0",
",",
"leakage",
",",
"1.0",
")",
"else",
":",
"# Return the activation signal",
"output",
"=",
"np",
".",
"copy",
"(",
"signal",
")",
"output",
"[",
"output",
"<",
"0",
"]",
"*=",
"leakage",
"return",
"output"
] |
Leaky Rectified Linear Unit
|
[
"Leaky",
"Rectified",
"Linear",
"Unit"
] |
617b9940fa157d54d7831c42c0f7ba6857239b9a
|
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/activation_functions.py#L74-L85
|
train
|
mikusjelly/apkutils
|
apkutils/apkfile.py
|
is_zipfile
|
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except OSError:
pass
return result
|
python
|
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except OSError:
pass
return result
|
[
"def",
"is_zipfile",
"(",
"filename",
")",
":",
"result",
"=",
"False",
"try",
":",
"if",
"hasattr",
"(",
"filename",
",",
"\"read\"",
")",
":",
"result",
"=",
"_check_zipfile",
"(",
"fp",
"=",
"filename",
")",
"else",
":",
"with",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"fp",
":",
"result",
"=",
"_check_zipfile",
"(",
"fp",
")",
"except",
"OSError",
":",
"pass",
"return",
"result"
] |
Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
|
[
"Quickly",
"see",
"if",
"a",
"file",
"is",
"a",
"ZIP",
"file",
"by",
"checking",
"the",
"magic",
"number",
"."
] |
2db1ed0cdb610dfc55bfd77266e9a91e4764bba4
|
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L182-L196
|
train
|
mikusjelly/apkutils
|
apkutils/apkfile.py
|
ZipExtFile.readline
|
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
|
python
|
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
|
[
"def",
"readline",
"(",
"self",
",",
"limit",
"=",
"-",
"1",
")",
":",
"if",
"not",
"self",
".",
"_universal",
"and",
"limit",
"<",
"0",
":",
"# Shortcut common case - newline found in buffer.",
"i",
"=",
"self",
".",
"_readbuffer",
".",
"find",
"(",
"b'\\n'",
",",
"self",
".",
"_offset",
")",
"+",
"1",
"if",
"i",
">",
"0",
":",
"line",
"=",
"self",
".",
"_readbuffer",
"[",
"self",
".",
"_offset",
":",
"i",
"]",
"self",
".",
"_offset",
"=",
"i",
"return",
"line",
"if",
"not",
"self",
".",
"_universal",
":",
"return",
"io",
".",
"BufferedIOBase",
".",
"readline",
"(",
"self",
",",
"limit",
")",
"line",
"=",
"b''",
"while",
"limit",
"<",
"0",
"or",
"len",
"(",
"line",
")",
"<",
"limit",
":",
"readahead",
"=",
"self",
".",
"peek",
"(",
"2",
")",
"if",
"readahead",
"==",
"b''",
":",
"return",
"line",
"#",
"# Search for universal newlines or line chunks.",
"#",
"# The pattern returns either a line chunk or a newline, but not",
"# both. Combined with peek(2), we are assured that the sequence",
"# '\\r\\n' is always retrieved completely and never split into",
"# separate newlines - '\\r', '\\n' due to coincidental readaheads.",
"#",
"match",
"=",
"self",
".",
"PATTERN",
".",
"search",
"(",
"readahead",
")",
"newline",
"=",
"match",
".",
"group",
"(",
"'newline'",
")",
"if",
"newline",
"is",
"not",
"None",
":",
"if",
"self",
".",
"newlines",
"is",
"None",
":",
"self",
".",
"newlines",
"=",
"[",
"]",
"if",
"newline",
"not",
"in",
"self",
".",
"newlines",
":",
"self",
".",
"newlines",
".",
"append",
"(",
"newline",
")",
"self",
".",
"_offset",
"+=",
"len",
"(",
"newline",
")",
"return",
"line",
"+",
"b'\\n'",
"chunk",
"=",
"match",
".",
"group",
"(",
"'chunk'",
")",
"if",
"limit",
">=",
"0",
":",
"chunk",
"=",
"chunk",
"[",
":",
"limit",
"-",
"len",
"(",
"line",
")",
"]",
"self",
".",
"_offset",
"+=",
"len",
"(",
"chunk",
")",
"line",
"+=",
"chunk",
"return",
"line"
] |
Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
|
[
"Read",
"and",
"return",
"a",
"line",
"from",
"the",
"stream",
"."
] |
2db1ed0cdb610dfc55bfd77266e9a91e4764bba4
|
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L758-L806
|
train
|
mikusjelly/apkutils
|
apkutils/apkfile.py
|
ZipFile.setpassword
|
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
|
python
|
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
|
[
"def",
"setpassword",
"(",
"self",
",",
"pwd",
")",
":",
"if",
"pwd",
"and",
"not",
"isinstance",
"(",
"pwd",
",",
"bytes",
")",
":",
"raise",
"TypeError",
"(",
"\"pwd: expected bytes, got %s\"",
"%",
"type",
"(",
"pwd",
")",
")",
"if",
"pwd",
":",
"self",
".",
"pwd",
"=",
"pwd",
"else",
":",
"self",
".",
"pwd",
"=",
"None"
] |
Set default password for encrypted files.
|
[
"Set",
"default",
"password",
"for",
"encrypted",
"files",
"."
] |
2db1ed0cdb610dfc55bfd77266e9a91e4764bba4
|
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1204-L1211
|
train
|
mikusjelly/apkutils
|
apkutils/apkfile.py
|
ZipFile._sanitize_windows_name
|
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
|
python
|
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
|
[
"def",
"_sanitize_windows_name",
"(",
"cls",
",",
"arcname",
",",
"pathsep",
")",
":",
"table",
"=",
"cls",
".",
"_windows_illegal_name_trans_table",
"if",
"not",
"table",
":",
"illegal",
"=",
"':<>|\"?*'",
"table",
"=",
"str",
".",
"maketrans",
"(",
"illegal",
",",
"'_'",
"*",
"len",
"(",
"illegal",
")",
")",
"cls",
".",
"_windows_illegal_name_trans_table",
"=",
"table",
"arcname",
"=",
"arcname",
".",
"translate",
"(",
"table",
")",
"# remove trailing dots",
"arcname",
"=",
"(",
"x",
".",
"rstrip",
"(",
"'.'",
")",
"for",
"x",
"in",
"arcname",
".",
"split",
"(",
"pathsep",
")",
")",
"# rejoin, removing empty parts.",
"arcname",
"=",
"pathsep",
".",
"join",
"(",
"x",
"for",
"x",
"in",
"arcname",
"if",
"x",
")",
"return",
"arcname"
] |
Replace bad characters and remove trailing dots from parts.
|
[
"Replace",
"bad",
"characters",
"and",
"remove",
"trailing",
"dots",
"from",
"parts",
"."
] |
2db1ed0cdb610dfc55bfd77266e9a91e4764bba4
|
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1341-L1353
|
train
|
mikusjelly/apkutils
|
apkutils/apkfile.py
|
ZipFile.close
|
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
|
python
|
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"fp",
"is",
"None",
":",
"return",
"try",
":",
"if",
"self",
".",
"mode",
"in",
"(",
"'w'",
",",
"'x'",
",",
"'a'",
")",
"and",
"self",
".",
"_didModify",
":",
"# write ending records",
"with",
"self",
".",
"_lock",
":",
"if",
"self",
".",
"_seekable",
":",
"self",
".",
"fp",
".",
"seek",
"(",
"self",
".",
"start_dir",
")",
"self",
".",
"_write_end_record",
"(",
")",
"finally",
":",
"fp",
"=",
"self",
".",
"fp",
"self",
".",
"fp",
"=",
"None",
"self",
".",
"_fpclose",
"(",
"fp",
")"
] |
Close the file, and for mode 'w', 'x' and 'a' write the ending
records.
|
[
"Close",
"the",
"file",
"and",
"for",
"mode",
"w",
"x",
"and",
"a",
"write",
"the",
"ending",
"records",
"."
] |
2db1ed0cdb610dfc55bfd77266e9a91e4764bba4
|
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1588-L1603
|
train
|
mikusjelly/apkutils
|
apkutils/elf/elfparser.py
|
ELF.display_string_dump
|
def display_string_dump(self, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = _section_from_spec(self.elf_file, section_spec)
if section is None:
print("Section '%s' does not exist in the file!" % section_spec)
return None
data = section.data()
dataptr = 0
strs = []
while dataptr < len(data):
while dataptr < len(data) and not 32 <= byte2int(data[dataptr]) <= 127:
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
strs.append(binascii.b2a_hex(
data[dataptr:endptr]).decode().upper())
dataptr = endptr
return strs
|
python
|
def display_string_dump(self, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = _section_from_spec(self.elf_file, section_spec)
if section is None:
print("Section '%s' does not exist in the file!" % section_spec)
return None
data = section.data()
dataptr = 0
strs = []
while dataptr < len(data):
while dataptr < len(data) and not 32 <= byte2int(data[dataptr]) <= 127:
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
strs.append(binascii.b2a_hex(
data[dataptr:endptr]).decode().upper())
dataptr = endptr
return strs
|
[
"def",
"display_string_dump",
"(",
"self",
",",
"section_spec",
")",
":",
"section",
"=",
"_section_from_spec",
"(",
"self",
".",
"elf_file",
",",
"section_spec",
")",
"if",
"section",
"is",
"None",
":",
"print",
"(",
"\"Section '%s' does not exist in the file!\"",
"%",
"section_spec",
")",
"return",
"None",
"data",
"=",
"section",
".",
"data",
"(",
")",
"dataptr",
"=",
"0",
"strs",
"=",
"[",
"]",
"while",
"dataptr",
"<",
"len",
"(",
"data",
")",
":",
"while",
"dataptr",
"<",
"len",
"(",
"data",
")",
"and",
"not",
"32",
"<=",
"byte2int",
"(",
"data",
"[",
"dataptr",
"]",
")",
"<=",
"127",
":",
"dataptr",
"+=",
"1",
"if",
"dataptr",
">=",
"len",
"(",
"data",
")",
":",
"break",
"endptr",
"=",
"dataptr",
"while",
"endptr",
"<",
"len",
"(",
"data",
")",
"and",
"byte2int",
"(",
"data",
"[",
"endptr",
"]",
")",
"!=",
"0",
":",
"endptr",
"+=",
"1",
"strs",
".",
"append",
"(",
"binascii",
".",
"b2a_hex",
"(",
"data",
"[",
"dataptr",
":",
"endptr",
"]",
")",
".",
"decode",
"(",
")",
".",
"upper",
"(",
")",
")",
"dataptr",
"=",
"endptr",
"return",
"strs"
] |
Display a strings dump of a section. section_spec is either a
section number or a name.
|
[
"Display",
"a",
"strings",
"dump",
"of",
"a",
"section",
".",
"section_spec",
"is",
"either",
"a",
"section",
"number",
"or",
"a",
"name",
"."
] |
2db1ed0cdb610dfc55bfd77266e9a91e4764bba4
|
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/elf/elfparser.py#L57-L85
|
train
|
google/fleetspeak
|
fleetspeak/src/client/daemonservice/client/client.py
|
_EnvOpen
|
def _EnvOpen(var, mode):
"""Open a file descriptor identified by an environment variable."""
value = os.getenv(var)
if value is None:
raise ValueError("%s is not set" % var)
fd = int(value)
# If running on Windows, convert the file handle to a C file descriptor; see:
# https://groups.google.com/forum/#!topic/dev-python/GeN5bFJWfJ4
if _WINDOWS:
fd = msvcrt.open_osfhandle(fd, 0)
return os.fdopen(fd, mode)
|
python
|
def _EnvOpen(var, mode):
"""Open a file descriptor identified by an environment variable."""
value = os.getenv(var)
if value is None:
raise ValueError("%s is not set" % var)
fd = int(value)
# If running on Windows, convert the file handle to a C file descriptor; see:
# https://groups.google.com/forum/#!topic/dev-python/GeN5bFJWfJ4
if _WINDOWS:
fd = msvcrt.open_osfhandle(fd, 0)
return os.fdopen(fd, mode)
|
[
"def",
"_EnvOpen",
"(",
"var",
",",
"mode",
")",
":",
"value",
"=",
"os",
".",
"getenv",
"(",
"var",
")",
"if",
"value",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"%s is not set\"",
"%",
"var",
")",
"fd",
"=",
"int",
"(",
"value",
")",
"# If running on Windows, convert the file handle to a C file descriptor; see:",
"# https://groups.google.com/forum/#!topic/dev-python/GeN5bFJWfJ4",
"if",
"_WINDOWS",
":",
"fd",
"=",
"msvcrt",
".",
"open_osfhandle",
"(",
"fd",
",",
"0",
")",
"return",
"os",
".",
"fdopen",
"(",
"fd",
",",
"mode",
")"
] |
Open a file descriptor identified by an environment variable.
|
[
"Open",
"a",
"file",
"descriptor",
"identified",
"by",
"an",
"environment",
"variable",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/client/daemonservice/client/client.py#L58-L71
|
train
|
google/fleetspeak
|
fleetspeak/src/client/daemonservice/client/client.py
|
FleetspeakConnection.Send
|
def Send(self, message):
"""Send a message through Fleetspeak.
Args:
message: A message protocol buffer.
Returns:
Size of the message in bytes.
Raises:
ValueError: If message is not a common_pb2.Message.
"""
if not isinstance(message, common_pb2.Message):
raise ValueError("Send requires a fleetspeak.Message")
if message.destination.service_name == "system":
raise ValueError(
"Only predefined messages can have destination.service_name == \"system\"")
return self._SendImpl(message)
|
python
|
def Send(self, message):
"""Send a message through Fleetspeak.
Args:
message: A message protocol buffer.
Returns:
Size of the message in bytes.
Raises:
ValueError: If message is not a common_pb2.Message.
"""
if not isinstance(message, common_pb2.Message):
raise ValueError("Send requires a fleetspeak.Message")
if message.destination.service_name == "system":
raise ValueError(
"Only predefined messages can have destination.service_name == \"system\"")
return self._SendImpl(message)
|
[
"def",
"Send",
"(",
"self",
",",
"message",
")",
":",
"if",
"not",
"isinstance",
"(",
"message",
",",
"common_pb2",
".",
"Message",
")",
":",
"raise",
"ValueError",
"(",
"\"Send requires a fleetspeak.Message\"",
")",
"if",
"message",
".",
"destination",
".",
"service_name",
"==",
"\"system\"",
":",
"raise",
"ValueError",
"(",
"\"Only predefined messages can have destination.service_name == \\\"system\\\"\"",
")",
"return",
"self",
".",
"_SendImpl",
"(",
"message",
")"
] |
Send a message through Fleetspeak.
Args:
message: A message protocol buffer.
Returns:
Size of the message in bytes.
Raises:
ValueError: If message is not a common_pb2.Message.
|
[
"Send",
"a",
"message",
"through",
"Fleetspeak",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/client/daemonservice/client/client.py#L126-L143
|
train
|
google/fleetspeak
|
fleetspeak/src/client/daemonservice/client/client.py
|
FleetspeakConnection.Recv
|
def Recv(self):
"""Accept a message from Fleetspeak.
Returns:
A tuple (common_pb2.Message, size of the message in bytes).
Raises:
ProtocolError: If we receive unexpected data from Fleetspeak.
"""
size = struct.unpack(_STRUCT_FMT, self._ReadN(_STRUCT_LEN))[0]
if size > MAX_SIZE:
raise ProtocolError("Expected size to be at most %d, got %d" % (MAX_SIZE,
size))
with self._read_lock:
buf = self._ReadN(size)
self._ReadMagic()
res = common_pb2.Message()
res.ParseFromString(buf)
return res, len(buf)
|
python
|
def Recv(self):
"""Accept a message from Fleetspeak.
Returns:
A tuple (common_pb2.Message, size of the message in bytes).
Raises:
ProtocolError: If we receive unexpected data from Fleetspeak.
"""
size = struct.unpack(_STRUCT_FMT, self._ReadN(_STRUCT_LEN))[0]
if size > MAX_SIZE:
raise ProtocolError("Expected size to be at most %d, got %d" % (MAX_SIZE,
size))
with self._read_lock:
buf = self._ReadN(size)
self._ReadMagic()
res = common_pb2.Message()
res.ParseFromString(buf)
return res, len(buf)
|
[
"def",
"Recv",
"(",
"self",
")",
":",
"size",
"=",
"struct",
".",
"unpack",
"(",
"_STRUCT_FMT",
",",
"self",
".",
"_ReadN",
"(",
"_STRUCT_LEN",
")",
")",
"[",
"0",
"]",
"if",
"size",
">",
"MAX_SIZE",
":",
"raise",
"ProtocolError",
"(",
"\"Expected size to be at most %d, got %d\"",
"%",
"(",
"MAX_SIZE",
",",
"size",
")",
")",
"with",
"self",
".",
"_read_lock",
":",
"buf",
"=",
"self",
".",
"_ReadN",
"(",
"size",
")",
"self",
".",
"_ReadMagic",
"(",
")",
"res",
"=",
"common_pb2",
".",
"Message",
"(",
")",
"res",
".",
"ParseFromString",
"(",
"buf",
")",
"return",
"res",
",",
"len",
"(",
"buf",
")"
] |
Accept a message from Fleetspeak.
Returns:
A tuple (common_pb2.Message, size of the message in bytes).
Raises:
ProtocolError: If we receive unexpected data from Fleetspeak.
|
[
"Accept",
"a",
"message",
"from",
"Fleetspeak",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/client/daemonservice/client/client.py#L162-L181
|
train
|
google/fleetspeak
|
fleetspeak/src/client/daemonservice/client/client.py
|
FleetspeakConnection.Heartbeat
|
def Heartbeat(self):
"""Sends a heartbeat to the Fleetspeak client.
If this daemonservice is configured to use heartbeats, clients that don't
call this method often enough are considered faulty and are restarted by
Fleetspeak.
"""
heartbeat_msg = common_pb2.Message(
message_type="Heartbeat",
destination=common_pb2.Address(service_name="system"))
self._SendImpl(heartbeat_msg)
|
python
|
def Heartbeat(self):
"""Sends a heartbeat to the Fleetspeak client.
If this daemonservice is configured to use heartbeats, clients that don't
call this method often enough are considered faulty and are restarted by
Fleetspeak.
"""
heartbeat_msg = common_pb2.Message(
message_type="Heartbeat",
destination=common_pb2.Address(service_name="system"))
self._SendImpl(heartbeat_msg)
|
[
"def",
"Heartbeat",
"(",
"self",
")",
":",
"heartbeat_msg",
"=",
"common_pb2",
".",
"Message",
"(",
"message_type",
"=",
"\"Heartbeat\"",
",",
"destination",
"=",
"common_pb2",
".",
"Address",
"(",
"service_name",
"=",
"\"system\"",
")",
")",
"self",
".",
"_SendImpl",
"(",
"heartbeat_msg",
")"
] |
Sends a heartbeat to the Fleetspeak client.
If this daemonservice is configured to use heartbeats, clients that don't
call this method often enough are considered faulty and are restarted by
Fleetspeak.
|
[
"Sends",
"a",
"heartbeat",
"to",
"the",
"Fleetspeak",
"client",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/client/daemonservice/client/client.py#L183-L193
|
train
|
google/fleetspeak
|
fleetspeak/src/client/daemonservice/client/client.py
|
FleetspeakConnection._ReadN
|
def _ReadN(self, n):
"""Reads n characters from the input stream, or until EOF.
This is equivalent to the current CPython implementation of read(n), but
not guaranteed by the docs.
Args:
n: int
Returns:
string
"""
ret = ""
while True:
chunk = self._read_file.read(n - len(ret))
ret += chunk
if len(ret) == n or not chunk:
return ret
|
python
|
def _ReadN(self, n):
"""Reads n characters from the input stream, or until EOF.
This is equivalent to the current CPython implementation of read(n), but
not guaranteed by the docs.
Args:
n: int
Returns:
string
"""
ret = ""
while True:
chunk = self._read_file.read(n - len(ret))
ret += chunk
if len(ret) == n or not chunk:
return ret
|
[
"def",
"_ReadN",
"(",
"self",
",",
"n",
")",
":",
"ret",
"=",
"\"\"",
"while",
"True",
":",
"chunk",
"=",
"self",
".",
"_read_file",
".",
"read",
"(",
"n",
"-",
"len",
"(",
"ret",
")",
")",
"ret",
"+=",
"chunk",
"if",
"len",
"(",
"ret",
")",
"==",
"n",
"or",
"not",
"chunk",
":",
"return",
"ret"
] |
Reads n characters from the input stream, or until EOF.
This is equivalent to the current CPython implementation of read(n), but
not guaranteed by the docs.
Args:
n: int
Returns:
string
|
[
"Reads",
"n",
"characters",
"from",
"the",
"input",
"stream",
"or",
"until",
"EOF",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/client/daemonservice/client/client.py#L214-L232
|
train
|
google/fleetspeak
|
setup.py
|
_CompileProtos
|
def _CompileProtos():
"""Compiles all Fleetspeak protos."""
proto_files = []
for dir_path, _, filenames in os.walk(THIS_DIRECTORY):
for filename in filenames:
if filename.endswith(".proto"):
proto_files.append(os.path.join(dir_path, filename))
if not proto_files:
return
protoc_command = [
"python", "-m", "grpc_tools.protoc",
"--python_out", THIS_DIRECTORY,
"--grpc_python_out", THIS_DIRECTORY,
"--proto_path", THIS_DIRECTORY,
]
protoc_command.extend(proto_files)
subprocess.check_output(protoc_command)
|
python
|
def _CompileProtos():
"""Compiles all Fleetspeak protos."""
proto_files = []
for dir_path, _, filenames in os.walk(THIS_DIRECTORY):
for filename in filenames:
if filename.endswith(".proto"):
proto_files.append(os.path.join(dir_path, filename))
if not proto_files:
return
protoc_command = [
"python", "-m", "grpc_tools.protoc",
"--python_out", THIS_DIRECTORY,
"--grpc_python_out", THIS_DIRECTORY,
"--proto_path", THIS_DIRECTORY,
]
protoc_command.extend(proto_files)
subprocess.check_output(protoc_command)
|
[
"def",
"_CompileProtos",
"(",
")",
":",
"proto_files",
"=",
"[",
"]",
"for",
"dir_path",
",",
"_",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"THIS_DIRECTORY",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"if",
"filename",
".",
"endswith",
"(",
"\".proto\"",
")",
":",
"proto_files",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"filename",
")",
")",
"if",
"not",
"proto_files",
":",
"return",
"protoc_command",
"=",
"[",
"\"python\"",
",",
"\"-m\"",
",",
"\"grpc_tools.protoc\"",
",",
"\"--python_out\"",
",",
"THIS_DIRECTORY",
",",
"\"--grpc_python_out\"",
",",
"THIS_DIRECTORY",
",",
"\"--proto_path\"",
",",
"THIS_DIRECTORY",
",",
"]",
"protoc_command",
".",
"extend",
"(",
"proto_files",
")",
"subprocess",
".",
"check_output",
"(",
"protoc_command",
")"
] |
Compiles all Fleetspeak protos.
|
[
"Compiles",
"all",
"Fleetspeak",
"protos",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/setup.py#L42-L58
|
train
|
google/fleetspeak
|
fleetspeak/src/server/grpcservice/client/client.py
|
OutgoingConnection._RetryLoop
|
def _RetryLoop(self, func, timeout=None):
"""Retries an operation until success or deadline.
Args:
func: The function to run. Must take a timeout, in seconds, as a single
parameter. If it raises grpc.RpcError and deadline has not be reached,
it will be run again.
timeout: Retries will continue until timeout seconds have passed.
"""
timeout = timeout or self.DEFAULT_TIMEOUT
deadline = time.time() + timeout
sleep = 1
while True:
try:
return func(timeout)
except grpc.RpcError:
if time.time() + sleep > deadline:
raise
time.sleep(sleep)
sleep *= 2
timeout = deadline - time.time()
|
python
|
def _RetryLoop(self, func, timeout=None):
"""Retries an operation until success or deadline.
Args:
func: The function to run. Must take a timeout, in seconds, as a single
parameter. If it raises grpc.RpcError and deadline has not be reached,
it will be run again.
timeout: Retries will continue until timeout seconds have passed.
"""
timeout = timeout or self.DEFAULT_TIMEOUT
deadline = time.time() + timeout
sleep = 1
while True:
try:
return func(timeout)
except grpc.RpcError:
if time.time() + sleep > deadline:
raise
time.sleep(sleep)
sleep *= 2
timeout = deadline - time.time()
|
[
"def",
"_RetryLoop",
"(",
"self",
",",
"func",
",",
"timeout",
"=",
"None",
")",
":",
"timeout",
"=",
"timeout",
"or",
"self",
".",
"DEFAULT_TIMEOUT",
"deadline",
"=",
"time",
".",
"time",
"(",
")",
"+",
"timeout",
"sleep",
"=",
"1",
"while",
"True",
":",
"try",
":",
"return",
"func",
"(",
"timeout",
")",
"except",
"grpc",
".",
"RpcError",
":",
"if",
"time",
".",
"time",
"(",
")",
"+",
"sleep",
">",
"deadline",
":",
"raise",
"time",
".",
"sleep",
"(",
"sleep",
")",
"sleep",
"*=",
"2",
"timeout",
"=",
"deadline",
"-",
"time",
".",
"time",
"(",
")"
] |
Retries an operation until success or deadline.
Args:
func: The function to run. Must take a timeout, in seconds, as a single
parameter. If it raises grpc.RpcError and deadline has not be reached,
it will be run again.
timeout: Retries will continue until timeout seconds have passed.
|
[
"Retries",
"an",
"operation",
"until",
"success",
"or",
"deadline",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/server/grpcservice/client/client.py#L150-L172
|
train
|
google/fleetspeak
|
fleetspeak/src/server/grpcservice/client/client.py
|
OutgoingConnection.InsertMessage
|
def InsertMessage(self, message, timeout=None):
"""Inserts a message into the Fleetspeak server.
Sets message.source, if unset.
Args:
message: common_pb2.Message
The message to send.
timeout: How many seconds to try for.
Raises:
grpc.RpcError: if the RPC fails.
InvalidArgument: if message is not a common_pb2.Message.
"""
if not isinstance(message, common_pb2.Message):
raise InvalidArgument("Attempt to send unexpected message type: %s" %
message.__class__.__name__)
if not message.HasField("source"):
message.source.service_name = self._service_name
# Sometimes GRPC reports failure, even though the call succeeded. To prevent
# retry logic from creating duplicate messages we fix the message_id.
if not message.message_id:
message.message_id = os.urandom(32)
return self._RetryLoop(
lambda t: self._stub.InsertMessage(message, timeout=t))
|
python
|
def InsertMessage(self, message, timeout=None):
"""Inserts a message into the Fleetspeak server.
Sets message.source, if unset.
Args:
message: common_pb2.Message
The message to send.
timeout: How many seconds to try for.
Raises:
grpc.RpcError: if the RPC fails.
InvalidArgument: if message is not a common_pb2.Message.
"""
if not isinstance(message, common_pb2.Message):
raise InvalidArgument("Attempt to send unexpected message type: %s" %
message.__class__.__name__)
if not message.HasField("source"):
message.source.service_name = self._service_name
# Sometimes GRPC reports failure, even though the call succeeded. To prevent
# retry logic from creating duplicate messages we fix the message_id.
if not message.message_id:
message.message_id = os.urandom(32)
return self._RetryLoop(
lambda t: self._stub.InsertMessage(message, timeout=t))
|
[
"def",
"InsertMessage",
"(",
"self",
",",
"message",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"message",
",",
"common_pb2",
".",
"Message",
")",
":",
"raise",
"InvalidArgument",
"(",
"\"Attempt to send unexpected message type: %s\"",
"%",
"message",
".",
"__class__",
".",
"__name__",
")",
"if",
"not",
"message",
".",
"HasField",
"(",
"\"source\"",
")",
":",
"message",
".",
"source",
".",
"service_name",
"=",
"self",
".",
"_service_name",
"# Sometimes GRPC reports failure, even though the call succeeded. To prevent",
"# retry logic from creating duplicate messages we fix the message_id.",
"if",
"not",
"message",
".",
"message_id",
":",
"message",
".",
"message_id",
"=",
"os",
".",
"urandom",
"(",
"32",
")",
"return",
"self",
".",
"_RetryLoop",
"(",
"lambda",
"t",
":",
"self",
".",
"_stub",
".",
"InsertMessage",
"(",
"message",
",",
"timeout",
"=",
"t",
")",
")"
] |
Inserts a message into the Fleetspeak server.
Sets message.source, if unset.
Args:
message: common_pb2.Message
The message to send.
timeout: How many seconds to try for.
Raises:
grpc.RpcError: if the RPC fails.
InvalidArgument: if message is not a common_pb2.Message.
|
[
"Inserts",
"a",
"message",
"into",
"the",
"Fleetspeak",
"server",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/server/grpcservice/client/client.py#L174-L202
|
train
|
google/fleetspeak
|
fleetspeak/src/server/grpcservice/client/client.py
|
OutgoingConnection.ListClients
|
def ListClients(self, request, timeout=None):
"""Provides basic information about Fleetspeak clients.
Args:
request: fleetspeak.admin.ListClientsRequest
timeout: How many seconds to try for.
Returns: fleetspeak.admin.ListClientsResponse
"""
return self._RetryLoop(
lambda t: self._stub.ListClients(request, timeout=t))
|
python
|
def ListClients(self, request, timeout=None):
"""Provides basic information about Fleetspeak clients.
Args:
request: fleetspeak.admin.ListClientsRequest
timeout: How many seconds to try for.
Returns: fleetspeak.admin.ListClientsResponse
"""
return self._RetryLoop(
lambda t: self._stub.ListClients(request, timeout=t))
|
[
"def",
"ListClients",
"(",
"self",
",",
"request",
",",
"timeout",
"=",
"None",
")",
":",
"return",
"self",
".",
"_RetryLoop",
"(",
"lambda",
"t",
":",
"self",
".",
"_stub",
".",
"ListClients",
"(",
"request",
",",
"timeout",
"=",
"t",
")",
")"
] |
Provides basic information about Fleetspeak clients.
Args:
request: fleetspeak.admin.ListClientsRequest
timeout: How many seconds to try for.
Returns: fleetspeak.admin.ListClientsResponse
|
[
"Provides",
"basic",
"information",
"about",
"Fleetspeak",
"clients",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/server/grpcservice/client/client.py#L204-L215
|
train
|
google/fleetspeak
|
fleetspeak/src/server/grpcservice/client/client.py
|
InsecureGRPCServiceClient.Send
|
def Send(self, message):
"""Send one message.
Deprecated, users should migrate to call self.outgoing.InsertMessage
directly.
"""
if not self.outgoing:
raise NotConfigured("Send address not provided.")
self.outgoing.InsertMessage(message)
|
python
|
def Send(self, message):
"""Send one message.
Deprecated, users should migrate to call self.outgoing.InsertMessage
directly.
"""
if not self.outgoing:
raise NotConfigured("Send address not provided.")
self.outgoing.InsertMessage(message)
|
[
"def",
"Send",
"(",
"self",
",",
"message",
")",
":",
"if",
"not",
"self",
".",
"outgoing",
":",
"raise",
"NotConfigured",
"(",
"\"Send address not provided.\"",
")",
"self",
".",
"outgoing",
".",
"InsertMessage",
"(",
"message",
")"
] |
Send one message.
Deprecated, users should migrate to call self.outgoing.InsertMessage
directly.
|
[
"Send",
"one",
"message",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/server/grpcservice/client/client.py#L325-L333
|
train
|
reiinakano/xcessiv
|
xcessiv/automatedruns.py
|
start_naive_bayes
|
def start_naive_bayes(automated_run, session, path):
"""Starts naive bayes automated run
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
"""
module = functions.import_string_code_as_module(automated_run.source)
random_state = 8 if not hasattr(module, 'random_state') else module.random_state
assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators
# get non-searchable parameters
base_estimator = automated_run.base_learner_origin.return_estimator()
base_estimator.set_params(**module.default_params)
default_params = functions.make_serializable(base_estimator.get_params())
non_searchable_params = dict((key, val) for key, val in iteritems(default_params)
if key not in module.pbounds)
# get already calculated base learners in search space
existing_base_learners = []
for base_learner in automated_run.base_learner_origin.base_learners:
if not base_learner.job_status == 'finished':
continue
in_search_space = True
for key, val in iteritems(non_searchable_params):
if base_learner.hyperparameters[key] != val:
in_search_space = False
break # If no match, move on to the next base learner
if in_search_space:
existing_base_learners.append(base_learner)
# build initialize dictionary
target = []
initialization_dict = dict((key, list()) for key in module.pbounds.keys())
for base_learner in existing_base_learners:
# check if base learner's searchable hyperparameters are all numerical
all_numerical = True
for key in module.pbounds.keys():
if not isinstance(base_learner.hyperparameters[key], numbers.Number):
all_numerical = False
break
if not all_numerical:
continue # if there is a non-numerical hyperparameter, skip this.
for key in module.pbounds.keys():
initialization_dict[key].append(base_learner.hyperparameters[key])
target.append(base_learner.individual_score[module.metric_to_optimize])
initialization_dict['target'] = target if not module.invert_metric \
else list(map(lambda x: -x, target))
print('{} existing in initialization dictionary'.
format(len(initialization_dict['target'])))
# Create function to be optimized
func_to_optimize = return_func_to_optimize(
path, session, automated_run.base_learner_origin, module.default_params,
module.metric_to_optimize, module.invert_metric, set(module.integers)
)
# Create Bayes object
bo = BayesianOptimization(func_to_optimize, module.pbounds)
bo.initialize(initialization_dict)
np.random.seed(random_state)
bo.maximize(**module.maximize_config)
|
python
|
def start_naive_bayes(automated_run, session, path):
    """Starts naive bayes automated run

    Runs Bayesian optimization over the hyperparameter bounds declared in the
    run's source module, seeding the optimizer with scores from already-finished
    base learners that fall inside the search space.

    Args:
        automated_run (xcessiv.models.AutomatedRun): Automated run object

        session: Valid SQLAlchemy session

        path (str, unicode): Path to project folder
    """
    module = functions.import_string_code_as_module(automated_run.source)
    # Fall back to a fixed seed when the run's source module doesn't define one
    random_state = getattr(module, 'random_state', 8)
    assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators

    # Parameters not listed in module.pbounds are held fixed; they define which
    # existing base learners belong to this search space.
    base_estimator = automated_run.base_learner_origin.return_estimator()
    base_estimator.set_params(**module.default_params)
    default_params = functions.make_serializable(base_estimator.get_params())
    non_searchable_params = {key: val for key, val in iteritems(default_params)
                             if key not in module.pbounds}

    # Collect finished base learners whose fixed parameters all match --
    # their scores are reusable evaluations of the objective function.
    existing_base_learners = []
    for base_learner in automated_run.base_learner_origin.base_learners:
        if base_learner.job_status != 'finished':
            continue
        if all(base_learner.hyperparameters[key] == val
               for key, val in iteritems(non_searchable_params)):
            existing_base_learners.append(base_learner)

    # Build the initialization dictionary of prior (hyperparameters, score) points.
    target = []
    initialization_dict = {key: [] for key in module.pbounds.keys()}
    for base_learner in existing_base_learners:
        # Bayesian optimization only handles numerical dimensions; skip any
        # learner with a non-numerical searchable hyperparameter.
        if not all(isinstance(base_learner.hyperparameters[key], numbers.Number)
                   for key in module.pbounds.keys()):
            continue
        for key in module.pbounds.keys():
            initialization_dict[key].append(base_learner.hyperparameters[key])
        target.append(base_learner.individual_score[module.metric_to_optimize])
    # BayesianOptimization maximizes, so negate the targets for metrics that
    # should be minimized (invert_metric).
    initialization_dict['target'] = target if not module.invert_metric \
        else list(map(lambda x: -x, target))
    print('{} existing in initialization dictionary'.
          format(len(initialization_dict['target'])))

    # Create function to be optimized
    func_to_optimize = return_func_to_optimize(
        path, session, automated_run.base_learner_origin, module.default_params,
        module.metric_to_optimize, module.invert_metric, set(module.integers)
    )

    # Create Bayes object, prime it with the prior points, and run the search.
    bo = BayesianOptimization(func_to_optimize, module.pbounds)
    bo.initialize(initialization_dict)
    np.random.seed(random_state)
    bo.maximize(**module.maximize_config)
|
[
"def",
"start_naive_bayes",
"(",
"automated_run",
",",
"session",
",",
"path",
")",
":",
"module",
"=",
"functions",
".",
"import_string_code_as_module",
"(",
"automated_run",
".",
"source",
")",
"random_state",
"=",
"8",
"if",
"not",
"hasattr",
"(",
"module",
",",
"'random_state'",
")",
"else",
"module",
".",
"random_state",
"assert",
"module",
".",
"metric_to_optimize",
"in",
"automated_run",
".",
"base_learner_origin",
".",
"metric_generators",
"# get non-searchable parameters",
"base_estimator",
"=",
"automated_run",
".",
"base_learner_origin",
".",
"return_estimator",
"(",
")",
"base_estimator",
".",
"set_params",
"(",
"*",
"*",
"module",
".",
"default_params",
")",
"default_params",
"=",
"functions",
".",
"make_serializable",
"(",
"base_estimator",
".",
"get_params",
"(",
")",
")",
"non_searchable_params",
"=",
"dict",
"(",
"(",
"key",
",",
"val",
")",
"for",
"key",
",",
"val",
"in",
"iteritems",
"(",
"default_params",
")",
"if",
"key",
"not",
"in",
"module",
".",
"pbounds",
")",
"# get already calculated base learners in search space",
"existing_base_learners",
"=",
"[",
"]",
"for",
"base_learner",
"in",
"automated_run",
".",
"base_learner_origin",
".",
"base_learners",
":",
"if",
"not",
"base_learner",
".",
"job_status",
"==",
"'finished'",
":",
"continue",
"in_search_space",
"=",
"True",
"for",
"key",
",",
"val",
"in",
"iteritems",
"(",
"non_searchable_params",
")",
":",
"if",
"base_learner",
".",
"hyperparameters",
"[",
"key",
"]",
"!=",
"val",
":",
"in_search_space",
"=",
"False",
"break",
"# If no match, move on to the next base learner",
"if",
"in_search_space",
":",
"existing_base_learners",
".",
"append",
"(",
"base_learner",
")",
"# build initialize dictionary",
"target",
"=",
"[",
"]",
"initialization_dict",
"=",
"dict",
"(",
"(",
"key",
",",
"list",
"(",
")",
")",
"for",
"key",
"in",
"module",
".",
"pbounds",
".",
"keys",
"(",
")",
")",
"for",
"base_learner",
"in",
"existing_base_learners",
":",
"# check if base learner's searchable hyperparameters are all numerical",
"all_numerical",
"=",
"True",
"for",
"key",
"in",
"module",
".",
"pbounds",
".",
"keys",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"base_learner",
".",
"hyperparameters",
"[",
"key",
"]",
",",
"numbers",
".",
"Number",
")",
":",
"all_numerical",
"=",
"False",
"break",
"if",
"not",
"all_numerical",
":",
"continue",
"# if there is a non-numerical hyperparameter, skip this.",
"for",
"key",
"in",
"module",
".",
"pbounds",
".",
"keys",
"(",
")",
":",
"initialization_dict",
"[",
"key",
"]",
".",
"append",
"(",
"base_learner",
".",
"hyperparameters",
"[",
"key",
"]",
")",
"target",
".",
"append",
"(",
"base_learner",
".",
"individual_score",
"[",
"module",
".",
"metric_to_optimize",
"]",
")",
"initialization_dict",
"[",
"'target'",
"]",
"=",
"target",
"if",
"not",
"module",
".",
"invert_metric",
"else",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"-",
"x",
",",
"target",
")",
")",
"print",
"(",
"'{} existing in initialization dictionary'",
".",
"format",
"(",
"len",
"(",
"initialization_dict",
"[",
"'target'",
"]",
")",
")",
")",
"# Create function to be optimized",
"func_to_optimize",
"=",
"return_func_to_optimize",
"(",
"path",
",",
"session",
",",
"automated_run",
".",
"base_learner_origin",
",",
"module",
".",
"default_params",
",",
"module",
".",
"metric_to_optimize",
",",
"module",
".",
"invert_metric",
",",
"set",
"(",
"module",
".",
"integers",
")",
")",
"# Create Bayes object",
"bo",
"=",
"BayesianOptimization",
"(",
"func_to_optimize",
",",
"module",
".",
"pbounds",
")",
"bo",
".",
"initialize",
"(",
"initialization_dict",
")",
"np",
".",
"random",
".",
"seed",
"(",
"random_state",
")",
"bo",
".",
"maximize",
"(",
"*",
"*",
"module",
".",
"maximize_config",
")"
] |
Starts naive bayes automated run
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
|
[
"Starts",
"naive",
"bayes",
"automated",
"run"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/automatedruns.py#L139-L207
|
train
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.