repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
pepkit/peppy | peppy/utils.py | standard_stream_redirector | def standard_stream_redirector(stream):
"""
Temporarily redirect stdout and stderr to another stream.
This can be useful for capturing messages for easier inspection, or
for rerouting and essentially ignoring them, with the destination as
something like an opened os.devnull.
:param FileIO[str] stream: temporary proxy for standard streams
"""
import sys
genuine_stdout, genuine_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = stream, stream
try:
yield
finally:
sys.stdout, sys.stderr = genuine_stdout, genuine_stderr | python | def standard_stream_redirector(stream):
"""
Temporarily redirect stdout and stderr to another stream.
This can be useful for capturing messages for easier inspection, or
for rerouting and essentially ignoring them, with the destination as
something like an opened os.devnull.
:param FileIO[str] stream: temporary proxy for standard streams
"""
import sys
genuine_stdout, genuine_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = stream, stream
try:
yield
finally:
sys.stdout, sys.stderr = genuine_stdout, genuine_stderr | [
"def",
"standard_stream_redirector",
"(",
"stream",
")",
":",
"import",
"sys",
"genuine_stdout",
",",
"genuine_stderr",
"=",
"sys",
".",
"stdout",
",",
"sys",
".",
"stderr",
"sys",
".",
"stdout",
",",
"sys",
".",
"stderr",
"=",
"stream",
",",
"stream",
"tr... | Temporarily redirect stdout and stderr to another stream.
This can be useful for capturing messages for easier inspection, or
for rerouting and essentially ignoring them, with the destination as
something like an opened os.devnull.
:param FileIO[str] stream: temporary proxy for standard streams | [
"Temporarily",
"redirect",
"stdout",
"and",
"stderr",
"to",
"another",
"stream",
"."
] | f0f725e1557936b81c86573a77400e6f8da78f05 | https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L393-L409 | train | 33,500 |
momozor/python-pixabay | pixabay.py | Video.search | def search(
self,
q="yellow flower",
lang="en",
video_type="all",
category="",
min_width=0,
min_height=0,
editors_choice="false",
safesearch="false",
order="popular",
page=1,
per_page=20,
callback="",
pretty="false",
):
"""returns videos API data in dict
Videos search
:param q :type str :desc A URL encoded search term. If omitted,
all images are returned. This value may not exceed 100 characters.
Example: "yellow+flower"
Default: "yellow+flower"
:param lang :type str :desc Language code of the language to be searched in.
Accepted values: cs, da, de, en, es, fr, id, it, hu, nl, no, pl, pt, ro, sk, fi,
sv, tr, vi, th, bg, ru, el, ja, ko, zh
Default: "en"
For more info, see https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
:param video_type :type str :desc Filter results by video type.
Accepted values: "all", "film", "animation"
Default: "all"
:param category :type str :desc Filter results by category.
Accepted values: fashion, nature, backgrounds, science, education, people,
feelings, religion, health, places, animals, industry, food, computer, sports,
transportation, travel, buildings, business, music
:param min_width :type int :desc Minimum image width
Default: 0
:param min_height :type int :desc Minimum image height
Default: 0
:param editors_choice :type bool (python-pixabay use "true" and "false" string instead)
:desc Select images that have received
an Editor's Choice award.
Accepted values: "true", "false"
Default: "false"
:param safesearch :type bool (python-pixabay use "true" and "false" string instead)
:desc A flag indicating that only images suitable
for all ages should be returned.
Accepted values: "true", "false"
Default: "false"
:param order :type str :desc How the results should be ordered.
Accepted values: "popular", "latest"
Default: "popular"
:param page :type int :desc Returned search results are paginated.
Use this parameter to select the page number.
Default: 1
:param per_page :type int :desc Determine the number of results per page.
Accepted values: 3 - 200
Default: 20
:param callback :type str :desc JSONP callback function name
:param pretty :type bool (python-pixabay use "true" and "false" string instead)
:desc Indent JSON output. This option should not
be used in production.
Accepted values: "true", "false"
Default: "false"
Code Example
>>> from pixabay import Video
>>>
>>> video = Video("api_key")
>>> video.search(q="apple", page=1)
"""
payload = {
"key": self.api_key,
"q": q,
"lang": lang,
"video_type": video_type,
"category": category,
"min_width": min_width,
"min_height": min_height,
"editors_choice": editors_choice,
"safesearch": safesearch,
"order": order,
"page": page,
"per_page": per_page,
"callback": callback,
"pretty": pretty,
}
resp = get(self.root_url + "videos/", params=payload)
if resp.status_code == 200:
return resp.json()
else:
raise ValueError(resp.text) | python | def search(
self,
q="yellow flower",
lang="en",
video_type="all",
category="",
min_width=0,
min_height=0,
editors_choice="false",
safesearch="false",
order="popular",
page=1,
per_page=20,
callback="",
pretty="false",
):
"""returns videos API data in dict
Videos search
:param q :type str :desc A URL encoded search term. If omitted,
all images are returned. This value may not exceed 100 characters.
Example: "yellow+flower"
Default: "yellow+flower"
:param lang :type str :desc Language code of the language to be searched in.
Accepted values: cs, da, de, en, es, fr, id, it, hu, nl, no, pl, pt, ro, sk, fi,
sv, tr, vi, th, bg, ru, el, ja, ko, zh
Default: "en"
For more info, see https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
:param video_type :type str :desc Filter results by video type.
Accepted values: "all", "film", "animation"
Default: "all"
:param category :type str :desc Filter results by category.
Accepted values: fashion, nature, backgrounds, science, education, people,
feelings, religion, health, places, animals, industry, food, computer, sports,
transportation, travel, buildings, business, music
:param min_width :type int :desc Minimum image width
Default: 0
:param min_height :type int :desc Minimum image height
Default: 0
:param editors_choice :type bool (python-pixabay use "true" and "false" string instead)
:desc Select images that have received
an Editor's Choice award.
Accepted values: "true", "false"
Default: "false"
:param safesearch :type bool (python-pixabay use "true" and "false" string instead)
:desc A flag indicating that only images suitable
for all ages should be returned.
Accepted values: "true", "false"
Default: "false"
:param order :type str :desc How the results should be ordered.
Accepted values: "popular", "latest"
Default: "popular"
:param page :type int :desc Returned search results are paginated.
Use this parameter to select the page number.
Default: 1
:param per_page :type int :desc Determine the number of results per page.
Accepted values: 3 - 200
Default: 20
:param callback :type str :desc JSONP callback function name
:param pretty :type bool (python-pixabay use "true" and "false" string instead)
:desc Indent JSON output. This option should not
be used in production.
Accepted values: "true", "false"
Default: "false"
Code Example
>>> from pixabay import Video
>>>
>>> video = Video("api_key")
>>> video.search(q="apple", page=1)
"""
payload = {
"key": self.api_key,
"q": q,
"lang": lang,
"video_type": video_type,
"category": category,
"min_width": min_width,
"min_height": min_height,
"editors_choice": editors_choice,
"safesearch": safesearch,
"order": order,
"page": page,
"per_page": per_page,
"callback": callback,
"pretty": pretty,
}
resp = get(self.root_url + "videos/", params=payload)
if resp.status_code == 200:
return resp.json()
else:
raise ValueError(resp.text) | [
"def",
"search",
"(",
"self",
",",
"q",
"=",
"\"yellow flower\"",
",",
"lang",
"=",
"\"en\"",
",",
"video_type",
"=",
"\"all\"",
",",
"category",
"=",
"\"\"",
",",
"min_width",
"=",
"0",
",",
"min_height",
"=",
"0",
",",
"editors_choice",
"=",
"\"false\"... | returns videos API data in dict
Videos search
:param q :type str :desc A URL encoded search term. If omitted,
all images are returned. This value may not exceed 100 characters.
Example: "yellow+flower"
Default: "yellow+flower"
:param lang :type str :desc Language code of the language to be searched in.
Accepted values: cs, da, de, en, es, fr, id, it, hu, nl, no, pl, pt, ro, sk, fi,
sv, tr, vi, th, bg, ru, el, ja, ko, zh
Default: "en"
For more info, see https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
:param video_type :type str :desc Filter results by video type.
Accepted values: "all", "film", "animation"
Default: "all"
:param category :type str :desc Filter results by category.
Accepted values: fashion, nature, backgrounds, science, education, people,
feelings, religion, health, places, animals, industry, food, computer, sports,
transportation, travel, buildings, business, music
:param min_width :type int :desc Minimum image width
Default: 0
:param min_height :type int :desc Minimum image height
Default: 0
:param editors_choice :type bool (python-pixabay use "true" and "false" string instead)
:desc Select images that have received
an Editor's Choice award.
Accepted values: "true", "false"
Default: "false"
:param safesearch :type bool (python-pixabay use "true" and "false" string instead)
:desc A flag indicating that only images suitable
for all ages should be returned.
Accepted values: "true", "false"
Default: "false"
:param order :type str :desc How the results should be ordered.
Accepted values: "popular", "latest"
Default: "popular"
:param page :type int :desc Returned search results are paginated.
Use this parameter to select the page number.
Default: 1
:param per_page :type int :desc Determine the number of results per page.
Accepted values: 3 - 200
Default: 20
:param callback :type str :desc JSONP callback function name
:param pretty :type bool (python-pixabay use "true" and "false" string instead)
:desc Indent JSON output. This option should not
be used in production.
Accepted values: "true", "false"
Default: "false"
Code Example
>>> from pixabay import Video
>>>
>>> video = Video("api_key")
>>> video.search(q="apple", page=1) | [
"returns",
"videos",
"API",
"data",
"in",
"dict"
] | 4985ed3b816c041c0e4381e3f6bf97bfda7796e1 | https://github.com/momozor/python-pixabay/blob/4985ed3b816c041c0e4381e3f6bf97bfda7796e1/pixabay.py#L168-L273 | train | 33,501 |
bamthomas/aioimaplib | aioimaplib/aioimaplib.py | quoted | def quoted(arg):
""" Given a string, return a quoted string as per RFC 3501, section 9.
Implementation copied from https://github.com/mjs/imapclient
(imapclient/imapclient.py), 3-clause BSD license
"""
if isinstance(arg, str):
arg = arg.replace('\\', '\\\\')
arg = arg.replace('"', '\\"')
q = '"'
else:
arg = arg.replace(b'\\', b'\\\\')
arg = arg.replace(b'"', b'\\"')
q = b'"'
return q + arg + q | python | def quoted(arg):
""" Given a string, return a quoted string as per RFC 3501, section 9.
Implementation copied from https://github.com/mjs/imapclient
(imapclient/imapclient.py), 3-clause BSD license
"""
if isinstance(arg, str):
arg = arg.replace('\\', '\\\\')
arg = arg.replace('"', '\\"')
q = '"'
else:
arg = arg.replace(b'\\', b'\\\\')
arg = arg.replace(b'"', b'\\"')
q = b'"'
return q + arg + q | [
"def",
"quoted",
"(",
"arg",
")",
":",
"if",
"isinstance",
"(",
"arg",
",",
"str",
")",
":",
"arg",
"=",
"arg",
".",
"replace",
"(",
"'\\\\'",
",",
"'\\\\\\\\'",
")",
"arg",
"=",
"arg",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
"q",
"=",... | Given a string, return a quoted string as per RFC 3501, section 9.
Implementation copied from https://github.com/mjs/imapclient
(imapclient/imapclient.py), 3-clause BSD license | [
"Given",
"a",
"string",
"return",
"a",
"quoted",
"string",
"as",
"per",
"RFC",
"3501",
"section",
"9",
"."
] | 9670d43950cafc4d41aab7a36824b8051fa89899 | https://github.com/bamthomas/aioimaplib/blob/9670d43950cafc4d41aab7a36824b8051fa89899/aioimaplib/aioimaplib.py#L100-L114 | train | 33,502 |
bamthomas/aioimaplib | aioimaplib/aioimaplib.py | int2ap | def int2ap(num):
"""Convert integer to A-P string representation."""
val = ''
ap = 'ABCDEFGHIJKLMNOP'
num = int(abs(num))
while num:
num, mod = divmod(num, 16)
val += ap[mod:mod + 1]
return val | python | def int2ap(num):
"""Convert integer to A-P string representation."""
val = ''
ap = 'ABCDEFGHIJKLMNOP'
num = int(abs(num))
while num:
num, mod = divmod(num, 16)
val += ap[mod:mod + 1]
return val | [
"def",
"int2ap",
"(",
"num",
")",
":",
"val",
"=",
"''",
"ap",
"=",
"'ABCDEFGHIJKLMNOP'",
"num",
"=",
"int",
"(",
"abs",
"(",
"num",
")",
")",
"while",
"num",
":",
"num",
",",
"mod",
"=",
"divmod",
"(",
"num",
",",
"16",
")",
"val",
"+=",
"ap",... | Convert integer to A-P string representation. | [
"Convert",
"integer",
"to",
"A",
"-",
"P",
"string",
"representation",
"."
] | 9670d43950cafc4d41aab7a36824b8051fa89899 | https://github.com/bamthomas/aioimaplib/blob/9670d43950cafc4d41aab7a36824b8051fa89899/aioimaplib/aioimaplib.py#L864-L872 | train | 33,503 |
bamthomas/aioimaplib | aioimaplib/aioimaplib.py | time2internaldate | def time2internaldate(date_time):
"""Convert date_time to IMAP4 INTERNALDATE representation.
Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'. The
date_time argument can be a number (int or float) representing
seconds since epoch (as returned by time.time()), a 9-tuple
representing local time, an instance of time.struct_time (as
returned by time.localtime()), an aware datetime instance or a
double-quoted string. In the last case, it is assumed to already
be in the correct format.
"""
if isinstance(date_time, (int, float)):
dt = datetime.fromtimestamp(date_time, timezone.utc).astimezone()
elif isinstance(date_time, tuple):
try:
gmtoff = date_time.tm_gmtoff
except AttributeError:
if time.daylight:
dst = date_time[8]
if dst == -1:
dst = time.localtime(time.mktime(date_time))[8]
gmtoff = -(time.timezone, time.altzone)[dst]
else:
gmtoff = -time.timezone
delta = timedelta(seconds=gmtoff)
dt = datetime(*date_time[:6], tzinfo=timezone(delta))
elif isinstance(date_time, datetime):
if date_time.tzinfo is None:
raise ValueError("date_time must be aware")
dt = date_time
elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'):
return date_time # Assume in correct format
else:
raise ValueError("date_time not of a known type")
fmt = '"%d-{}-%Y %H:%M:%S %z"'.format(Months[dt.month])
return dt.strftime(fmt) | python | def time2internaldate(date_time):
"""Convert date_time to IMAP4 INTERNALDATE representation.
Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'. The
date_time argument can be a number (int or float) representing
seconds since epoch (as returned by time.time()), a 9-tuple
representing local time, an instance of time.struct_time (as
returned by time.localtime()), an aware datetime instance or a
double-quoted string. In the last case, it is assumed to already
be in the correct format.
"""
if isinstance(date_time, (int, float)):
dt = datetime.fromtimestamp(date_time, timezone.utc).astimezone()
elif isinstance(date_time, tuple):
try:
gmtoff = date_time.tm_gmtoff
except AttributeError:
if time.daylight:
dst = date_time[8]
if dst == -1:
dst = time.localtime(time.mktime(date_time))[8]
gmtoff = -(time.timezone, time.altzone)[dst]
else:
gmtoff = -time.timezone
delta = timedelta(seconds=gmtoff)
dt = datetime(*date_time[:6], tzinfo=timezone(delta))
elif isinstance(date_time, datetime):
if date_time.tzinfo is None:
raise ValueError("date_time must be aware")
dt = date_time
elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'):
return date_time # Assume in correct format
else:
raise ValueError("date_time not of a known type")
fmt = '"%d-{}-%Y %H:%M:%S %z"'.format(Months[dt.month])
return dt.strftime(fmt) | [
"def",
"time2internaldate",
"(",
"date_time",
")",
":",
"if",
"isinstance",
"(",
"date_time",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"dt",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"date_time",
",",
"timezone",
".",
"utc",
")",
".",
"astimezone"... | Convert date_time to IMAP4 INTERNALDATE representation.
Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'. The
date_time argument can be a number (int or float) representing
seconds since epoch (as returned by time.time()), a 9-tuple
representing local time, an instance of time.struct_time (as
returned by time.localtime()), an aware datetime instance or a
double-quoted string. In the last case, it is assumed to already
be in the correct format. | [
"Convert",
"date_time",
"to",
"IMAP4",
"INTERNALDATE",
"representation",
"."
] | 9670d43950cafc4d41aab7a36824b8051fa89899 | https://github.com/bamthomas/aioimaplib/blob/9670d43950cafc4d41aab7a36824b8051fa89899/aioimaplib/aioimaplib.py#L877-L912 | train | 33,504 |
mozilla/django-post-request-task | post_request_task/task.py | _send_tasks_and_stop_queuing | def _send_tasks_and_stop_queuing(**kwargs):
"""Sends all delayed Celery tasks and stop queuing new ones for now."""
log.info('Stopping queueing tasks and sending already queued ones.')
_stop_queuing_tasks()
task_queue = _get_task_queue()
while task_queue:
task, args, kwargs, extrakw = task_queue.pop(0)
task.original_apply_async(args=args, kwargs=kwargs, **extrakw) | python | def _send_tasks_and_stop_queuing(**kwargs):
"""Sends all delayed Celery tasks and stop queuing new ones for now."""
log.info('Stopping queueing tasks and sending already queued ones.')
_stop_queuing_tasks()
task_queue = _get_task_queue()
while task_queue:
task, args, kwargs, extrakw = task_queue.pop(0)
task.original_apply_async(args=args, kwargs=kwargs, **extrakw) | [
"def",
"_send_tasks_and_stop_queuing",
"(",
"*",
"*",
"kwargs",
")",
":",
"log",
".",
"info",
"(",
"'Stopping queueing tasks and sending already queued ones.'",
")",
"_stop_queuing_tasks",
"(",
")",
"task_queue",
"=",
"_get_task_queue",
"(",
")",
"while",
"task_queue",
... | Sends all delayed Celery tasks and stop queuing new ones for now. | [
"Sends",
"all",
"delayed",
"Celery",
"tasks",
"and",
"stop",
"queuing",
"new",
"ones",
"for",
"now",
"."
] | 9ea9d7004f5f93e82311ab170afcf6e51fbf82ff | https://github.com/mozilla/django-post-request-task/blob/9ea9d7004f5f93e82311ab170afcf6e51fbf82ff/post_request_task/task.py#L47-L54 | train | 33,505 |
mozilla/django-post-request-task | post_request_task/task.py | _append_task | def _append_task(t):
"""Append a task to the queue.
Expected argument is a tuple of the following form:
(task class, args, kwargs, extra kwargs).
This doesn't append to queue if the argument is already in the queue.
"""
task_queue = _get_task_queue()
if t not in task_queue:
log.debug('Appended new task to the queue: %s.', t)
task_queue.append(t)
else:
log.debug('Did not append duplicate task to the queue: %s.', t)
return None | python | def _append_task(t):
"""Append a task to the queue.
Expected argument is a tuple of the following form:
(task class, args, kwargs, extra kwargs).
This doesn't append to queue if the argument is already in the queue.
"""
task_queue = _get_task_queue()
if t not in task_queue:
log.debug('Appended new task to the queue: %s.', t)
task_queue.append(t)
else:
log.debug('Did not append duplicate task to the queue: %s.', t)
return None | [
"def",
"_append_task",
"(",
"t",
")",
":",
"task_queue",
"=",
"_get_task_queue",
"(",
")",
"if",
"t",
"not",
"in",
"task_queue",
":",
"log",
".",
"debug",
"(",
"'Appended new task to the queue: %s.'",
",",
"t",
")",
"task_queue",
".",
"append",
"(",
"t",
"... | Append a task to the queue.
Expected argument is a tuple of the following form:
(task class, args, kwargs, extra kwargs).
This doesn't append to queue if the argument is already in the queue. | [
"Append",
"a",
"task",
"to",
"the",
"queue",
"."
] | 9ea9d7004f5f93e82311ab170afcf6e51fbf82ff | https://github.com/mozilla/django-post-request-task/blob/9ea9d7004f5f93e82311ab170afcf6e51fbf82ff/post_request_task/task.py#L64-L79 | train | 33,506 |
shveenkov/aiotarantool-queue-python | aiotarantool_queue/queue.py | Task.create_from_tuple | def create_from_tuple(cls, tube, the_tuple):
"""
Create task from tuple.
Returns `Task` instance.
"""
if the_tuple is None:
return
if not the_tuple.rowcount:
raise Queue.ZeroTupleException("Error creating task")
row = the_tuple[0]
return cls(
tube,
task_id=row[0],
state=row[1],
data=row[2]
) | python | def create_from_tuple(cls, tube, the_tuple):
"""
Create task from tuple.
Returns `Task` instance.
"""
if the_tuple is None:
return
if not the_tuple.rowcount:
raise Queue.ZeroTupleException("Error creating task")
row = the_tuple[0]
return cls(
tube,
task_id=row[0],
state=row[1],
data=row[2]
) | [
"def",
"create_from_tuple",
"(",
"cls",
",",
"tube",
",",
"the_tuple",
")",
":",
"if",
"the_tuple",
"is",
"None",
":",
"return",
"if",
"not",
"the_tuple",
".",
"rowcount",
":",
"raise",
"Queue",
".",
"ZeroTupleException",
"(",
"\"Error creating task\"",
")",
... | Create task from tuple.
Returns `Task` instance. | [
"Create",
"task",
"from",
"tuple",
"."
] | b84a1e704f63f7b8cb14cbca5ec99ab8047d1715 | https://github.com/shveenkov/aiotarantool-queue-python/blob/b84a1e704f63f7b8cb14cbca5ec99ab8047d1715/aiotarantool_queue/queue.py#L52-L71 | train | 33,507 |
shveenkov/aiotarantool-queue-python | aiotarantool_queue/queue.py | Task.update_from_tuple | def update_from_tuple(self, the_tuple):
"""
Update task from tuple.
"""
if not the_tuple.rowcount:
raise Queue.ZeroTupleException("Error updating task")
row = the_tuple[0]
if self.task_id != row[0]:
raise Queue.BadTupleException("Wrong task: id's are not match")
self.state = row[1]
self.data = row[2] | python | def update_from_tuple(self, the_tuple):
"""
Update task from tuple.
"""
if not the_tuple.rowcount:
raise Queue.ZeroTupleException("Error updating task")
row = the_tuple[0]
if self.task_id != row[0]:
raise Queue.BadTupleException("Wrong task: id's are not match")
self.state = row[1]
self.data = row[2] | [
"def",
"update_from_tuple",
"(",
"self",
",",
"the_tuple",
")",
":",
"if",
"not",
"the_tuple",
".",
"rowcount",
":",
"raise",
"Queue",
".",
"ZeroTupleException",
"(",
"\"Error updating task\"",
")",
"row",
"=",
"the_tuple",
"[",
"0",
"]",
"if",
"self",
".",
... | Update task from tuple. | [
"Update",
"task",
"from",
"tuple",
"."
] | b84a1e704f63f7b8cb14cbca5ec99ab8047d1715 | https://github.com/shveenkov/aiotarantool-queue-python/blob/b84a1e704f63f7b8cb14cbca5ec99ab8047d1715/aiotarantool_queue/queue.py#L73-L86 | train | 33,508 |
shveenkov/aiotarantool-queue-python | aiotarantool_queue/queue.py | Task.peek | async def peek(self):
"""
Look at a task without changing its state.
Always returns `True`.
"""
the_tuple = await self.queue.peek(self.tube, self.task_id)
self.update_from_tuple(the_tuple)
return True | python | async def peek(self):
"""
Look at a task without changing its state.
Always returns `True`.
"""
the_tuple = await self.queue.peek(self.tube, self.task_id)
self.update_from_tuple(the_tuple)
return True | [
"async",
"def",
"peek",
"(",
"self",
")",
":",
"the_tuple",
"=",
"await",
"self",
".",
"queue",
".",
"peek",
"(",
"self",
".",
"tube",
",",
"self",
".",
"task_id",
")",
"self",
".",
"update_from_tuple",
"(",
"the_tuple",
")",
"return",
"True"
] | Look at a task without changing its state.
Always returns `True`. | [
"Look",
"at",
"a",
"task",
"without",
"changing",
"its",
"state",
"."
] | b84a1e704f63f7b8cb14cbca5ec99ab8047d1715 | https://github.com/shveenkov/aiotarantool-queue-python/blob/b84a1e704f63f7b8cb14cbca5ec99ab8047d1715/aiotarantool_queue/queue.py#L118-L128 | train | 33,509 |
shveenkov/aiotarantool-queue-python | aiotarantool_queue/queue.py | Tube.cmd | def cmd(self, cmd_name):
"""
Returns tarantool queue command name for current tube.
"""
return "{0}.tube.{1}:{2}".format(self.queue.lua_queue_name, self.name, cmd_name) | python | def cmd(self, cmd_name):
"""
Returns tarantool queue command name for current tube.
"""
return "{0}.tube.{1}:{2}".format(self.queue.lua_queue_name, self.name, cmd_name) | [
"def",
"cmd",
"(",
"self",
",",
"cmd_name",
")",
":",
"return",
"\"{0}.tube.{1}:{2}\"",
".",
"format",
"(",
"self",
".",
"queue",
".",
"lua_queue_name",
",",
"self",
".",
"name",
",",
"cmd_name",
")"
] | Returns tarantool queue command name for current tube. | [
"Returns",
"tarantool",
"queue",
"command",
"name",
"for",
"current",
"tube",
"."
] | b84a1e704f63f7b8cb14cbca5ec99ab8047d1715 | https://github.com/shveenkov/aiotarantool-queue-python/blob/b84a1e704f63f7b8cb14cbca5ec99ab8047d1715/aiotarantool_queue/queue.py#L151-L155 | train | 33,510 |
shveenkov/aiotarantool-queue-python | aiotarantool_queue/queue.py | Queue.ack | async def ack(self, tube, task_id):
"""
Report task successful execution.
Ack is accepted only from the consumer, which took the task
for execution. If a consumer disconnects, all tasks taken
by this consumer are put back to READY state (released).
Returns tarantool tuple object.
"""
cmd = tube.cmd("ack")
args = (task_id,)
res = await self.tnt.call(cmd, args)
return res | python | async def ack(self, tube, task_id):
"""
Report task successful execution.
Ack is accepted only from the consumer, which took the task
for execution. If a consumer disconnects, all tasks taken
by this consumer are put back to READY state (released).
Returns tarantool tuple object.
"""
cmd = tube.cmd("ack")
args = (task_id,)
res = await self.tnt.call(cmd, args)
return res | [
"async",
"def",
"ack",
"(",
"self",
",",
"tube",
",",
"task_id",
")",
":",
"cmd",
"=",
"tube",
".",
"cmd",
"(",
"\"ack\"",
")",
"args",
"=",
"(",
"task_id",
",",
")",
"res",
"=",
"await",
"self",
".",
"tnt",
".",
"call",
"(",
"cmd",
",",
"args"... | Report task successful execution.
Ack is accepted only from the consumer, which took the task
for execution. If a consumer disconnects, all tasks taken
by this consumer are put back to READY state (released).
Returns tarantool tuple object. | [
"Report",
"task",
"successful",
"execution",
"."
] | b84a1e704f63f7b8cb14cbca5ec99ab8047d1715 | https://github.com/shveenkov/aiotarantool-queue-python/blob/b84a1e704f63f7b8cb14cbca5ec99ab8047d1715/aiotarantool_queue/queue.py#L297-L311 | train | 33,511 |
shveenkov/aiotarantool-queue-python | aiotarantool_queue/queue.py | Queue.tube | def tube(self, name):
"""
Create tube object, if not created before.
Returns `Tube` object.
"""
tube = self.tubes.get(name)
if tube is None:
tube = Tube(self, name)
self.tubes[name] = tube
return tube | python | def tube(self, name):
"""
Create tube object, if not created before.
Returns `Tube` object.
"""
tube = self.tubes.get(name)
if tube is None:
tube = Tube(self, name)
self.tubes[name] = tube
return tube | [
"def",
"tube",
"(",
"self",
",",
"name",
")",
":",
"tube",
"=",
"self",
".",
"tubes",
".",
"get",
"(",
"name",
")",
"if",
"tube",
"is",
"None",
":",
"tube",
"=",
"Tube",
"(",
"self",
",",
"name",
")",
"self",
".",
"tubes",
"[",
"name",
"]",
"... | Create tube object, if not created before.
Returns `Tube` object. | [
"Create",
"tube",
"object",
"if",
"not",
"created",
"before",
"."
] | b84a1e704f63f7b8cb14cbca5ec99ab8047d1715 | https://github.com/shveenkov/aiotarantool-queue-python/blob/b84a1e704f63f7b8cb14cbca5ec99ab8047d1715/aiotarantool_queue/queue.py#L370-L382 | train | 33,512 |
cronofy/pycronofy | pycronofy/request_handler.py | RequestHandler.post | def post(self, endpoint='', url='', data=None, use_api_key=False, omit_api_version=False):
"""Perform a post to an API endpoint.
:param string endpoint: Target endpoint. (Optional).
:param string url: Override the endpoint and provide the full url (eg for pagination). (Optional).
:param dict data: Data to pass to the post. (Optional).
:return: Response.
:rtype: ``Response``
"""
return self._request('post', endpoint, url, data=data, use_api_key=use_api_key, omit_api_version=omit_api_version) | python | def post(self, endpoint='', url='', data=None, use_api_key=False, omit_api_version=False):
"""Perform a post to an API endpoint.
:param string endpoint: Target endpoint. (Optional).
:param string url: Override the endpoint and provide the full url (eg for pagination). (Optional).
:param dict data: Data to pass to the post. (Optional).
:return: Response.
:rtype: ``Response``
"""
return self._request('post', endpoint, url, data=data, use_api_key=use_api_key, omit_api_version=omit_api_version) | [
"def",
"post",
"(",
"self",
",",
"endpoint",
"=",
"''",
",",
"url",
"=",
"''",
",",
"data",
"=",
"None",
",",
"use_api_key",
"=",
"False",
",",
"omit_api_version",
"=",
"False",
")",
":",
"return",
"self",
".",
"_request",
"(",
"'post'",
",",
"endpoi... | Perform a post to an API endpoint.
:param string endpoint: Target endpoint. (Optional).
:param string url: Override the endpoint and provide the full url (eg for pagination). (Optional).
:param dict data: Data to pass to the post. (Optional).
:return: Response.
:rtype: ``Response`` | [
"Perform",
"a",
"post",
"to",
"an",
"API",
"endpoint",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/request_handler.py#L44-L53 | train | 33,513 |
cronofy/pycronofy | pycronofy/request_handler.py | RequestHandler._request | def _request(self, request_method, endpoint='', url='', data=None, params=None, use_api_key=False, omit_api_version=False):
"""Perform a http request via the specified method to an API endpoint.
:param string request_method: Request method.
:param string endpoint: Target endpoint. (Optional).
:param string url: Override the endpoint and provide the full url (eg for pagination). (Optional).
:param dict params: Provide parameters to pass to the request. (Optional).
:param dict data: Data to pass to the post. (Optional).
:return: Response
:rtype: ``Response``
"""
if not data:
data = {}
if not params:
params = {}
if endpoint and omit_api_version and not url:
url = '%s/%s' % (self.base_url, endpoint)
if endpoint and not url:
url = '%s/%s/%s' % (self.base_url, settings.API_VERSION, endpoint)
if use_api_key:
headers = {
'Authorization': self.auth.get_api_key(),
'User-Agent': self.user_agent,
}
else:
headers = {
'Authorization': self.auth.get_authorization(),
'User-Agent': self.user_agent,
}
response = requests.__getattribute__(request_method)(
url=url,
hooks=settings.REQUEST_HOOK,
headers=headers,
json=data,
params=params
)
if ((response.status_code != 200) and (response.status_code != 202)):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
raise PyCronofyRequestError(
request=e.request,
response=e.response,
)
return response | python | def _request(self, request_method, endpoint='', url='', data=None, params=None, use_api_key=False, omit_api_version=False):
"""Perform a http request via the specified method to an API endpoint.
:param string request_method: Request method.
:param string endpoint: Target endpoint. (Optional).
:param string url: Override the endpoint and provide the full url (eg for pagination). (Optional).
:param dict params: Provide parameters to pass to the request. (Optional).
:param dict data: Data to pass to the post. (Optional).
:return: Response
:rtype: ``Response``
"""
if not data:
data = {}
if not params:
params = {}
if endpoint and omit_api_version and not url:
url = '%s/%s' % (self.base_url, endpoint)
if endpoint and not url:
url = '%s/%s/%s' % (self.base_url, settings.API_VERSION, endpoint)
if use_api_key:
headers = {
'Authorization': self.auth.get_api_key(),
'User-Agent': self.user_agent,
}
else:
headers = {
'Authorization': self.auth.get_authorization(),
'User-Agent': self.user_agent,
}
response = requests.__getattribute__(request_method)(
url=url,
hooks=settings.REQUEST_HOOK,
headers=headers,
json=data,
params=params
)
if ((response.status_code != 200) and (response.status_code != 202)):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
raise PyCronofyRequestError(
request=e.request,
response=e.response,
)
return response | [
"def",
"_request",
"(",
"self",
",",
"request_method",
",",
"endpoint",
"=",
"''",
",",
"url",
"=",
"''",
",",
"data",
"=",
"None",
",",
"params",
"=",
"None",
",",
"use_api_key",
"=",
"False",
",",
"omit_api_version",
"=",
"False",
")",
":",
"if",
"... | Perform a http request via the specified method to an API endpoint.
:param string request_method: Request method.
:param string endpoint: Target endpoint. (Optional).
:param string url: Override the endpoint and provide the full url (eg for pagination). (Optional).
:param dict params: Provide parameters to pass to the request. (Optional).
:param dict data: Data to pass to the post. (Optional).
:return: Response
:rtype: ``Response`` | [
"Perform",
"a",
"http",
"request",
"via",
"the",
"specified",
"method",
"to",
"an",
"API",
"endpoint",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/request_handler.py#L55-L101 | train | 33,514 |
cronofy/pycronofy | pycronofy/client.py | Client.change_participation_status | def change_participation_status(self, calendar_id, event_uid, status):
"""Changes the participation status for a calendar event
:param string calendar_id: The String Cronofy ID for the calendar to delete the event from.
:param string event_uid: A String uniquely identifying the event for your
application (note: this is NOT an ID generated by Cronofy).
:param string status: A String to set the participation status of the event to
:return: None
"""
data = {'status': status}
self.request_handler.post('calendars/%s/events/%s/participation_status' % (calendar_id, event_uid), data=data) | python | def change_participation_status(self, calendar_id, event_uid, status):
"""Changes the participation status for a calendar event
:param string calendar_id: The String Cronofy ID for the calendar to delete the event from.
:param string event_uid: A String uniquely identifying the event for your
application (note: this is NOT an ID generated by Cronofy).
:param string status: A String to set the participation status of the event to
:return: None
"""
data = {'status': status}
self.request_handler.post('calendars/%s/events/%s/participation_status' % (calendar_id, event_uid), data=data) | [
"def",
"change_participation_status",
"(",
"self",
",",
"calendar_id",
",",
"event_uid",
",",
"status",
")",
":",
"data",
"=",
"{",
"'status'",
":",
"status",
"}",
"self",
".",
"request_handler",
".",
"post",
"(",
"'calendars/%s/events/%s/participation_status'",
"... | Changes the participation status for a calendar event
:param string calendar_id: The String Cronofy ID for the calendar to delete the event from.
:param string event_uid: A String uniquely identifying the event for your
application (note: this is NOT an ID generated by Cronofy).
:param string status: A String to set the participation status of the event to
:return: None | [
"Changes",
"the",
"participation",
"status",
"for",
"a",
"calendar",
"event"
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L73-L84 | train | 33,515 |
cronofy/pycronofy | pycronofy/client.py | Client.create_notification_channel | def create_notification_channel(self, callback_url, calendar_ids=()):
"""Create a new channel for receiving push notifications.
:param string callback_url: The url that will receive push notifications.
Must not be longer than 128 characters and should be HTTPS.
:param tuple calendar_ids: List of calendar ids to create notification channels for. (Optional. Default empty tuple)
:return: Channel id and channel callback
:rtype: ``dict``
"""
data = {'callback_url': callback_url}
if calendar_ids:
data['filters'] = {'calendar_ids': calendar_ids}
return self.request_handler.post('channels', data=data).json()['channel'] | python | def create_notification_channel(self, callback_url, calendar_ids=()):
"""Create a new channel for receiving push notifications.
:param string callback_url: The url that will receive push notifications.
Must not be longer than 128 characters and should be HTTPS.
:param tuple calendar_ids: List of calendar ids to create notification channels for. (Optional. Default empty tuple)
:return: Channel id and channel callback
:rtype: ``dict``
"""
data = {'callback_url': callback_url}
if calendar_ids:
data['filters'] = {'calendar_ids': calendar_ids}
return self.request_handler.post('channels', data=data).json()['channel'] | [
"def",
"create_notification_channel",
"(",
"self",
",",
"callback_url",
",",
"calendar_ids",
"=",
"(",
")",
")",
":",
"data",
"=",
"{",
"'callback_url'",
":",
"callback_url",
"}",
"if",
"calendar_ids",
":",
"data",
"[",
"'filters'",
"]",
"=",
"{",
"'calendar... | Create a new channel for receiving push notifications.
:param string callback_url: The url that will receive push notifications.
Must not be longer than 128 characters and should be HTTPS.
:param tuple calendar_ids: List of calendar ids to create notification channels for. (Optional. Default empty tuple)
:return: Channel id and channel callback
:rtype: ``dict`` | [
"Create",
"a",
"new",
"channel",
"for",
"receiving",
"push",
"notifications",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L86-L99 | train | 33,516 |
cronofy/pycronofy | pycronofy/client.py | Client.delete_all_events | def delete_all_events(self, calendar_ids=()):
"""Deletes all events managed through Cronofy from the all of the user's calendars.
:param tuple calendar_ids: List of calendar ids to delete events for. (Optional. Default empty tuple)
"""
params = {'delete_all': True}
if calendar_ids:
params = {'calendar_ids[]': calendar_ids}
self.request_handler.delete(endpoint='events', params=params) | python | def delete_all_events(self, calendar_ids=()):
"""Deletes all events managed through Cronofy from the all of the user's calendars.
:param tuple calendar_ids: List of calendar ids to delete events for. (Optional. Default empty tuple)
"""
params = {'delete_all': True}
if calendar_ids:
params = {'calendar_ids[]': calendar_ids}
self.request_handler.delete(endpoint='events', params=params) | [
"def",
"delete_all_events",
"(",
"self",
",",
"calendar_ids",
"=",
"(",
")",
")",
":",
"params",
"=",
"{",
"'delete_all'",
":",
"True",
"}",
"if",
"calendar_ids",
":",
"params",
"=",
"{",
"'calendar_ids[]'",
":",
"calendar_ids",
"}",
"self",
".",
"request_... | Deletes all events managed through Cronofy from the all of the user's calendars.
:param tuple calendar_ids: List of calendar ids to delete events for. (Optional. Default empty tuple) | [
"Deletes",
"all",
"events",
"managed",
"through",
"Cronofy",
"from",
"the",
"all",
"of",
"the",
"user",
"s",
"calendars",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L101-L110 | train | 33,517 |
cronofy/pycronofy | pycronofy/client.py | Client.delete_event | def delete_event(self, calendar_id, event_id):
"""Delete an event from the specified calendar.
:param string calendar_id: ID of calendar to delete from.
:param string event_id: ID of event to delete.
"""
self.request_handler.delete(endpoint='calendars/%s/events' % calendar_id, data={'event_id': event_id}) | python | def delete_event(self, calendar_id, event_id):
"""Delete an event from the specified calendar.
:param string calendar_id: ID of calendar to delete from.
:param string event_id: ID of event to delete.
"""
self.request_handler.delete(endpoint='calendars/%s/events' % calendar_id, data={'event_id': event_id}) | [
"def",
"delete_event",
"(",
"self",
",",
"calendar_id",
",",
"event_id",
")",
":",
"self",
".",
"request_handler",
".",
"delete",
"(",
"endpoint",
"=",
"'calendars/%s/events'",
"%",
"calendar_id",
",",
"data",
"=",
"{",
"'event_id'",
":",
"event_id",
"}",
")... | Delete an event from the specified calendar.
:param string calendar_id: ID of calendar to delete from.
:param string event_id: ID of event to delete. | [
"Delete",
"an",
"event",
"from",
"the",
"specified",
"calendar",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L112-L118 | train | 33,518 |
cronofy/pycronofy | pycronofy/client.py | Client.delete_external_event | def delete_external_event(self, calendar_id, event_uid):
"""Delete an external event from the specified calendar.
:param string calendar_id: ID of calendar to delete from.
:param string event_uid: ID of event to delete.
"""
self.request_handler.delete(endpoint='calendars/%s/events' % calendar_id, data={'event_uid': event_uid}) | python | def delete_external_event(self, calendar_id, event_uid):
"""Delete an external event from the specified calendar.
:param string calendar_id: ID of calendar to delete from.
:param string event_uid: ID of event to delete.
"""
self.request_handler.delete(endpoint='calendars/%s/events' % calendar_id, data={'event_uid': event_uid}) | [
"def",
"delete_external_event",
"(",
"self",
",",
"calendar_id",
",",
"event_uid",
")",
":",
"self",
".",
"request_handler",
".",
"delete",
"(",
"endpoint",
"=",
"'calendars/%s/events'",
"%",
"calendar_id",
",",
"data",
"=",
"{",
"'event_uid'",
":",
"event_uid",... | Delete an external event from the specified calendar.
:param string calendar_id: ID of calendar to delete from.
:param string event_uid: ID of event to delete. | [
"Delete",
"an",
"external",
"event",
"from",
"the",
"specified",
"calendar",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L120-L126 | train | 33,519 |
cronofy/pycronofy | pycronofy/client.py | Client.elevated_permissions | def elevated_permissions(self, permissions, redirect_uri=None):
"""Requests elevated permissions for a set of calendars.
:param tuple permissions - calendar permission dicts set each dict
must contain values for both `calendar_id` and `permission_level`
:param string redirect_uri - A uri to redirect the end user back to after they
have either granted or rejected the request for elevated permission.
In the case of normal accounts:
After making this call the end user will have to grant the extended
permissions to their calendar via rhe url returned from the response.
In the case of service accounts:
After making this call the exteneded permissions will be granted provided
the relevant scope has been granted to the account
:return: a extended permissions response.
:rtype: ``dict``
"""
body = {'permissions': permissions}
if redirect_uri:
body['redirect_uri'] = redirect_uri
return self.request_handler.post('permissions', data=body).json()['permissions_request'] | python | def elevated_permissions(self, permissions, redirect_uri=None):
"""Requests elevated permissions for a set of calendars.
:param tuple permissions - calendar permission dicts set each dict
must contain values for both `calendar_id` and `permission_level`
:param string redirect_uri - A uri to redirect the end user back to after they
have either granted or rejected the request for elevated permission.
In the case of normal accounts:
After making this call the end user will have to grant the extended
permissions to their calendar via rhe url returned from the response.
In the case of service accounts:
After making this call the exteneded permissions will be granted provided
the relevant scope has been granted to the account
:return: a extended permissions response.
:rtype: ``dict``
"""
body = {'permissions': permissions}
if redirect_uri:
body['redirect_uri'] = redirect_uri
return self.request_handler.post('permissions', data=body).json()['permissions_request'] | [
"def",
"elevated_permissions",
"(",
"self",
",",
"permissions",
",",
"redirect_uri",
"=",
"None",
")",
":",
"body",
"=",
"{",
"'permissions'",
":",
"permissions",
"}",
"if",
"redirect_uri",
":",
"body",
"[",
"'redirect_uri'",
"]",
"=",
"redirect_uri",
"return"... | Requests elevated permissions for a set of calendars.
:param tuple permissions - calendar permission dicts set each dict
must contain values for both `calendar_id` and `permission_level`
:param string redirect_uri - A uri to redirect the end user back to after they
have either granted or rejected the request for elevated permission.
In the case of normal accounts:
After making this call the end user will have to grant the extended
permissions to their calendar via rhe url returned from the response.
In the case of service accounts:
After making this call the exteneded permissions will be granted provided
the relevant scope has been granted to the account
:return: a extended permissions response.
:rtype: ``dict`` | [
"Requests",
"elevated",
"permissions",
"for",
"a",
"set",
"of",
"calendars",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L128-L153 | train | 33,520 |
cronofy/pycronofy | pycronofy/client.py | Client.get_smart_invite | def get_smart_invite(self, smart_invite_id, recipient_email):
"""Gets the details for a smart invite.
:param string smart_invite_id: - A String uniquely identifying the event for your
application (note: this is NOT an ID generated by Cronofy).
:param string recipient_email: - The email address for the recipient to get details for.
"""
params = {
'smart_invite_id': smart_invite_id,
'recipient_email': recipient_email
}
return self.request_handler.get('smart_invites', params=params, use_api_key=True).json() | python | def get_smart_invite(self, smart_invite_id, recipient_email):
"""Gets the details for a smart invite.
:param string smart_invite_id: - A String uniquely identifying the event for your
application (note: this is NOT an ID generated by Cronofy).
:param string recipient_email: - The email address for the recipient to get details for.
"""
params = {
'smart_invite_id': smart_invite_id,
'recipient_email': recipient_email
}
return self.request_handler.get('smart_invites', params=params, use_api_key=True).json() | [
"def",
"get_smart_invite",
"(",
"self",
",",
"smart_invite_id",
",",
"recipient_email",
")",
":",
"params",
"=",
"{",
"'smart_invite_id'",
":",
"smart_invite_id",
",",
"'recipient_email'",
":",
"recipient_email",
"}",
"return",
"self",
".",
"request_handler",
".",
... | Gets the details for a smart invite.
:param string smart_invite_id: - A String uniquely identifying the event for your
application (note: this is NOT an ID generated by Cronofy).
:param string recipient_email: - The email address for the recipient to get details for. | [
"Gets",
"the",
"details",
"for",
"a",
"smart",
"invite",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L218-L230 | train | 33,521 |
cronofy/pycronofy | pycronofy/client.py | Client.application_calendar | def application_calendar(self, application_calendar_id):
"""Creates and Retrieves authorization for an application calendar
:param string application_calendar_id: The Id for this application calendar
:return: Dictionary containing auth tokens, expiration info, and response status.
:rtype: ``dict``
"""
response = self.request_handler.post(
endpoint='application_calendar',
data={
'client_id': self.auth.client_id,
'client_secret': self.auth.client_secret,
'application_calendar_id': application_calendar_id,
})
data = response.json()
token_expiration = (datetime.datetime.utcnow() + datetime.timedelta(seconds=data['expires_in']))
self.auth.update(
token_expiration=token_expiration,
access_token=data['access_token'],
refresh_token=data['refresh_token'],
)
return {
'access_token': self.auth.access_token,
'refresh_token': self.auth.refresh_token,
'token_expiration': format_event_time(self.auth.token_expiration),
'sub': data.get('sub'),
'application_calendar_id': data.get('application_calendar_id')
} | python | def application_calendar(self, application_calendar_id):
"""Creates and Retrieves authorization for an application calendar
:param string application_calendar_id: The Id for this application calendar
:return: Dictionary containing auth tokens, expiration info, and response status.
:rtype: ``dict``
"""
response = self.request_handler.post(
endpoint='application_calendar',
data={
'client_id': self.auth.client_id,
'client_secret': self.auth.client_secret,
'application_calendar_id': application_calendar_id,
})
data = response.json()
token_expiration = (datetime.datetime.utcnow() + datetime.timedelta(seconds=data['expires_in']))
self.auth.update(
token_expiration=token_expiration,
access_token=data['access_token'],
refresh_token=data['refresh_token'],
)
return {
'access_token': self.auth.access_token,
'refresh_token': self.auth.refresh_token,
'token_expiration': format_event_time(self.auth.token_expiration),
'sub': data.get('sub'),
'application_calendar_id': data.get('application_calendar_id')
} | [
"def",
"application_calendar",
"(",
"self",
",",
"application_calendar_id",
")",
":",
"response",
"=",
"self",
".",
"request_handler",
".",
"post",
"(",
"endpoint",
"=",
"'application_calendar'",
",",
"data",
"=",
"{",
"'client_id'",
":",
"self",
".",
"auth",
... | Creates and Retrieves authorization for an application calendar
:param string application_calendar_id: The Id for this application calendar
:return: Dictionary containing auth tokens, expiration info, and response status.
:rtype: ``dict`` | [
"Creates",
"and",
"Retrieves",
"authorization",
"for",
"an",
"application",
"calendar"
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L263-L290 | train | 33,522 |
cronofy/pycronofy | pycronofy/client.py | Client.refresh_authorization | def refresh_authorization(self):
"""Refreshes the authorization tokens.
:return: Dictionary containing auth tokens, expiration info, and response status.
:rtype: ``dict``
"""
response = self.request_handler.post(
endpoint='oauth/token',
omit_api_version=True,
data={
'grant_type': 'refresh_token',
'client_id': self.auth.client_id,
'client_secret': self.auth.client_secret,
'refresh_token': self.auth.refresh_token,
}
)
data = response.json()
token_expiration = (datetime.datetime.utcnow() + datetime.timedelta(seconds=data['expires_in']))
self.auth.update(
token_expiration=token_expiration,
access_token=data['access_token'],
refresh_token=data['refresh_token'],
)
return {
'access_token': self.auth.access_token,
'refresh_token': self.auth.refresh_token,
'token_expiration': format_event_time(self.auth.token_expiration),
} | python | def refresh_authorization(self):
"""Refreshes the authorization tokens.
:return: Dictionary containing auth tokens, expiration info, and response status.
:rtype: ``dict``
"""
response = self.request_handler.post(
endpoint='oauth/token',
omit_api_version=True,
data={
'grant_type': 'refresh_token',
'client_id': self.auth.client_id,
'client_secret': self.auth.client_secret,
'refresh_token': self.auth.refresh_token,
}
)
data = response.json()
token_expiration = (datetime.datetime.utcnow() + datetime.timedelta(seconds=data['expires_in']))
self.auth.update(
token_expiration=token_expiration,
access_token=data['access_token'],
refresh_token=data['refresh_token'],
)
return {
'access_token': self.auth.access_token,
'refresh_token': self.auth.refresh_token,
'token_expiration': format_event_time(self.auth.token_expiration),
} | [
"def",
"refresh_authorization",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"request_handler",
".",
"post",
"(",
"endpoint",
"=",
"'oauth/token'",
",",
"omit_api_version",
"=",
"True",
",",
"data",
"=",
"{",
"'grant_type'",
":",
"'refresh_token'",
","... | Refreshes the authorization tokens.
:return: Dictionary containing auth tokens, expiration info, and response status.
:rtype: ``dict`` | [
"Refreshes",
"the",
"authorization",
"tokens",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L463-L490 | train | 33,523 |
cronofy/pycronofy | pycronofy/client.py | Client.revoke_authorization | def revoke_authorization(self):
"""Revokes Oauth authorization."""
self.request_handler.post(
endpoint='oauth/token/revoke',
omit_api_version=True,
data={
'client_id': self.auth.client_id,
'client_secret': self.auth.client_secret,
'token': self.auth.access_token,
}
)
self.auth.update(
token_expiration=None,
access_token=None,
refresh_token=None,
) | python | def revoke_authorization(self):
"""Revokes Oauth authorization."""
self.request_handler.post(
endpoint='oauth/token/revoke',
omit_api_version=True,
data={
'client_id': self.auth.client_id,
'client_secret': self.auth.client_secret,
'token': self.auth.access_token,
}
)
self.auth.update(
token_expiration=None,
access_token=None,
refresh_token=None,
) | [
"def",
"revoke_authorization",
"(",
"self",
")",
":",
"self",
".",
"request_handler",
".",
"post",
"(",
"endpoint",
"=",
"'oauth/token/revoke'",
",",
"omit_api_version",
"=",
"True",
",",
"data",
"=",
"{",
"'client_id'",
":",
"self",
".",
"auth",
".",
"clien... | Revokes Oauth authorization. | [
"Revokes",
"Oauth",
"authorization",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L492-L507 | train | 33,524 |
cronofy/pycronofy | pycronofy/client.py | Client.upsert_event | def upsert_event(self, calendar_id, event):
"""Inserts or updates an event for the specified calendar.
:param string calendar_id: ID of calendar to insert/update event into.
:param dict event: Dictionary of event data to send to cronofy.
"""
event['start'] = format_event_time(event['start'])
event['end'] = format_event_time(event['end'])
self.request_handler.post(
endpoint='calendars/%s/events' % calendar_id, data=event) | python | def upsert_event(self, calendar_id, event):
"""Inserts or updates an event for the specified calendar.
:param string calendar_id: ID of calendar to insert/update event into.
:param dict event: Dictionary of event data to send to cronofy.
"""
event['start'] = format_event_time(event['start'])
event['end'] = format_event_time(event['end'])
self.request_handler.post(
endpoint='calendars/%s/events' % calendar_id, data=event) | [
"def",
"upsert_event",
"(",
"self",
",",
"calendar_id",
",",
"event",
")",
":",
"event",
"[",
"'start'",
"]",
"=",
"format_event_time",
"(",
"event",
"[",
"'start'",
"]",
")",
"event",
"[",
"'end'",
"]",
"=",
"format_event_time",
"(",
"event",
"[",
"'end... | Inserts or updates an event for the specified calendar.
:param string calendar_id: ID of calendar to insert/update event into.
:param dict event: Dictionary of event data to send to cronofy. | [
"Inserts",
"or",
"updates",
"an",
"event",
"for",
"the",
"specified",
"calendar",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L509-L518 | train | 33,525 |
cronofy/pycronofy | pycronofy/client.py | Client.authorize_with_service_account | def authorize_with_service_account(self, email, scope, callback_url, state=None):
""" Attempts to authorize the email with impersonation from a service account
:param string email: the email address to impersonate
:param string callback_url: URL to callback with the OAuth code.
:param string scope: The scope of the privileges you want the eventual access_token to grant.
:return: nothing
"""
params = {
'email': email,
'scope': scope,
'callback_url': callback_url
}
if state is not None:
params['state'] = state
self.request_handler.post(
endpoint="service_account_authorizations", data=params)
None | python | def authorize_with_service_account(self, email, scope, callback_url, state=None):
""" Attempts to authorize the email with impersonation from a service account
:param string email: the email address to impersonate
:param string callback_url: URL to callback with the OAuth code.
:param string scope: The scope of the privileges you want the eventual access_token to grant.
:return: nothing
"""
params = {
'email': email,
'scope': scope,
'callback_url': callback_url
}
if state is not None:
params['state'] = state
self.request_handler.post(
endpoint="service_account_authorizations", data=params)
None | [
"def",
"authorize_with_service_account",
"(",
"self",
",",
"email",
",",
"scope",
",",
"callback_url",
",",
"state",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'email'",
":",
"email",
",",
"'scope'",
":",
"scope",
",",
"'callback_url'",
":",
"callback_url",... | Attempts to authorize the email with impersonation from a service account
:param string email: the email address to impersonate
:param string callback_url: URL to callback with the OAuth code.
:param string scope: The scope of the privileges you want the eventual access_token to grant.
:return: nothing | [
"Attempts",
"to",
"authorize",
"the",
"email",
"with",
"impersonation",
"from",
"a",
"service",
"account"
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L520-L539 | train | 33,526 |
cronofy/pycronofy | pycronofy/client.py | Client.real_time_scheduling | def real_time_scheduling(self, availability, oauth, event, target_calendars=()):
"""Generates an real time scheduling link to start the OAuth process with
an event to be automatically upserted
:param dict availability: - A dict describing the availability details for the event:
:participants - A dict stating who is required for the availability
call
:required_duration - A dict stating the length of time the event will
last for
:available_periods - A dict stating the available periods for the event
:start_interval - A Integer representing the start_interval of the event
:buffer - A dict representing the buffer for the event
:param dict oauth: - A dict describing the OAuth flow required:
:scope - A String representing the scopes to ask for
within the OAuth flow
:redirect_uri - A String containing a url to redirect the
user to after completing the OAuth flow.
:scope - A String representing additional state to
be passed within the OAuth flow.
:param dict event: - A dict describing the event
:param list target_calendars: - An list of dics stating into which calendars
to insert the created event
See http://www.cronofy.com/developers/api#upsert-event for reference.
"""
args = {
'oauth': oauth,
'event': event,
'target_calendars': target_calendars
}
if availability:
options = {}
options['participants'] = self.map_availability_participants(availability.get('participants', None))
options['required_duration'] = self.map_availability_required_duration(availability.get('required_duration', None))
options['start_interval'] = self.map_availability_required_duration(availability.get('start_interval', None))
options['buffer'] = self.map_availability_buffer(availability.get('buffer', None))
self.translate_available_periods(availability['available_periods'])
options['available_periods'] = availability['available_periods']
args['availability'] = options
return self.request_handler.post(endpoint='real_time_scheduling', data=args, use_api_key=True).json() | python | def real_time_scheduling(self, availability, oauth, event, target_calendars=()):
"""Generates an real time scheduling link to start the OAuth process with
an event to be automatically upserted
:param dict availability: - A dict describing the availability details for the event:
:participants - A dict stating who is required for the availability
call
:required_duration - A dict stating the length of time the event will
last for
:available_periods - A dict stating the available periods for the event
:start_interval - A Integer representing the start_interval of the event
:buffer - A dict representing the buffer for the event
:param dict oauth: - A dict describing the OAuth flow required:
:scope - A String representing the scopes to ask for
within the OAuth flow
:redirect_uri - A String containing a url to redirect the
user to after completing the OAuth flow.
:scope - A String representing additional state to
be passed within the OAuth flow.
:param dict event: - A dict describing the event
:param list target_calendars: - An list of dics stating into which calendars
to insert the created event
See http://www.cronofy.com/developers/api#upsert-event for reference.
"""
args = {
'oauth': oauth,
'event': event,
'target_calendars': target_calendars
}
if availability:
options = {}
options['participants'] = self.map_availability_participants(availability.get('participants', None))
options['required_duration'] = self.map_availability_required_duration(availability.get('required_duration', None))
options['start_interval'] = self.map_availability_required_duration(availability.get('start_interval', None))
options['buffer'] = self.map_availability_buffer(availability.get('buffer', None))
self.translate_available_periods(availability['available_periods'])
options['available_periods'] = availability['available_periods']
args['availability'] = options
return self.request_handler.post(endpoint='real_time_scheduling', data=args, use_api_key=True).json() | [
"def",
"real_time_scheduling",
"(",
"self",
",",
"availability",
",",
"oauth",
",",
"event",
",",
"target_calendars",
"=",
"(",
")",
")",
":",
"args",
"=",
"{",
"'oauth'",
":",
"oauth",
",",
"'event'",
":",
"event",
",",
"'target_calendars'",
":",
"target_... | Generates an real time scheduling link to start the OAuth process with
an event to be automatically upserted
:param dict availability: - A dict describing the availability details for the event:
:participants - A dict stating who is required for the availability
call
:required_duration - A dict stating the length of time the event will
last for
:available_periods - A dict stating the available periods for the event
:start_interval - A Integer representing the start_interval of the event
:buffer - A dict representing the buffer for the event
:param dict oauth: - A dict describing the OAuth flow required:
:scope - A String representing the scopes to ask for
within the OAuth flow
:redirect_uri - A String containing a url to redirect the
user to after completing the OAuth flow.
:scope - A String representing additional state to
be passed within the OAuth flow.
:param dict event: - A dict describing the event
:param list target_calendars: - An list of dics stating into which calendars
to insert the created event
See http://www.cronofy.com/developers/api#upsert-event for reference. | [
"Generates",
"an",
"real",
"time",
"scheduling",
"link",
"to",
"start",
"the",
"OAuth",
"process",
"with",
"an",
"event",
"to",
"be",
"automatically",
"upserted"
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L541-L582 | train | 33,527 |
cronofy/pycronofy | pycronofy/client.py | Client.real_time_sequencing | def real_time_sequencing(self, availability, oauth, event, target_calendars=()):
"""Generates an real time sequencing link to start the OAuth process with
an event to be automatically upserted
:param dict availability: - A dict describing the availability details for the event:
:sequence: An Array of dics representing sequences to find availability for
each sequence can contain.
:sequence_id - A string identifying this step in the sequence.
:ordinal - An Integer defining the order of this step in the sequence.
:participants - A dict stating who is required for the availability
call
:required_duration - A dict stating the length of time the event will
last for
:event - A dict describing the event
:available_periods - A dict stating the available periods for the step
:available_periods - A dict stating the available periods for the sequence
:param dict oauth: - A dict describing the OAuth flow required:
:scope - A String representing the scopes to ask for
within the OAuth flow
:redirect_uri - A String containing a url to redirect the
user to after completing the OAuth flow.
:scope - A String representing additional state to
be passed within the OAuth flow.
:param dict event: - A dict describing the event
:param list target_calendars: - An list of dics stating into which calendars
to insert the created event
See http://www.cronofy.com/developers/api#upsert-event for reference.
"""
args = {
'oauth': oauth,
'event': event,
'target_calendars': target_calendars
}
if availability:
options = {}
options['sequence'] = self.map_availability_sequence(availability.get('sequence', None))
if availability.get('available_periods', None):
self.translate_available_periods(availability['available_periods'])
options['available_periods'] = availability['available_periods']
args['availability'] = options
return self.request_handler.post(endpoint='real_time_sequencing', data=args, use_api_key=True).json() | python | def real_time_sequencing(self, availability, oauth, event, target_calendars=()):
"""Generates an real time sequencing link to start the OAuth process with
an event to be automatically upserted
:param dict availability: - A dict describing the availability details for the event:
:sequence: An Array of dics representing sequences to find availability for
each sequence can contain.
:sequence_id - A string identifying this step in the sequence.
:ordinal - An Integer defining the order of this step in the sequence.
:participants - A dict stating who is required for the availability
call
:required_duration - A dict stating the length of time the event will
last for
:event - A dict describing the event
:available_periods - A dict stating the available periods for the step
:available_periods - A dict stating the available periods for the sequence
:param dict oauth: - A dict describing the OAuth flow required:
:scope - A String representing the scopes to ask for
within the OAuth flow
:redirect_uri - A String containing a url to redirect the
user to after completing the OAuth flow.
:scope - A String representing additional state to
be passed within the OAuth flow.
:param dict event: - A dict describing the event
:param list target_calendars: - An list of dics stating into which calendars
to insert the created event
See http://www.cronofy.com/developers/api#upsert-event for reference.
"""
args = {
'oauth': oauth,
'event': event,
'target_calendars': target_calendars
}
if availability:
options = {}
options['sequence'] = self.map_availability_sequence(availability.get('sequence', None))
if availability.get('available_periods', None):
self.translate_available_periods(availability['available_periods'])
options['available_periods'] = availability['available_periods']
args['availability'] = options
return self.request_handler.post(endpoint='real_time_sequencing', data=args, use_api_key=True).json() | [
"def",
"real_time_sequencing",
"(",
"self",
",",
"availability",
",",
"oauth",
",",
"event",
",",
"target_calendars",
"=",
"(",
")",
")",
":",
"args",
"=",
"{",
"'oauth'",
":",
"oauth",
",",
"'event'",
":",
"event",
",",
"'target_calendars'",
":",
"target_... | Generates an real time sequencing link to start the OAuth process with
an event to be automatically upserted
:param dict availability: - A dict describing the availability details for the event:
:sequence: An Array of dics representing sequences to find availability for
each sequence can contain.
:sequence_id - A string identifying this step in the sequence.
:ordinal - An Integer defining the order of this step in the sequence.
:participants - A dict stating who is required for the availability
call
:required_duration - A dict stating the length of time the event will
last for
:event - A dict describing the event
:available_periods - A dict stating the available periods for the step
:available_periods - A dict stating the available periods for the sequence
:param dict oauth: - A dict describing the OAuth flow required:
:scope - A String representing the scopes to ask for
within the OAuth flow
:redirect_uri - A String containing a url to redirect the
user to after completing the OAuth flow.
:scope - A String representing additional state to
be passed within the OAuth flow.
:param dict event: - A dict describing the event
:param list target_calendars: - An list of dics stating into which calendars
to insert the created event
See http://www.cronofy.com/developers/api#upsert-event for reference. | [
"Generates",
"an",
"real",
"time",
"sequencing",
"link",
"to",
"start",
"the",
"OAuth",
"process",
"with",
"an",
"event",
"to",
"be",
"automatically",
"upserted"
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L584-L629 | train | 33,528 |
cronofy/pycronofy | pycronofy/client.py | Client.user_auth_link | def user_auth_link(self, redirect_uri, scope='', state='', avoid_linking=False):
"""Generates a URL to send the user for OAuth 2.0
:param string redirect_uri: URL to redirect the user to after auth.
:param string scope: The scope of the privileges you want the eventual access_token to grant.
:param string state: A value that will be returned to you unaltered along with the user's authorization request decision.
(The OAuth 2.0 RFC recommends using this to prevent cross-site request forgery.)
:param bool avoid_linking: Avoid linking calendar accounts together under one set of credentials. (Optional, default: false).
:return: authorization link
:rtype: ``string``
"""
if not scope:
scope = ' '.join(settings.DEFAULT_OAUTH_SCOPE)
self.auth.update(redirect_uri=redirect_uri)
url = '%s/oauth/authorize' % self.app_base_url
params = {
'response_type': 'code',
'client_id': self.auth.client_id,
'redirect_uri': redirect_uri,
'scope': scope,
'state': state,
'avoid_linking': avoid_linking,
}
urlencoded_params = urlencode(params)
return "{url}?{params}".format(url=url, params=urlencoded_params) | python | def user_auth_link(self, redirect_uri, scope='', state='', avoid_linking=False):
"""Generates a URL to send the user for OAuth 2.0
:param string redirect_uri: URL to redirect the user to after auth.
:param string scope: The scope of the privileges you want the eventual access_token to grant.
:param string state: A value that will be returned to you unaltered along with the user's authorization request decision.
(The OAuth 2.0 RFC recommends using this to prevent cross-site request forgery.)
:param bool avoid_linking: Avoid linking calendar accounts together under one set of credentials. (Optional, default: false).
:return: authorization link
:rtype: ``string``
"""
if not scope:
scope = ' '.join(settings.DEFAULT_OAUTH_SCOPE)
self.auth.update(redirect_uri=redirect_uri)
url = '%s/oauth/authorize' % self.app_base_url
params = {
'response_type': 'code',
'client_id': self.auth.client_id,
'redirect_uri': redirect_uri,
'scope': scope,
'state': state,
'avoid_linking': avoid_linking,
}
urlencoded_params = urlencode(params)
return "{url}?{params}".format(url=url, params=urlencoded_params) | [
"def",
"user_auth_link",
"(",
"self",
",",
"redirect_uri",
",",
"scope",
"=",
"''",
",",
"state",
"=",
"''",
",",
"avoid_linking",
"=",
"False",
")",
":",
"if",
"not",
"scope",
":",
"scope",
"=",
"' '",
".",
"join",
"(",
"settings",
".",
"DEFAULT_OAUTH... | Generates a URL to send the user for OAuth 2.0
:param string redirect_uri: URL to redirect the user to after auth.
:param string scope: The scope of the privileges you want the eventual access_token to grant.
:param string state: A value that will be returned to you unaltered along with the user's authorization request decision.
(The OAuth 2.0 RFC recommends using this to prevent cross-site request forgery.)
:param bool avoid_linking: Avoid linking calendar accounts together under one set of credentials. (Optional, default: false).
:return: authorization link
:rtype: ``string`` | [
"Generates",
"a",
"URL",
"to",
"send",
"the",
"user",
"for",
"OAuth",
"2",
".",
"0"
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L631-L657 | train | 33,529 |
cronofy/pycronofy | pycronofy/client.py | Client.validate | def validate(self, method, *args, **kwargs):
"""Validate authentication and values passed to the specified method.
Raises a PyCronofyValidationError on error.
:param string method: Method name to check.
:param *args: Arguments for "Method".
:param **kwargs: Keyword arguments for "Method".
"""
validate(method, self.auth, *args, **kwargs) | python | def validate(self, method, *args, **kwargs):
"""Validate authentication and values passed to the specified method.
Raises a PyCronofyValidationError on error.
:param string method: Method name to check.
:param *args: Arguments for "Method".
:param **kwargs: Keyword arguments for "Method".
"""
validate(method, self.auth, *args, **kwargs) | [
"def",
"validate",
"(",
"self",
",",
"method",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"validate",
"(",
"method",
",",
"self",
".",
"auth",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Validate authentication and values passed to the specified method.
Raises a PyCronofyValidationError on error.
:param string method: Method name to check.
:param *args: Arguments for "Method".
:param **kwargs: Keyword arguments for "Method". | [
"Validate",
"authentication",
"and",
"values",
"passed",
"to",
"the",
"specified",
"method",
".",
"Raises",
"a",
"PyCronofyValidationError",
"on",
"error",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/client.py#L659-L667 | train | 33,530 |
cronofy/pycronofy | pycronofy/pagination.py | Pages.all | def all(self):
"""Return all results as a list by automatically fetching all pages.
:return: All results.
:rtype: ``list``
"""
results = self.data[self.data_type]
while self.current < self.total:
self.fetch_next_page()
results.extend(self.data[self.data_type])
return results | python | def all(self):
"""Return all results as a list by automatically fetching all pages.
:return: All results.
:rtype: ``list``
"""
results = self.data[self.data_type]
while self.current < self.total:
self.fetch_next_page()
results.extend(self.data[self.data_type])
return results | [
"def",
"all",
"(",
"self",
")",
":",
"results",
"=",
"self",
".",
"data",
"[",
"self",
".",
"data_type",
"]",
"while",
"self",
".",
"current",
"<",
"self",
".",
"total",
":",
"self",
".",
"fetch_next_page",
"(",
")",
"results",
".",
"extend",
"(",
... | Return all results as a list by automatically fetching all pages.
:return: All results.
:rtype: ``list`` | [
"Return",
"all",
"results",
"as",
"a",
"list",
"by",
"automatically",
"fetching",
"all",
"pages",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/pagination.py#L27-L37 | train | 33,531 |
cronofy/pycronofy | pycronofy/pagination.py | Pages.fetch_next_page | def fetch_next_page(self):
"""Retrieves the next page of data and refreshes Pages instance."""
result = self.request_handler.get(url=self.next_page_url).json()
self.__init__(self.request_handler, result,
self.data_type, self.automatic_pagination) | python | def fetch_next_page(self):
"""Retrieves the next page of data and refreshes Pages instance."""
result = self.request_handler.get(url=self.next_page_url).json()
self.__init__(self.request_handler, result,
self.data_type, self.automatic_pagination) | [
"def",
"fetch_next_page",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"request_handler",
".",
"get",
"(",
"url",
"=",
"self",
".",
"next_page_url",
")",
".",
"json",
"(",
")",
"self",
".",
"__init__",
"(",
"self",
".",
"request_handler",
",",
"r... | Retrieves the next page of data and refreshes Pages instance. | [
"Retrieves",
"the",
"next",
"page",
"of",
"data",
"and",
"refreshes",
"Pages",
"instance",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/pagination.py#L47-L51 | train | 33,532 |
cronofy/pycronofy | pycronofy/validation.py | check_datetime | def check_datetime(method, dictionary, fields, label=None):
"""Checks if the specified fields are formatted correctly if they have a value.
Throws an exception on incorrectly formatted fields.
:param dict dictionary: Dictionary to check.
:param typle fields: Fields to check.
:param string label: Dictionary name.
"""
improperly_formatted = []
values = []
for field in fields:
if field in dictionary and dictionary[field] is not None:
if type(dictionary[field]) not in (datetime.datetime, datetime.date) and not ISO_8601_REGEX.match(dictionary[field]):
improperly_formatted.append(field)
values.append(dictionary[field])
if improperly_formatted:
error_label = ' for "%s"' % label if label else ''
raise PyCronofyValidationError(
'Method: %s. Improperly formatted datetime/date field(s)%s: %s\n%s' % (
method, error_label, improperly_formatted, values),
method,
improperly_formatted,
values
) | python | def check_datetime(method, dictionary, fields, label=None):
"""Checks if the specified fields are formatted correctly if they have a value.
Throws an exception on incorrectly formatted fields.
:param dict dictionary: Dictionary to check.
:param typle fields: Fields to check.
:param string label: Dictionary name.
"""
improperly_formatted = []
values = []
for field in fields:
if field in dictionary and dictionary[field] is not None:
if type(dictionary[field]) not in (datetime.datetime, datetime.date) and not ISO_8601_REGEX.match(dictionary[field]):
improperly_formatted.append(field)
values.append(dictionary[field])
if improperly_formatted:
error_label = ' for "%s"' % label if label else ''
raise PyCronofyValidationError(
'Method: %s. Improperly formatted datetime/date field(s)%s: %s\n%s' % (
method, error_label, improperly_formatted, values),
method,
improperly_formatted,
values
) | [
"def",
"check_datetime",
"(",
"method",
",",
"dictionary",
",",
"fields",
",",
"label",
"=",
"None",
")",
":",
"improperly_formatted",
"=",
"[",
"]",
"values",
"=",
"[",
"]",
"for",
"field",
"in",
"fields",
":",
"if",
"field",
"in",
"dictionary",
"and",
... | Checks if the specified fields are formatted correctly if they have a value.
Throws an exception on incorrectly formatted fields.
:param dict dictionary: Dictionary to check.
:param typle fields: Fields to check.
:param string label: Dictionary name. | [
"Checks",
"if",
"the",
"specified",
"fields",
"are",
"formatted",
"correctly",
"if",
"they",
"have",
"a",
"value",
".",
"Throws",
"an",
"exception",
"on",
"incorrectly",
"formatted",
"fields",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/validation.py#L116-L139 | train | 33,533 |
cronofy/pycronofy | pycronofy/validation.py | validate | def validate(method, auth, *args, **kwargs):
"""Validate a method based on the METHOD_RULES above.
Raises a PyCronofyValidationError on error.
:param string method: Method being validated.
:param Auth auth: Auth instance.
:param *args: Positional arguments for method.
:param **kwargs: Keyword arguments for method.
"""
if method not in METHOD_RULES:
raise PyCronofyValidationError('Method "%s" not found.' % method, method)
m = METHOD_RULES[method]
arguments = {}
number_of_args = len(args)
for i, key in enumerate(m['args']):
if i < number_of_args:
arguments[key] = args[i]
elif key in kwargs:
arguments[key] = kwargs[key]
else:
arguments[key] = None
check_exists_in_object(method, auth, m['auth'])
if 'required' in m:
check_exists_in_dictionary(method, arguments, m['required'])
if 'datetime' in m:
check_datetime(method, arguments, m['datetime'])
if 'dicts' in m:
for d in m['dicts']:
check_exists_in_dictionary(method, arguments[d], m['dicts'][d], d)
if 'dicts_datetime' in m:
for d in m['dicts_datetime']:
check_datetime(method, arguments[d], m['dicts_datetime'][d], d) | python | def validate(method, auth, *args, **kwargs):
"""Validate a method based on the METHOD_RULES above.
Raises a PyCronofyValidationError on error.
:param string method: Method being validated.
:param Auth auth: Auth instance.
:param *args: Positional arguments for method.
:param **kwargs: Keyword arguments for method.
"""
if method not in METHOD_RULES:
raise PyCronofyValidationError('Method "%s" not found.' % method, method)
m = METHOD_RULES[method]
arguments = {}
number_of_args = len(args)
for i, key in enumerate(m['args']):
if i < number_of_args:
arguments[key] = args[i]
elif key in kwargs:
arguments[key] = kwargs[key]
else:
arguments[key] = None
check_exists_in_object(method, auth, m['auth'])
if 'required' in m:
check_exists_in_dictionary(method, arguments, m['required'])
if 'datetime' in m:
check_datetime(method, arguments, m['datetime'])
if 'dicts' in m:
for d in m['dicts']:
check_exists_in_dictionary(method, arguments[d], m['dicts'][d], d)
if 'dicts_datetime' in m:
for d in m['dicts_datetime']:
check_datetime(method, arguments[d], m['dicts_datetime'][d], d) | [
"def",
"validate",
"(",
"method",
",",
"auth",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"method",
"not",
"in",
"METHOD_RULES",
":",
"raise",
"PyCronofyValidationError",
"(",
"'Method \"%s\" not found.'",
"%",
"method",
",",
"method",
")",
... | Validate a method based on the METHOD_RULES above.
Raises a PyCronofyValidationError on error.
:param string method: Method being validated.
:param Auth auth: Auth instance.
:param *args: Positional arguments for method.
:param **kwargs: Keyword arguments for method. | [
"Validate",
"a",
"method",
"based",
"on",
"the",
"METHOD_RULES",
"above",
"."
] | 3d807603029478fa9387a9dfb6c3acd9faa4f08e | https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/validation.py#L162-L195 | train | 33,534 |
AWegnerGitHub/stackapi | fabfile.py | release | def release(part='patch'):
""" Automated software release workflow
* (Configurably) bumps the version number
* Tags the release
You can run it like::
$ fab release
which, by default, will create a 'patch' release (0.0.1 => 0.0.2).
You can also specify a patch level (patch, minor, major) to change to::
$ fab release:part=major
which will create a 'major' release (0.0.2 => 1.0.0).
"""
# Dry run 'bumpversion' to find out what the new version number
# would be. Useful side effect: exits if the working directory is not
# clean.
bumpver = subprocess.check_output(
['bumpversion', part, '--dry-run', '--verbose'],
stderr=subprocess.STDOUT)
m = re.search(r'New version will be \'(\d+\.\d+\.\d+)\'', bumpver.decode('utf-8'))
version = m.groups(0)[0]
# Really run bumpver to set the new release and tag
bv_args = ['bumpversion', part]
bv_args += ['--new-version', version]
subprocess.check_output(bv_args) | python | def release(part='patch'):
""" Automated software release workflow
* (Configurably) bumps the version number
* Tags the release
You can run it like::
$ fab release
which, by default, will create a 'patch' release (0.0.1 => 0.0.2).
You can also specify a patch level (patch, minor, major) to change to::
$ fab release:part=major
which will create a 'major' release (0.0.2 => 1.0.0).
"""
# Dry run 'bumpversion' to find out what the new version number
# would be. Useful side effect: exits if the working directory is not
# clean.
bumpver = subprocess.check_output(
['bumpversion', part, '--dry-run', '--verbose'],
stderr=subprocess.STDOUT)
m = re.search(r'New version will be \'(\d+\.\d+\.\d+)\'', bumpver.decode('utf-8'))
version = m.groups(0)[0]
# Really run bumpver to set the new release and tag
bv_args = ['bumpversion', part]
bv_args += ['--new-version', version]
subprocess.check_output(bv_args) | [
"def",
"release",
"(",
"part",
"=",
"'patch'",
")",
":",
"# Dry run 'bumpversion' to find out what the new version number",
"# would be. Useful side effect: exits if the working directory is not",
"# clean.",
"bumpver",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'bumpvers... | Automated software release workflow
* (Configurably) bumps the version number
* Tags the release
You can run it like::
$ fab release
which, by default, will create a 'patch' release (0.0.1 => 0.0.2).
You can also specify a patch level (patch, minor, major) to change to::
$ fab release:part=major
which will create a 'major' release (0.0.2 => 1.0.0). | [
"Automated",
"software",
"release",
"workflow"
] | 146c2c5a201aa51dc8218a6e03d3e903b1d2c36d | https://github.com/AWegnerGitHub/stackapi/blob/146c2c5a201aa51dc8218a6e03d3e903b1d2c36d/fabfile.py#L12-L47 | train | 33,535 |
Commonists/CommonsDownloader | commonsdownloader/thumbnaildownload.py | make_thumbnail_name | def make_thumbnail_name(image_name, extension):
"""Return name of the downloaded thumbnail, based on the extension."""
file_name, _ = os.path.splitext(image_name)
return file_name + '.' + clean_extension(extension) | python | def make_thumbnail_name(image_name, extension):
"""Return name of the downloaded thumbnail, based on the extension."""
file_name, _ = os.path.splitext(image_name)
return file_name + '.' + clean_extension(extension) | [
"def",
"make_thumbnail_name",
"(",
"image_name",
",",
"extension",
")",
":",
"file_name",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"image_name",
")",
"return",
"file_name",
"+",
"'.'",
"+",
"clean_extension",
"(",
"extension",
")"
] | Return name of the downloaded thumbnail, based on the extension. | [
"Return",
"name",
"of",
"the",
"downloaded",
"thumbnail",
"based",
"on",
"the",
"extension",
"."
] | ac8147432b31ce3cdee5f7a75d0c48b788ee4666 | https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/thumbnaildownload.py#L65-L68 | train | 33,536 |
Commonists/CommonsDownloader | commonsdownloader/thumbnaildownload.py | get_thumbnail_of_file | def get_thumbnail_of_file(image_name, width):
"""Return the file contents of the thumbnail of the given file."""
hdr = {'User-Agent': 'Python urllib2'}
url = make_thumb_url(image_name, width)
req = urllib2.Request(url, headers=hdr)
try:
logging.debug("Retrieving %s", url)
opened = urllib2.urlopen(req)
extension = opened.headers.subtype
return opened.read(), make_thumbnail_name(image_name, extension)
except urllib2.HTTPError, e:
message = e.fp.read()
raise get_exception_based_on_api_message(message, image_name) | python | def get_thumbnail_of_file(image_name, width):
"""Return the file contents of the thumbnail of the given file."""
hdr = {'User-Agent': 'Python urllib2'}
url = make_thumb_url(image_name, width)
req = urllib2.Request(url, headers=hdr)
try:
logging.debug("Retrieving %s", url)
opened = urllib2.urlopen(req)
extension = opened.headers.subtype
return opened.read(), make_thumbnail_name(image_name, extension)
except urllib2.HTTPError, e:
message = e.fp.read()
raise get_exception_based_on_api_message(message, image_name) | [
"def",
"get_thumbnail_of_file",
"(",
"image_name",
",",
"width",
")",
":",
"hdr",
"=",
"{",
"'User-Agent'",
":",
"'Python urllib2'",
"}",
"url",
"=",
"make_thumb_url",
"(",
"image_name",
",",
"width",
")",
"req",
"=",
"urllib2",
".",
"Request",
"(",
"url",
... | Return the file contents of the thumbnail of the given file. | [
"Return",
"the",
"file",
"contents",
"of",
"the",
"thumbnail",
"of",
"the",
"given",
"file",
"."
] | ac8147432b31ce3cdee5f7a75d0c48b788ee4666 | https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/thumbnaildownload.py#L71-L83 | train | 33,537 |
Commonists/CommonsDownloader | commonsdownloader/thumbnaildownload.py | get_exception_based_on_api_message | def get_exception_based_on_api_message(message, image_name=""):
"""Return the exception matching the given API error message."""
msg_bigger_than_source = re.compile('Image was not scaled, is the requested width bigger than the source?')
msg_does_not_exist = re.compile('The source file .* does not exist')
msg_does_not_exist_bis = re.compile('<div class="error"><p>Value not found')
if re.search(msg_bigger_than_source, message):
msg = "File %s requested at a width bigger than source" % image_name
return RequestedWidthBiggerThanSourceException(msg)
elif re.search(msg_does_not_exist, message):
msg = "File %s does not exist" % image_name
return FileDoesNotExistException(msg)
elif re.search(msg_does_not_exist_bis, message):
msg = "File %s does not exist" % image_name
return FileDoesNotExistException(msg)
else:
return DownloadException(message) | python | def get_exception_based_on_api_message(message, image_name=""):
"""Return the exception matching the given API error message."""
msg_bigger_than_source = re.compile('Image was not scaled, is the requested width bigger than the source?')
msg_does_not_exist = re.compile('The source file .* does not exist')
msg_does_not_exist_bis = re.compile('<div class="error"><p>Value not found')
if re.search(msg_bigger_than_source, message):
msg = "File %s requested at a width bigger than source" % image_name
return RequestedWidthBiggerThanSourceException(msg)
elif re.search(msg_does_not_exist, message):
msg = "File %s does not exist" % image_name
return FileDoesNotExistException(msg)
elif re.search(msg_does_not_exist_bis, message):
msg = "File %s does not exist" % image_name
return FileDoesNotExistException(msg)
else:
return DownloadException(message) | [
"def",
"get_exception_based_on_api_message",
"(",
"message",
",",
"image_name",
"=",
"\"\"",
")",
":",
"msg_bigger_than_source",
"=",
"re",
".",
"compile",
"(",
"'Image was not scaled, is the requested width bigger than the source?'",
")",
"msg_does_not_exist",
"=",
"re",
"... | Return the exception matching the given API error message. | [
"Return",
"the",
"exception",
"matching",
"the",
"given",
"API",
"error",
"message",
"."
] | ac8147432b31ce3cdee5f7a75d0c48b788ee4666 | https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/thumbnaildownload.py#L101-L116 | train | 33,538 |
Commonists/CommonsDownloader | commonsdownloader/thumbnaildownload.py | download_file | def download_file(image_name, output_path, width=DEFAULT_WIDTH):
"""Download a given Wikimedia Commons file."""
image_name = clean_up_filename(image_name)
logging.info("Downloading %s with width %s", image_name, width)
try:
contents, output_file_name = get_thumbnail_of_file(image_name, width)
except RequestedWidthBiggerThanSourceException:
logging.warning("Requested width is bigger than source - downloading full size")
contents, output_file_name = get_full_size_file(image_name)
output_file_path = os.path.join(output_path, output_file_name)
try:
with open(output_file_path, 'wb') as f:
logging.debug("Writing as %s", output_file_path)
f.write(contents)
return output_file_path
except IOError, e:
msg = 'Could not write file %s on disk to %s: %s' % \
(image_name, output_path, e.message)
logging.error(msg)
raise CouldNotWriteFileOnDiskException(msg)
except Exception, e:
logging.critical(e.message)
msg = 'An unexpected error occured when downloading %s to %s: %s' % \
(image_name, output_path, e.message)
raise DownloadException(msg) | python | def download_file(image_name, output_path, width=DEFAULT_WIDTH):
"""Download a given Wikimedia Commons file."""
image_name = clean_up_filename(image_name)
logging.info("Downloading %s with width %s", image_name, width)
try:
contents, output_file_name = get_thumbnail_of_file(image_name, width)
except RequestedWidthBiggerThanSourceException:
logging.warning("Requested width is bigger than source - downloading full size")
contents, output_file_name = get_full_size_file(image_name)
output_file_path = os.path.join(output_path, output_file_name)
try:
with open(output_file_path, 'wb') as f:
logging.debug("Writing as %s", output_file_path)
f.write(contents)
return output_file_path
except IOError, e:
msg = 'Could not write file %s on disk to %s: %s' % \
(image_name, output_path, e.message)
logging.error(msg)
raise CouldNotWriteFileOnDiskException(msg)
except Exception, e:
logging.critical(e.message)
msg = 'An unexpected error occured when downloading %s to %s: %s' % \
(image_name, output_path, e.message)
raise DownloadException(msg) | [
"def",
"download_file",
"(",
"image_name",
",",
"output_path",
",",
"width",
"=",
"DEFAULT_WIDTH",
")",
":",
"image_name",
"=",
"clean_up_filename",
"(",
"image_name",
")",
"logging",
".",
"info",
"(",
"\"Downloading %s with width %s\"",
",",
"image_name",
",",
"w... | Download a given Wikimedia Commons file. | [
"Download",
"a",
"given",
"Wikimedia",
"Commons",
"file",
"."
] | ac8147432b31ce3cdee5f7a75d0c48b788ee4666 | https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/thumbnaildownload.py#L119-L143 | train | 33,539 |
Commonists/CommonsDownloader | commonsdownloader/commonsdownloader.py | get_category_files_from_api | def get_category_files_from_api(category_name):
"""Yield the file names of a category by querying the MediaWiki API."""
import mwclient
site = mwclient.Site('commons.wikimedia.org')
category = site.Categories[category_name]
return (x.page_title.encode('utf-8')
for x in category.members(namespace=6)) | python | def get_category_files_from_api(category_name):
"""Yield the file names of a category by querying the MediaWiki API."""
import mwclient
site = mwclient.Site('commons.wikimedia.org')
category = site.Categories[category_name]
return (x.page_title.encode('utf-8')
for x in category.members(namespace=6)) | [
"def",
"get_category_files_from_api",
"(",
"category_name",
")",
":",
"import",
"mwclient",
"site",
"=",
"mwclient",
".",
"Site",
"(",
"'commons.wikimedia.org'",
")",
"category",
"=",
"site",
".",
"Categories",
"[",
"category_name",
"]",
"return",
"(",
"x",
".",... | Yield the file names of a category by querying the MediaWiki API. | [
"Yield",
"the",
"file",
"names",
"of",
"a",
"category",
"by",
"querying",
"the",
"MediaWiki",
"API",
"."
] | ac8147432b31ce3cdee5f7a75d0c48b788ee4666 | https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/commonsdownloader.py#L18-L24 | train | 33,540 |
Commonists/CommonsDownloader | commonsdownloader/commonsdownloader.py | download_from_category | def download_from_category(category_name, output_path, width):
"""Download files of a given category."""
file_names = get_category_files_from_api(category_name)
files_to_download = izip_longest(file_names, [], fillvalue=width)
download_files_if_not_in_manifest(files_to_download, output_path) | python | def download_from_category(category_name, output_path, width):
"""Download files of a given category."""
file_names = get_category_files_from_api(category_name)
files_to_download = izip_longest(file_names, [], fillvalue=width)
download_files_if_not_in_manifest(files_to_download, output_path) | [
"def",
"download_from_category",
"(",
"category_name",
",",
"output_path",
",",
"width",
")",
":",
"file_names",
"=",
"get_category_files_from_api",
"(",
"category_name",
")",
"files_to_download",
"=",
"izip_longest",
"(",
"file_names",
",",
"[",
"]",
",",
"fillvalu... | Download files of a given category. | [
"Download",
"files",
"of",
"a",
"given",
"category",
"."
] | ac8147432b31ce3cdee5f7a75d0c48b788ee4666 | https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/commonsdownloader.py#L27-L31 | train | 33,541 |
Commonists/CommonsDownloader | commonsdownloader/commonsdownloader.py | get_files_from_textfile | def get_files_from_textfile(textfile_handler):
"""Yield the file names and widths by parsing a text file handler."""
for line in textfile_handler:
line = line.rstrip()
try:
(image_name, width) = line.rsplit(',', 1)
width = int(width)
except ValueError:
image_name = line
width = None
yield (image_name, width) | python | def get_files_from_textfile(textfile_handler):
"""Yield the file names and widths by parsing a text file handler."""
for line in textfile_handler:
line = line.rstrip()
try:
(image_name, width) = line.rsplit(',', 1)
width = int(width)
except ValueError:
image_name = line
width = None
yield (image_name, width) | [
"def",
"get_files_from_textfile",
"(",
"textfile_handler",
")",
":",
"for",
"line",
"in",
"textfile_handler",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
")",
"try",
":",
"(",
"image_name",
",",
"width",
")",
"=",
"line",
".",
"rsplit",
"(",
"','",
","... | Yield the file names and widths by parsing a text file handler. | [
"Yield",
"the",
"file",
"names",
"and",
"widths",
"by",
"parsing",
"a",
"text",
"file",
"handler",
"."
] | ac8147432b31ce3cdee5f7a75d0c48b788ee4666 | https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/commonsdownloader.py#L34-L44 | train | 33,542 |
Commonists/CommonsDownloader | commonsdownloader/commonsdownloader.py | download_from_files | def download_from_files(files, output_path, width):
"""Download files from a given file list."""
files_to_download = get_files_from_arguments(files, width)
download_files_if_not_in_manifest(files_to_download, output_path) | python | def download_from_files(files, output_path, width):
"""Download files from a given file list."""
files_to_download = get_files_from_arguments(files, width)
download_files_if_not_in_manifest(files_to_download, output_path) | [
"def",
"download_from_files",
"(",
"files",
",",
"output_path",
",",
"width",
")",
":",
"files_to_download",
"=",
"get_files_from_arguments",
"(",
"files",
",",
"width",
")",
"download_files_if_not_in_manifest",
"(",
"files_to_download",
",",
"output_path",
")"
] | Download files from a given file list. | [
"Download",
"files",
"from",
"a",
"given",
"file",
"list",
"."
] | ac8147432b31ce3cdee5f7a75d0c48b788ee4666 | https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/commonsdownloader.py#L58-L61 | train | 33,543 |
Commonists/CommonsDownloader | commonsdownloader/commonsdownloader.py | read_local_manifest | def read_local_manifest(output_path):
"""Return the contents of the local manifest, as a dictionary."""
local_manifest_path = get_local_manifest_path(output_path)
try:
with open(local_manifest_path, 'r') as f:
manifest = dict(get_files_from_textfile(f))
logging.debug('Retrieving %s elements from manifest', len(manifest))
return manifest
except IOError:
logging.debug('No local manifest at %s', local_manifest_path)
return {} | python | def read_local_manifest(output_path):
"""Return the contents of the local manifest, as a dictionary."""
local_manifest_path = get_local_manifest_path(output_path)
try:
with open(local_manifest_path, 'r') as f:
manifest = dict(get_files_from_textfile(f))
logging.debug('Retrieving %s elements from manifest', len(manifest))
return manifest
except IOError:
logging.debug('No local manifest at %s', local_manifest_path)
return {} | [
"def",
"read_local_manifest",
"(",
"output_path",
")",
":",
"local_manifest_path",
"=",
"get_local_manifest_path",
"(",
"output_path",
")",
"try",
":",
"with",
"open",
"(",
"local_manifest_path",
",",
"'r'",
")",
"as",
"f",
":",
"manifest",
"=",
"dict",
"(",
"... | Return the contents of the local manifest, as a dictionary. | [
"Return",
"the",
"contents",
"of",
"the",
"local",
"manifest",
"as",
"a",
"dictionary",
"."
] | ac8147432b31ce3cdee5f7a75d0c48b788ee4666 | https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/commonsdownloader.py#L69-L79 | train | 33,544 |
Commonists/CommonsDownloader | commonsdownloader/commonsdownloader.py | write_file_to_manifest | def write_file_to_manifest(file_name, width, manifest_fh):
"""Write the given file in manifest."""
manifest_fh.write("%s,%s\n" % (file_name, str(width)))
logging.debug("Wrote file %s to manifest", file_name) | python | def write_file_to_manifest(file_name, width, manifest_fh):
"""Write the given file in manifest."""
manifest_fh.write("%s,%s\n" % (file_name, str(width)))
logging.debug("Wrote file %s to manifest", file_name) | [
"def",
"write_file_to_manifest",
"(",
"file_name",
",",
"width",
",",
"manifest_fh",
")",
":",
"manifest_fh",
".",
"write",
"(",
"\"%s,%s\\n\"",
"%",
"(",
"file_name",
",",
"str",
"(",
"width",
")",
")",
")",
"logging",
".",
"debug",
"(",
"\"Wrote file %s to... | Write the given file in manifest. | [
"Write",
"the",
"given",
"file",
"in",
"manifest",
"."
] | ac8147432b31ce3cdee5f7a75d0c48b788ee4666 | https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/commonsdownloader.py#L87-L90 | train | 33,545 |
Commonists/CommonsDownloader | commonsdownloader/commonsdownloader.py | download_files_if_not_in_manifest | def download_files_if_not_in_manifest(files_iterator, output_path):
"""Download the given files to the given path, unless in manifest."""
local_manifest = read_local_manifest(output_path)
with open(get_local_manifest_path(output_path), 'a') as manifest_fh:
for (file_name, width) in files_iterator:
if is_file_in_manifest(file_name, width, local_manifest):
logging.info('Skipping file %s', file_name)
continue
try:
download_file(file_name, output_path, width=width)
write_file_to_manifest(file_name, width, manifest_fh)
except DownloadException, e:
logging.error("Could not download %s: %s", file_name, e.message) | python | def download_files_if_not_in_manifest(files_iterator, output_path):
"""Download the given files to the given path, unless in manifest."""
local_manifest = read_local_manifest(output_path)
with open(get_local_manifest_path(output_path), 'a') as manifest_fh:
for (file_name, width) in files_iterator:
if is_file_in_manifest(file_name, width, local_manifest):
logging.info('Skipping file %s', file_name)
continue
try:
download_file(file_name, output_path, width=width)
write_file_to_manifest(file_name, width, manifest_fh)
except DownloadException, e:
logging.error("Could not download %s: %s", file_name, e.message) | [
"def",
"download_files_if_not_in_manifest",
"(",
"files_iterator",
",",
"output_path",
")",
":",
"local_manifest",
"=",
"read_local_manifest",
"(",
"output_path",
")",
"with",
"open",
"(",
"get_local_manifest_path",
"(",
"output_path",
")",
",",
"'a'",
")",
"as",
"m... | Download the given files to the given path, unless in manifest. | [
"Download",
"the",
"given",
"files",
"to",
"the",
"given",
"path",
"unless",
"in",
"manifest",
"."
] | ac8147432b31ce3cdee5f7a75d0c48b788ee4666 | https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/commonsdownloader.py#L93-L105 | train | 33,546 |
Commonists/CommonsDownloader | commonsdownloader/commonsdownloader.py | main | def main():
"""Main method, entry point of the script."""
from argparse import ArgumentParser
description = "Download a bunch of thumbnails from Wikimedia Commons"
parser = ArgumentParser(description=description)
source_group = parser.add_mutually_exclusive_group()
source_group.add_argument("-l", "--list", metavar="LIST",
dest="file_list",
type=argparse.FileType('r'),
help='A list of files <filename,width>')
source_group.add_argument("-c", "--category", metavar="CATEGORY",
dest="category_name",
type=str,
help='A category name (without prefix)')
parser.add_argument("files", nargs='*',
metavar="FILES",
help='A list of filenames')
parser.add_argument("-o", "--output", metavar="FOLDER",
dest="output_path",
action=Folder,
default=os.getcwd(),
help='The directory to download the files to')
parser.add_argument("-w", "--width",
dest="width",
type=int,
default=100,
help='The width of the thumbnail (default: 100)')
verbosity_group = parser.add_mutually_exclusive_group()
verbosity_group.add_argument("-v",
action="count",
dest="verbose",
default=1,
help="Verbosity level. -v for DEBUG")
verbosity_group.add_argument("-q", "--quiet",
action="store_const",
dest="verbose",
const=0,
help="To silence the INFO messages")
args = parser.parse_args()
logging_map = {0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG}
logging_level = logging_map.get(args.verbose, logging.DEBUG)
logging.basicConfig(level=logging_level)
logging.info("Starting")
if args.file_list:
download_from_file_list(args.file_list, args.output_path)
elif args.category_name:
download_from_category(args.category_name, args.output_path, args.width)
elif args.files:
download_from_files(args.files, args.output_path, args.width)
else:
parser.print_help() | python | def main():
"""Main method, entry point of the script."""
from argparse import ArgumentParser
description = "Download a bunch of thumbnails from Wikimedia Commons"
parser = ArgumentParser(description=description)
source_group = parser.add_mutually_exclusive_group()
source_group.add_argument("-l", "--list", metavar="LIST",
dest="file_list",
type=argparse.FileType('r'),
help='A list of files <filename,width>')
source_group.add_argument("-c", "--category", metavar="CATEGORY",
dest="category_name",
type=str,
help='A category name (without prefix)')
parser.add_argument("files", nargs='*',
metavar="FILES",
help='A list of filenames')
parser.add_argument("-o", "--output", metavar="FOLDER",
dest="output_path",
action=Folder,
default=os.getcwd(),
help='The directory to download the files to')
parser.add_argument("-w", "--width",
dest="width",
type=int,
default=100,
help='The width of the thumbnail (default: 100)')
verbosity_group = parser.add_mutually_exclusive_group()
verbosity_group.add_argument("-v",
action="count",
dest="verbose",
default=1,
help="Verbosity level. -v for DEBUG")
verbosity_group.add_argument("-q", "--quiet",
action="store_const",
dest="verbose",
const=0,
help="To silence the INFO messages")
args = parser.parse_args()
logging_map = {0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG}
logging_level = logging_map.get(args.verbose, logging.DEBUG)
logging.basicConfig(level=logging_level)
logging.info("Starting")
if args.file_list:
download_from_file_list(args.file_list, args.output_path)
elif args.category_name:
download_from_category(args.category_name, args.output_path, args.width)
elif args.files:
download_from_files(args.files, args.output_path, args.width)
else:
parser.print_help() | [
"def",
"main",
"(",
")",
":",
"from",
"argparse",
"import",
"ArgumentParser",
"description",
"=",
"\"Download a bunch of thumbnails from Wikimedia Commons\"",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"description",
")",
"source_group",
"=",
"parser",
".... | Main method, entry point of the script. | [
"Main",
"method",
"entry",
"point",
"of",
"the",
"script",
"."
] | ac8147432b31ce3cdee5f7a75d0c48b788ee4666 | https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/commonsdownloader.py#L122-L175 | train | 33,547 |
lnoor/sphinx-jsonschema | sphinx-jsonschema/__init__.py | JsonSchema.ordered_load | def ordered_load(self, stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
"""Allows you to use `pyyaml` to load as OrderedDict.
Taken from https://stackoverflow.com/a/21912744/1927102
"""
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
try:
try:
result = yaml.load(stream, OrderedLoader)
except yaml.scanner.ScannerError:
if type(stream) == str:
result = json.loads(stream, object_pairs_hook=object_pairs_hook)
else:
stream.seek(0)
result = json.load(stream, object_pairs_hook=object_pairs_hook)
except Exception as e:
self.error(e)
result = {}
return result | python | def ordered_load(self, stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
"""Allows you to use `pyyaml` to load as OrderedDict.
Taken from https://stackoverflow.com/a/21912744/1927102
"""
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
try:
try:
result = yaml.load(stream, OrderedLoader)
except yaml.scanner.ScannerError:
if type(stream) == str:
result = json.loads(stream, object_pairs_hook=object_pairs_hook)
else:
stream.seek(0)
result = json.load(stream, object_pairs_hook=object_pairs_hook)
except Exception as e:
self.error(e)
result = {}
return result | [
"def",
"ordered_load",
"(",
"self",
",",
"stream",
",",
"Loader",
"=",
"yaml",
".",
"Loader",
",",
"object_pairs_hook",
"=",
"OrderedDict",
")",
":",
"class",
"OrderedLoader",
"(",
"Loader",
")",
":",
"pass",
"def",
"construct_mapping",
"(",
"loader",
",",
... | Allows you to use `pyyaml` to load as OrderedDict.
Taken from https://stackoverflow.com/a/21912744/1927102 | [
"Allows",
"you",
"to",
"use",
"pyyaml",
"to",
"load",
"as",
"OrderedDict",
"."
] | e348b7ecf2ff67ab40dc80b02d0cb17e983aa445 | https://github.com/lnoor/sphinx-jsonschema/blob/e348b7ecf2ff67ab40dc80b02d0cb17e983aa445/sphinx-jsonschema/__init__.py#L83-L109 | train | 33,548 |
fgimian/paramiko-expect | paramiko_expect.py | SSHClientInteraction.send | def send(self, send_string, newline=None):
"""Saves and sends the send string provided."""
self.current_send_string = send_string
newline = newline if newline is not None else self.newline
self.channel.send(send_string + newline) | python | def send(self, send_string, newline=None):
"""Saves and sends the send string provided."""
self.current_send_string = send_string
newline = newline if newline is not None else self.newline
self.channel.send(send_string + newline) | [
"def",
"send",
"(",
"self",
",",
"send_string",
",",
"newline",
"=",
"None",
")",
":",
"self",
".",
"current_send_string",
"=",
"send_string",
"newline",
"=",
"newline",
"if",
"newline",
"is",
"not",
"None",
"else",
"self",
".",
"newline",
"self",
".",
"... | Saves and sends the send string provided. | [
"Saves",
"and",
"sends",
"the",
"send",
"string",
"provided",
"."
] | 33dd3af745eb420d41e4ae7145b2ed536b2fccf0 | https://github.com/fgimian/paramiko-expect/blob/33dd3af745eb420d41e4ae7145b2ed536b2fccf0/paramiko_expect.py#L208-L213 | train | 33,549 |
fgimian/paramiko-expect | paramiko_expect.py | SSHClientInteraction.tail | def tail(
self, line_prefix=None, callback=None, output_callback=None, stop_callback=lambda x: False,
timeout=None
):
"""
This function takes control of an SSH channel and displays line
by line of output as \n is recieved. This function is specifically
made for tail-like commands.
:param line_prefix: Text to append to the left of each line of output.
This is especially useful if you are using my
MultiSSH class to run tail commands over multiple
servers.
:param callback: You may optionally supply a callback function which
takes two paramaters. The first is the line prefix
and the second is current line of output. The
callback should return the string that is to be
displayed (including the \n character). This allows
users to grep the output or manipulate it as
required.
:param output_callback: A function used to print ssh output. Printed to stdout
by default. A user-defined logger may be passed like
output_callback=lambda m: mylog.debug(m)
:param stop_callback: A function usesd to stop the tail, when function retruns
True tail will stop, by default stop_callback=lambda x: False
:param timeout: how much time to wait for data, default to None which
mean almost forever.
"""
output_callback = output_callback if output_callback else self.output_callback
# Set the channel timeout to the maximum integer the server allows,
# setting this to None breaks the KeyboardInterrupt exception and
# won't allow us to Ctrl+C out of teh script
timeout = timeout if timeout else 2 ** (struct.Struct(str('i')).size * 8 - 1) - 1
self.channel.settimeout(timeout)
# Create an empty line buffer and a line counter
current_line = b''
line_counter = 0
line_feed_byte = '\n'.encode(self.encoding)
# Loop forever, Ctrl+C (KeyboardInterrupt) is used to break the tail
while True:
# Read the output one byte at a time so we can detect \n correctly
buffer = self.channel.recv(1)
# If we have an empty buffer, then the SSH session has been closed
if len(buffer) == 0:
break
# Add the currently read buffer to the current line output
current_line += buffer
# Display the last read line in realtime when we reach a \n
# character
if buffer == line_feed_byte:
current_line_decoded = current_line.decode(self.encoding)
if line_counter:
if callback:
output_callback(callback(line_prefix, current_line_decoded))
else:
if line_prefix:
output_callback(line_prefix)
output_callback(current_line_decoded)
if stop_callback(current_line_decoded):
break
line_counter += 1
current_line = b'' | python | def tail(
self, line_prefix=None, callback=None, output_callback=None, stop_callback=lambda x: False,
timeout=None
):
"""
This function takes control of an SSH channel and displays line
by line of output as \n is recieved. This function is specifically
made for tail-like commands.
:param line_prefix: Text to append to the left of each line of output.
This is especially useful if you are using my
MultiSSH class to run tail commands over multiple
servers.
:param callback: You may optionally supply a callback function which
takes two paramaters. The first is the line prefix
and the second is current line of output. The
callback should return the string that is to be
displayed (including the \n character). This allows
users to grep the output or manipulate it as
required.
:param output_callback: A function used to print ssh output. Printed to stdout
by default. A user-defined logger may be passed like
output_callback=lambda m: mylog.debug(m)
:param stop_callback: A function usesd to stop the tail, when function retruns
True tail will stop, by default stop_callback=lambda x: False
:param timeout: how much time to wait for data, default to None which
mean almost forever.
"""
output_callback = output_callback if output_callback else self.output_callback
# Set the channel timeout to the maximum integer the server allows,
# setting this to None breaks the KeyboardInterrupt exception and
# won't allow us to Ctrl+C out of teh script
timeout = timeout if timeout else 2 ** (struct.Struct(str('i')).size * 8 - 1) - 1
self.channel.settimeout(timeout)
# Create an empty line buffer and a line counter
current_line = b''
line_counter = 0
line_feed_byte = '\n'.encode(self.encoding)
# Loop forever, Ctrl+C (KeyboardInterrupt) is used to break the tail
while True:
# Read the output one byte at a time so we can detect \n correctly
buffer = self.channel.recv(1)
# If we have an empty buffer, then the SSH session has been closed
if len(buffer) == 0:
break
# Add the currently read buffer to the current line output
current_line += buffer
# Display the last read line in realtime when we reach a \n
# character
if buffer == line_feed_byte:
current_line_decoded = current_line.decode(self.encoding)
if line_counter:
if callback:
output_callback(callback(line_prefix, current_line_decoded))
else:
if line_prefix:
output_callback(line_prefix)
output_callback(current_line_decoded)
if stop_callback(current_line_decoded):
break
line_counter += 1
current_line = b'' | [
"def",
"tail",
"(",
"self",
",",
"line_prefix",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"output_callback",
"=",
"None",
",",
"stop_callback",
"=",
"lambda",
"x",
":",
"False",
",",
"timeout",
"=",
"None",
")",
":",
"output_callback",
"=",
"output... | This function takes control of an SSH channel and displays line
by line of output as \n is recieved. This function is specifically
made for tail-like commands.
:param line_prefix: Text to append to the left of each line of output.
This is especially useful if you are using my
MultiSSH class to run tail commands over multiple
servers.
:param callback: You may optionally supply a callback function which
takes two paramaters. The first is the line prefix
and the second is current line of output. The
callback should return the string that is to be
displayed (including the \n character). This allows
users to grep the output or manipulate it as
required.
:param output_callback: A function used to print ssh output. Printed to stdout
by default. A user-defined logger may be passed like
output_callback=lambda m: mylog.debug(m)
:param stop_callback: A function usesd to stop the tail, when function retruns
True tail will stop, by default stop_callback=lambda x: False
:param timeout: how much time to wait for data, default to None which
mean almost forever. | [
"This",
"function",
"takes",
"control",
"of",
"an",
"SSH",
"channel",
"and",
"displays",
"line",
"by",
"line",
"of",
"output",
"as",
"\\",
"n",
"is",
"recieved",
".",
"This",
"function",
"is",
"specifically",
"made",
"for",
"tail",
"-",
"like",
"commands",... | 33dd3af745eb420d41e4ae7145b2ed536b2fccf0 | https://github.com/fgimian/paramiko-expect/blob/33dd3af745eb420d41e4ae7145b2ed536b2fccf0/paramiko_expect.py#L215-L286 | train | 33,550 |
fgimian/paramiko-expect | paramiko_expect.py | SSHClientInteraction.take_control | def take_control(self):
"""
This function is a better documented and touched up version of the
posix_shell function found in the interactive.py demo script that
ships with Paramiko.
"""
if has_termios:
# Get attributes of the shell you were in before going to the
# new one
original_tty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
# We must set the timeout to 0 so that we can bypass times when
# there is no available text to receive
self.channel.settimeout(0)
# Loop forever until the user exits (i.e. read buffer is empty)
while True:
select_read, select_write, select_exception = (
select.select([self.channel, sys.stdin], [], [])
)
# Read any output from the terminal and print it to the
# screen. With timeout set to 0, we just can ignore times
# when there's nothing to receive.
if self.channel in select_read:
try:
buffer = self.channel.recv(self.buffer_size)
if len(buffer) == 0:
break
sys.stdout.write(buffer.decode(self.encoding))
sys.stdout.flush()
except socket.timeout:
pass
# Send any keyboard input to the terminal one byte at a
# time
if sys.stdin in select_read:
buffer = sys.stdin.read(1)
if len(buffer) == 0:
break
self.channel.send(buffer)
finally:
# Restore the attributes of the shell you were in
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, original_tty)
else:
def writeall(sock):
while True:
buffer = sock.recv(self.buffer_size)
if len(buffer) == 0:
break
sys.stdout.write(buffer.decode(self.encoding))
sys.stdout.flush()
writer = threading.Thread(target=writeall, args=(self.channel,))
writer.start()
try:
while True:
buffer = sys.stdin.read(1)
if len(buffer) == 0:
break
self.channel.send(buffer)
# User has hit Ctrl+Z or F6
except EOFError:
pass | python | def take_control(self):
"""
This function is a better documented and touched up version of the
posix_shell function found in the interactive.py demo script that
ships with Paramiko.
"""
if has_termios:
# Get attributes of the shell you were in before going to the
# new one
original_tty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
# We must set the timeout to 0 so that we can bypass times when
# there is no available text to receive
self.channel.settimeout(0)
# Loop forever until the user exits (i.e. read buffer is empty)
while True:
select_read, select_write, select_exception = (
select.select([self.channel, sys.stdin], [], [])
)
# Read any output from the terminal and print it to the
# screen. With timeout set to 0, we just can ignore times
# when there's nothing to receive.
if self.channel in select_read:
try:
buffer = self.channel.recv(self.buffer_size)
if len(buffer) == 0:
break
sys.stdout.write(buffer.decode(self.encoding))
sys.stdout.flush()
except socket.timeout:
pass
# Send any keyboard input to the terminal one byte at a
# time
if sys.stdin in select_read:
buffer = sys.stdin.read(1)
if len(buffer) == 0:
break
self.channel.send(buffer)
finally:
# Restore the attributes of the shell you were in
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, original_tty)
else:
def writeall(sock):
while True:
buffer = sock.recv(self.buffer_size)
if len(buffer) == 0:
break
sys.stdout.write(buffer.decode(self.encoding))
sys.stdout.flush()
writer = threading.Thread(target=writeall, args=(self.channel,))
writer.start()
try:
while True:
buffer = sys.stdin.read(1)
if len(buffer) == 0:
break
self.channel.send(buffer)
# User has hit Ctrl+Z or F6
except EOFError:
pass | [
"def",
"take_control",
"(",
"self",
")",
":",
"if",
"has_termios",
":",
"# Get attributes of the shell you were in before going to the",
"# new one",
"original_tty",
"=",
"termios",
".",
"tcgetattr",
"(",
"sys",
".",
"stdin",
")",
"try",
":",
"tty",
".",
"setraw",
... | This function is a better documented and touched up version of the
posix_shell function found in the interactive.py demo script that
ships with Paramiko. | [
"This",
"function",
"is",
"a",
"better",
"documented",
"and",
"touched",
"up",
"version",
"of",
"the",
"posix_shell",
"function",
"found",
"in",
"the",
"interactive",
".",
"py",
"demo",
"script",
"that",
"ships",
"with",
"Paramiko",
"."
] | 33dd3af745eb420d41e4ae7145b2ed536b2fccf0 | https://github.com/fgimian/paramiko-expect/blob/33dd3af745eb420d41e4ae7145b2ed536b2fccf0/paramiko_expect.py#L288-L354 | train | 33,551 |
Celeo/Preston | preston/preston.py | Preston._get_access_from_refresh | def _get_access_from_refresh(self) -> Tuple[str, float]:
"""Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now)
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
response_data = r.json()
return (response_data['access_token'], response_data['expires_in']) | python | def _get_access_from_refresh(self) -> Tuple[str, float]:
"""Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now)
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
response_data = r.json()
return (response_data['access_token'], response_data['expires_in']) | [
"def",
"_get_access_from_refresh",
"(",
"self",
")",
"->",
"Tuple",
"[",
"str",
",",
"float",
"]",
":",
"headers",
"=",
"self",
".",
"_get_authorization_headers",
"(",
")",
"data",
"=",
"{",
"'grant_type'",
":",
"'refresh_token'",
",",
"'refresh_token'",
":",
... | Uses the stored refresh token to get a new access token.
This method assumes that the refresh token exists.
Args:
None
Returns:
new access token and expiration time (from now) | [
"Uses",
"the",
"stored",
"refresh",
"token",
"to",
"get",
"a",
"new",
"access",
"token",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L91-L109 | train | 33,552 |
Celeo/Preston | preston/preston.py | Preston._get_authorization_headers | def _get_authorization_headers(self) -> dict:
"""Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints
"""
auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')
auth = auth.replace('\n', '').replace(' ', '')
auth = 'Basic {}'.format(auth)
headers = {'Authorization': auth}
return headers | python | def _get_authorization_headers(self) -> dict:
"""Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints
"""
auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')
auth = auth.replace('\n', '').replace(' ', '')
auth = 'Basic {}'.format(auth)
headers = {'Authorization': auth}
return headers | [
"def",
"_get_authorization_headers",
"(",
"self",
")",
"->",
"dict",
":",
"auth",
"=",
"base64",
".",
"encodestring",
"(",
"(",
"self",
".",
"client_id",
"+",
"':'",
"+",
"self",
".",
"client_secret",
")",
".",
"encode",
"(",
"'latin-1'",
")",
")",
".",
... | Constructs and returns the Authorization header for the client app.
Args:
None
Returns:
header dict for communicating with the authorization endpoints | [
"Constructs",
"and",
"returns",
"the",
"Authorization",
"header",
"for",
"the",
"client",
"app",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L111-L124 | train | 33,553 |
Celeo/Preston | preston/preston.py | Preston._try_refresh_access_token | def _try_refresh_access_token(self) -> None:
"""Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None
"""
if self.refresh_token:
if not self.access_token or self._is_access_token_expired():
self.access_token, self.access_expiration = self._get_access_from_refresh()
self.access_expiration = time.time() + self.access_expiration | python | def _try_refresh_access_token(self) -> None:
"""Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None
"""
if self.refresh_token:
if not self.access_token or self._is_access_token_expired():
self.access_token, self.access_expiration = self._get_access_from_refresh()
self.access_expiration = time.time() + self.access_expiration | [
"def",
"_try_refresh_access_token",
"(",
"self",
")",
"->",
"None",
":",
"if",
"self",
".",
"refresh_token",
":",
"if",
"not",
"self",
".",
"access_token",
"or",
"self",
".",
"_is_access_token_expired",
"(",
")",
":",
"self",
".",
"access_token",
",",
"self"... | Attempts to get a new access token using the refresh token, if needed.
If the access token is expired and this instance has a stored refresh token,
then the refresh token is in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
None | [
"Attempts",
"to",
"get",
"a",
"new",
"access",
"token",
"using",
"the",
"refresh",
"token",
"if",
"needed",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L126-L142 | train | 33,554 |
Celeo/Preston | preston/preston.py | Preston.authenticate | def authenticate(self, code: str) -> 'Preston':
"""Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'authorization_code',
'code': code
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
if not r.status_code == 200:
raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
new_kwargs = dict(self._kwargs)
response_data = r.json()
new_kwargs['access_token'] = response_data['access_token']
new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
new_kwargs['refresh_token'] = response_data['refresh_token']
return Preston(**new_kwargs) | python | def authenticate(self, code: str) -> 'Preston':
"""Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated
"""
headers = self._get_authorization_headers()
data = {
'grant_type': 'authorization_code',
'code': code
}
r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
if not r.status_code == 200:
raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
new_kwargs = dict(self._kwargs)
response_data = r.json()
new_kwargs['access_token'] = response_data['access_token']
new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
new_kwargs['refresh_token'] = response_data['refresh_token']
return Preston(**new_kwargs) | [
"def",
"authenticate",
"(",
"self",
",",
"code",
":",
"str",
")",
"->",
"'Preston'",
":",
"headers",
"=",
"self",
".",
"_get_authorization_headers",
"(",
")",
"data",
"=",
"{",
"'grant_type'",
":",
"'authorization_code'",
",",
"'code'",
":",
"code",
"}",
"... | Authenticates using the code from the EVE SSO.
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated | [
"Authenticates",
"using",
"the",
"code",
"from",
"the",
"EVE",
"SSO",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L173-L201 | train | 33,555 |
Celeo/Preston | preston/preston.py | Preston._get_spec | def _get_spec(self) -> dict:
"""Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data
"""
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec | python | def _get_spec(self) -> dict:
"""Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data
"""
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec | [
"def",
"_get_spec",
"(",
"self",
")",
"->",
"dict",
":",
"if",
"self",
".",
"spec",
":",
"return",
"self",
".",
"spec",
"self",
".",
"spec",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"SPEC_URL",
".",
"format",
"(",
"self",
".",
"version",
")",... | Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
ArgS:
None
Returns:
OpenAPI spec data | [
"Fetches",
"the",
"OpenAPI",
"spec",
"from",
"the",
"server",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L220-L234 | train | 33,556 |
Celeo/Preston | preston/preston.py | Preston._get_path_for_op_id | def _get_path_for_op_id(self, id: str) -> Optional[str]:
"""Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found
"""
for path_key, path_value in self._get_spec()['paths'].items():
for method in self.METHODS:
if method in path_value:
if self.OPERATION_ID_KEY in path_value[method]:
if path_value[method][self.OPERATION_ID_KEY] == id:
return path_key
return None | python | def _get_path_for_op_id(self, id: str) -> Optional[str]:
"""Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found
"""
for path_key, path_value in self._get_spec()['paths'].items():
for method in self.METHODS:
if method in path_value:
if self.OPERATION_ID_KEY in path_value[method]:
if path_value[method][self.OPERATION_ID_KEY] == id:
return path_key
return None | [
"def",
"_get_path_for_op_id",
"(",
"self",
",",
"id",
":",
"str",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"for",
"path_key",
",",
"path_value",
"in",
"self",
".",
"_get_spec",
"(",
")",
"[",
"'paths'",
"]",
".",
"items",
"(",
")",
":",
"for",
... | Searches the spec for a path matching the operation id.
Args:
id: operation id
Returns:
path to the endpoint, or None if not found | [
"Searches",
"the",
"spec",
"for",
"a",
"path",
"matching",
"the",
"operation",
"id",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L236-L251 | train | 33,557 |
Celeo/Preston | preston/preston.py | Preston._insert_vars | def _insert_vars(self, path: str, data: dict) -> str:
"""Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
"""
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with) | python | def _insert_vars(self, path: str, data: dict) -> str:
"""Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled
"""
data = data.copy()
while True:
match = re.search(self.VAR_REPLACE_REGEX, path)
if not match:
return path
replace_from = match.group(0)
replace_with = str(data.get(match.group(1)))
path = path.replace(replace_from, replace_with) | [
"def",
"_insert_vars",
"(",
"self",
",",
"path",
":",
"str",
",",
"data",
":",
"dict",
")",
"->",
"str",
":",
"data",
"=",
"data",
".",
"copy",
"(",
")",
"while",
"True",
":",
"match",
"=",
"re",
".",
"search",
"(",
"self",
".",
"VAR_REPLACE_REGEX"... | Inserts variables into the ESI URL path.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
path with variables filled | [
"Inserts",
"variables",
"into",
"the",
"ESI",
"URL",
"path",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L253-L270 | train | 33,558 |
Celeo/Preston | preston/preston.py | Preston.whoami | def whoami(self) -> dict:
"""Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict
"""
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json() | python | def whoami(self) -> dict:
"""Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict
"""
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json() | [
"def",
"whoami",
"(",
"self",
")",
"->",
"dict",
":",
"if",
"not",
"self",
".",
"access_token",
":",
"return",
"{",
"}",
"self",
".",
"_try_refresh_access_token",
"(",
")",
"return",
"self",
".",
"session",
".",
"get",
"(",
"self",
".",
"WHOAMI_URL",
"... | Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict | [
"Returns",
"the",
"basic",
"information",
"about",
"the",
"authenticated",
"character",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L272-L287 | train | 33,559 |
Celeo/Preston | preston/preston.py | Preston.get_path | def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
"""Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data
"""
path = self._insert_vars(path, data)
path = self.BASE_URL + path
data = self.cache.check(path)
if data:
return data
self._try_refresh_access_token()
r = self.session.get(path)
self.cache.set(r)
return r.json() | python | def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
"""Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data
"""
path = self._insert_vars(path, data)
path = self.BASE_URL + path
data = self.cache.check(path)
if data:
return data
self._try_refresh_access_token()
r = self.session.get(path)
self.cache.set(r)
return r.json() | [
"def",
"get_path",
"(",
"self",
",",
"path",
":",
"str",
",",
"data",
":",
"dict",
")",
"->",
"Tuple",
"[",
"dict",
",",
"dict",
"]",
":",
"path",
"=",
"self",
".",
"_insert_vars",
"(",
"path",
",",
"data",
")",
"path",
"=",
"self",
".",
"BASE_UR... | Queries the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
data: data to insert into the URL
Returns:
ESI data | [
"Queries",
"the",
"ESI",
"by",
"an",
"endpoint",
"URL",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L289-L311 | train | 33,560 |
Celeo/Preston | preston/preston.py | Preston.get_op | def get_op(self, id: str, **kwargs: str) -> dict:
"""Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs) | python | def get_op(self, id: str, **kwargs: str) -> dict:
"""Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.get_path(path, kwargs) | [
"def",
"get_op",
"(",
"self",
",",
"id",
":",
"str",
",",
"*",
"*",
"kwargs",
":",
"str",
")",
"->",
"dict",
":",
"path",
"=",
"self",
".",
"_get_path_for_op_id",
"(",
"id",
")",
"return",
"self",
".",
"get_path",
"(",
"path",
",",
"kwargs",
")"
] | Queries the ESI by looking up an operation id.
Endpoints are cached, so calls to this method
for the same op and args will return the data
from the cache instead of making the API call.
Args:
id: operation id
kwargs: data to populate the endpoint's URL variables
Returns:
ESI data | [
"Queries",
"the",
"ESI",
"by",
"looking",
"up",
"an",
"operation",
"id",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L313-L328 | train | 33,561 |
Celeo/Preston | preston/preston.py | Preston.post_path | def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._insert_vars(path, path_data or {})
path = self.BASE_URL + path
self._try_refresh_access_token()
return self.session.post(path, json=post_data).json() | python | def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._insert_vars(path, path_data or {})
path = self.BASE_URL + path
self._try_refresh_access_token()
return self.session.post(path, json=post_data).json() | [
"def",
"post_path",
"(",
"self",
",",
"path",
":",
"str",
",",
"path_data",
":",
"Union",
"[",
"dict",
",",
"None",
"]",
",",
"post_data",
":",
"Any",
")",
"->",
"dict",
":",
"path",
"=",
"self",
".",
"_insert_vars",
"(",
"path",
",",
"path_data",
... | Modifies the ESI by an endpoint URL.
This method is not marked "private" as it _can_ be used
by consuming code, but it's probably easier to call the
`get_op` method instead.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data | [
"Modifies",
"the",
"ESI",
"by",
"an",
"endpoint",
"URL",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L330-L348 | train | 33,562 |
Celeo/Preston | preston/preston.py | Preston.post_op | def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data) | python | def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data) | [
"def",
"post_op",
"(",
"self",
",",
"id",
":",
"str",
",",
"path_data",
":",
"Union",
"[",
"dict",
",",
"None",
"]",
",",
"post_data",
":",
"Any",
")",
"->",
"dict",
":",
"path",
"=",
"self",
".",
"_get_path_for_op_id",
"(",
"id",
")",
"return",
"s... | Modifies the ESI by looking up an operation id.
Args:
path: raw ESI URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data | [
"Modifies",
"the",
"ESI",
"by",
"looking",
"up",
"an",
"operation",
"id",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L350-L362 | train | 33,563 |
Celeo/Preston | preston/cache.py | Cache._get_expiration | def _get_expiration(self, headers: dict) -> int:
"""Gets the expiration time of the data from the response headers.
Args:
headers: dictionary of headers from ESI
Returns:
value of seconds from now the data expires
"""
expiration_str = headers.get('expires')
if not expiration_str:
return 0
expiration = datetime.strptime(expiration_str, '%a, %d %b %Y %H:%M:%S %Z')
delta = (expiration - datetime.utcnow()).total_seconds()
return math.ceil(abs(delta)) | python | def _get_expiration(self, headers: dict) -> int:
"""Gets the expiration time of the data from the response headers.
Args:
headers: dictionary of headers from ESI
Returns:
value of seconds from now the data expires
"""
expiration_str = headers.get('expires')
if not expiration_str:
return 0
expiration = datetime.strptime(expiration_str, '%a, %d %b %Y %H:%M:%S %Z')
delta = (expiration - datetime.utcnow()).total_seconds()
return math.ceil(abs(delta)) | [
"def",
"_get_expiration",
"(",
"self",
",",
"headers",
":",
"dict",
")",
"->",
"int",
":",
"expiration_str",
"=",
"headers",
".",
"get",
"(",
"'expires'",
")",
"if",
"not",
"expiration_str",
":",
"return",
"0",
"expiration",
"=",
"datetime",
".",
"strptime... | Gets the expiration time of the data from the response headers.
Args:
headers: dictionary of headers from ESI
Returns:
value of seconds from now the data expires | [
"Gets",
"the",
"expiration",
"time",
"of",
"the",
"data",
"from",
"the",
"response",
"headers",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/cache.py#L23-L37 | train | 33,564 |
Celeo/Preston | preston/cache.py | Cache.set | def set(self, response: 'requests.Response') -> None:
"""Adds a response to the cache.
Args:
response: response from ESI
Returns:
None
"""
self.data[response.url] = SavedEndpoint(
response.json(),
self._get_expiration(response.headers)
) | python | def set(self, response: 'requests.Response') -> None:
"""Adds a response to the cache.
Args:
response: response from ESI
Returns:
None
"""
self.data[response.url] = SavedEndpoint(
response.json(),
self._get_expiration(response.headers)
) | [
"def",
"set",
"(",
"self",
",",
"response",
":",
"'requests.Response'",
")",
"->",
"None",
":",
"self",
".",
"data",
"[",
"response",
".",
"url",
"]",
"=",
"SavedEndpoint",
"(",
"response",
".",
"json",
"(",
")",
",",
"self",
".",
"_get_expiration",
"(... | Adds a response to the cache.
Args:
response: response from ESI
Returns:
None | [
"Adds",
"a",
"response",
"to",
"the",
"cache",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/cache.py#L39-L51 | train | 33,565 |
Celeo/Preston | preston/cache.py | Cache._check_expiration | def _check_expiration(self, url: str, data: 'SavedEndpoint') -> 'SavedEndpoint':
"""Checks the expiration time for data for a url.
If the data has expired, it is deleted from the cache.
Args:
url: url to check
data: page of data for that url
Returns:
value of either the passed data or None if it expired
"""
if data.expires_after < time.time():
del self.data[url]
data = None
return data | python | def _check_expiration(self, url: str, data: 'SavedEndpoint') -> 'SavedEndpoint':
"""Checks the expiration time for data for a url.
If the data has expired, it is deleted from the cache.
Args:
url: url to check
data: page of data for that url
Returns:
value of either the passed data or None if it expired
"""
if data.expires_after < time.time():
del self.data[url]
data = None
return data | [
"def",
"_check_expiration",
"(",
"self",
",",
"url",
":",
"str",
",",
"data",
":",
"'SavedEndpoint'",
")",
"->",
"'SavedEndpoint'",
":",
"if",
"data",
".",
"expires_after",
"<",
"time",
".",
"time",
"(",
")",
":",
"del",
"self",
".",
"data",
"[",
"url"... | Checks the expiration time for data for a url.
If the data has expired, it is deleted from the cache.
Args:
url: url to check
data: page of data for that url
Returns:
value of either the passed data or None if it expired | [
"Checks",
"the",
"expiration",
"time",
"for",
"data",
"for",
"a",
"url",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/cache.py#L53-L68 | train | 33,566 |
Celeo/Preston | preston/cache.py | Cache.check | def check(self, url: str) -> Optional[dict]:
"""Check if data for a url has expired.
Data is not fetched again if it has expired.
Args:
url: url to check expiration on
Returns:
value of the data, possibly None
"""
data = self.data.get(url)
if data:
data = self._check_expiration(url, data)
return data.data if data else None | python | def check(self, url: str) -> Optional[dict]:
"""Check if data for a url has expired.
Data is not fetched again if it has expired.
Args:
url: url to check expiration on
Returns:
value of the data, possibly None
"""
data = self.data.get(url)
if data:
data = self._check_expiration(url, data)
return data.data if data else None | [
"def",
"check",
"(",
"self",
",",
"url",
":",
"str",
")",
"->",
"Optional",
"[",
"dict",
"]",
":",
"data",
"=",
"self",
".",
"data",
".",
"get",
"(",
"url",
")",
"if",
"data",
":",
"data",
"=",
"self",
".",
"_check_expiration",
"(",
"url",
",",
... | Check if data for a url has expired.
Data is not fetched again if it has expired.
Args:
url: url to check expiration on
Returns:
value of the data, possibly None | [
"Check",
"if",
"data",
"for",
"a",
"url",
"has",
"expired",
"."
] | 7c94bf0b7dabecad0bd8b66229b2906dabdb8e79 | https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/cache.py#L70-L84 | train | 33,567 |
gplepage/gvar | src/gvar/linalg.py | eigvalsh | def eigvalsh(a, eigvec=False):
""" Eigenvalues of Hermitian matrix ``a``.
Args:
a: Two-dimensional, square Hermitian matrix/array of numbers
and/or :class:`gvar.GVar`\s. Array elements must be
real-valued if `gvar.GVar`\s are involved (i.e., symmetric
matrix).
eigvec (bool): If ``True``, method returns a tuple of arrays
``(val, vec)`` where ``val[i]`` are the
eigenvalues of ``a``, and ``vec[:, i]`` are the mean
values of the corresponding eigenvectors. Only ``val`` is
returned if ``eigvec=False`` (default).
Returns:
Array ``val`` of eigenvalues of matrix ``a`` if parameter
``eigvec==False`` (default); otherwise a tuple of
arrays ``(val, vec)`` where ``val[i]`` are the eigenvalues
(in ascending order) and ``vec[:, i]`` are the mean values
of the corresponding eigenvectors.
Raises:
ValueError: If matrix is not square and two-dimensional.
"""
if eigvec == True:
val, vec = eigh(a, eigvec=True)
return val, gvar.mean(vec)
else:
return eigh(a, eigvec=False) | python | def eigvalsh(a, eigvec=False):
""" Eigenvalues of Hermitian matrix ``a``.
Args:
a: Two-dimensional, square Hermitian matrix/array of numbers
and/or :class:`gvar.GVar`\s. Array elements must be
real-valued if `gvar.GVar`\s are involved (i.e., symmetric
matrix).
eigvec (bool): If ``True``, method returns a tuple of arrays
``(val, vec)`` where ``val[i]`` are the
eigenvalues of ``a``, and ``vec[:, i]`` are the mean
values of the corresponding eigenvectors. Only ``val`` is
returned if ``eigvec=False`` (default).
Returns:
Array ``val`` of eigenvalues of matrix ``a`` if parameter
``eigvec==False`` (default); otherwise a tuple of
arrays ``(val, vec)`` where ``val[i]`` are the eigenvalues
(in ascending order) and ``vec[:, i]`` are the mean values
of the corresponding eigenvectors.
Raises:
ValueError: If matrix is not square and two-dimensional.
"""
if eigvec == True:
val, vec = eigh(a, eigvec=True)
return val, gvar.mean(vec)
else:
return eigh(a, eigvec=False) | [
"def",
"eigvalsh",
"(",
"a",
",",
"eigvec",
"=",
"False",
")",
":",
"if",
"eigvec",
"==",
"True",
":",
"val",
",",
"vec",
"=",
"eigh",
"(",
"a",
",",
"eigvec",
"=",
"True",
")",
"return",
"val",
",",
"gvar",
".",
"mean",
"(",
"vec",
")",
"else"... | Eigenvalues of Hermitian matrix ``a``.
Args:
a: Two-dimensional, square Hermitian matrix/array of numbers
and/or :class:`gvar.GVar`\s. Array elements must be
real-valued if `gvar.GVar`\s are involved (i.e., symmetric
matrix).
eigvec (bool): If ``True``, method returns a tuple of arrays
``(val, vec)`` where ``val[i]`` are the
eigenvalues of ``a``, and ``vec[:, i]`` are the mean
values of the corresponding eigenvectors. Only ``val`` is
returned if ``eigvec=False`` (default).
Returns:
Array ``val`` of eigenvalues of matrix ``a`` if parameter
``eigvec==False`` (default); otherwise a tuple of
arrays ``(val, vec)`` where ``val[i]`` are the eigenvalues
(in ascending order) and ``vec[:, i]`` are the mean values
of the corresponding eigenvectors.
Raises:
ValueError: If matrix is not square and two-dimensional. | [
"Eigenvalues",
"of",
"Hermitian",
"matrix",
"a",
"."
] | d6671697319eb6280de3793c9a1c2b616c6f2ae0 | https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/linalg.py#L62-L90 | train | 33,568 |
gplepage/gvar | src/gvar/linalg.py | eigh | def eigh(a, eigvec=True, rcond=None):
""" Eigenvalues and eigenvectors of symmetric matrix ``a``.
Args:
a: Two-dimensional, square Hermitian matrix/array of numbers
and/or :class:`gvar.GVar`\s. Array elements must be
real-valued if `gvar.GVar`\s are involved (i.e., symmetric
matrix).
eigvec (bool): If ``True`` (default), method returns a tuple
of arrays ``(val, vec)`` where ``val[i]`` are the
eigenvalues of ``a`` (in ascending order), and ``vec[:, i]``
are the corresponding eigenvectors of ``a``. Only ``val`` is
returned if ``eigvec=False``.
rcond (float): Eigenvalues whose difference is smaller than
``rcond`` times their sum are assumed to be degenerate
(and ignored) when computing variances for the eigvectors.
Default (``rcond=None``) is ``max(M,N)`` times machine precision.
Returns:
Tuple ``(val,vec)`` of eigenvalues and eigenvectors of
matrix ``a`` if parameter ``eigvec==True`` (default).
The eigenvalues ``val[i]`` are in ascending order and
``vec[:, i]`` are the corresponding eigenvalues. Only
the eigenvalues ``val`` are returned if ``eigvec=False``.
Raises:
ValueError: If matrix is not square and two-dimensional.
"""
a = numpy.asarray(a)
if a.dtype != object:
val, vec = numpy.linalg.eigh(a)
return (val, vec) if eigvec else val
amean = gvar.mean(a)
if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
raise ValueError('bad matrix shape: ' + str(a.shape))
if rcond is None:
rcond = numpy.finfo(float).eps * max(a.shape)
da = a - amean
val0, vec0 = numpy.linalg.eigh(amean)
val = val0 + [
vec0[:, i].conjugate().dot(da.dot(vec0[:, i])) for i in range(vec0.shape[1])
]
if eigvec == True:
if vec0.dtype == complex:
raise ValueError('cannot evaluate eigenvectors when a is complex')
vec = numpy.array(vec0, dtype=object)
for i in range(len(val)):
for j in range(len(val)):
dval = val0[i] - val0[j]
if abs(dval) < rcond * abs(val0[j] + val0[i]) or dval == 0.0:
continue
vec[:, i] += vec0[:, j] * (
vec0[:, j].dot(da.dot(vec0[:, i])) / dval
)
return val, vec
else:
return val | python | def eigh(a, eigvec=True, rcond=None):
""" Eigenvalues and eigenvectors of symmetric matrix ``a``.
Args:
a: Two-dimensional, square Hermitian matrix/array of numbers
and/or :class:`gvar.GVar`\s. Array elements must be
real-valued if `gvar.GVar`\s are involved (i.e., symmetric
matrix).
eigvec (bool): If ``True`` (default), method returns a tuple
of arrays ``(val, vec)`` where ``val[i]`` are the
eigenvalues of ``a`` (in ascending order), and ``vec[:, i]``
are the corresponding eigenvectors of ``a``. Only ``val`` is
returned if ``eigvec=False``.
rcond (float): Eigenvalues whose difference is smaller than
``rcond`` times their sum are assumed to be degenerate
(and ignored) when computing variances for the eigvectors.
Default (``rcond=None``) is ``max(M,N)`` times machine precision.
Returns:
Tuple ``(val,vec)`` of eigenvalues and eigenvectors of
matrix ``a`` if parameter ``eigvec==True`` (default).
The eigenvalues ``val[i]`` are in ascending order and
``vec[:, i]`` are the corresponding eigenvalues. Only
the eigenvalues ``val`` are returned if ``eigvec=False``.
Raises:
ValueError: If matrix is not square and two-dimensional.
"""
a = numpy.asarray(a)
if a.dtype != object:
val, vec = numpy.linalg.eigh(a)
return (val, vec) if eigvec else val
amean = gvar.mean(a)
if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
raise ValueError('bad matrix shape: ' + str(a.shape))
if rcond is None:
rcond = numpy.finfo(float).eps * max(a.shape)
da = a - amean
val0, vec0 = numpy.linalg.eigh(amean)
val = val0 + [
vec0[:, i].conjugate().dot(da.dot(vec0[:, i])) for i in range(vec0.shape[1])
]
if eigvec == True:
if vec0.dtype == complex:
raise ValueError('cannot evaluate eigenvectors when a is complex')
vec = numpy.array(vec0, dtype=object)
for i in range(len(val)):
for j in range(len(val)):
dval = val0[i] - val0[j]
if abs(dval) < rcond * abs(val0[j] + val0[i]) or dval == 0.0:
continue
vec[:, i] += vec0[:, j] * (
vec0[:, j].dot(da.dot(vec0[:, i])) / dval
)
return val, vec
else:
return val | [
"def",
"eigh",
"(",
"a",
",",
"eigvec",
"=",
"True",
",",
"rcond",
"=",
"None",
")",
":",
"a",
"=",
"numpy",
".",
"asarray",
"(",
"a",
")",
"if",
"a",
".",
"dtype",
"!=",
"object",
":",
"val",
",",
"vec",
"=",
"numpy",
".",
"linalg",
".",
"ei... | Eigenvalues and eigenvectors of symmetric matrix ``a``.
Args:
a: Two-dimensional, square Hermitian matrix/array of numbers
and/or :class:`gvar.GVar`\s. Array elements must be
real-valued if `gvar.GVar`\s are involved (i.e., symmetric
matrix).
eigvec (bool): If ``True`` (default), method returns a tuple
of arrays ``(val, vec)`` where ``val[i]`` are the
eigenvalues of ``a`` (in ascending order), and ``vec[:, i]``
are the corresponding eigenvectors of ``a``. Only ``val`` is
returned if ``eigvec=False``.
rcond (float): Eigenvalues whose difference is smaller than
``rcond`` times their sum are assumed to be degenerate
(and ignored) when computing variances for the eigvectors.
Default (``rcond=None``) is ``max(M,N)`` times machine precision.
Returns:
Tuple ``(val,vec)`` of eigenvalues and eigenvectors of
matrix ``a`` if parameter ``eigvec==True`` (default).
The eigenvalues ``val[i]`` are in ascending order and
``vec[:, i]`` are the corresponding eigenvalues. Only
the eigenvalues ``val`` are returned if ``eigvec=False``.
Raises:
ValueError: If matrix is not square and two-dimensional. | [
"Eigenvalues",
"and",
"eigenvectors",
"of",
"symmetric",
"matrix",
"a",
"."
] | d6671697319eb6280de3793c9a1c2b616c6f2ae0 | https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/linalg.py#L93-L149 | train | 33,569 |
gplepage/gvar | src/gvar/linalg.py | svd | def svd(a, compute_uv=True, rcond=None):
""" svd decomposition of matrix ``a`` containing |GVar|\s.
Args:
a: Two-dimensional matrix/array of numbers
and/or :class:`gvar.GVar`\s.
compute_uv (bool): It ``True`` (default), returns
tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT``
where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1``
and ``vT @ vT.T = 1``, and ``s`` is the list of singular
values. Only ``s`` is returned if ``compute_uv=False``.
rcond (float): Singular values whose difference is smaller than
``rcond`` times their sum are assumed to be degenerate for
calculating variances for ``u`` and ``vT``.
Default (``rcond=None``) is ``max(M,N)`` times machine precision.
Returns:
Tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT``
where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1``
and ``vT @ vT.T = 1``, and ``s`` is the list of singular
values. If ``a.shape=(N,M)``, then ``u.shape=(N,K)``
and ``vT.shape=(K,M)`` where ``K`` is the number of
nonzero singular values (``len(s)==K``).
If ``compute_uv==False`` only ``s`` is returned.
Raises:
ValueError: If matrix is not two-dimensional.
"""
a = numpy.asarray(a)
if a.dtype != object:
return numpy.linalg.svd(a, compute_uv=compute_uv)
amean = gvar.mean(a)
if amean.ndim != 2:
raise ValueError(
'matrix must have dimension 2: actual shape = ' + str(a.shape)
)
if rcond is None:
rcond = numpy.finfo(float).eps * max(a.shape)
da = a - amean
u0,s0,v0T = numpy.linalg.svd(amean, compute_uv=True, full_matrices=True)
k = min(a.shape)
s = s0 + [
u0[:, i].dot(da.dot(v0T[i, :])) for i in range(k)
]
if compute_uv:
u = numpy.array(u0, dtype=object)
vT = numpy.array(v0T, dtype=object)
# u first
daaT = da.dot(a.T) + a.dot(da.T)
s02 = numpy.zeros(daaT.shape[0], float)
s02[:len(s0)] = s0 ** 2
for j in range(s02.shape[0]):
for i in range(k):
if i == j:
continue
ds2 = s02[i] - s02[j]
if abs(ds2) < rcond * abs(s02[i] + s02[j]) or ds2 == 0:
continue
u[:, i] += u0[:, j] * u0[:, j].dot(daaT.dot(u0[:, i])) / ds2
# v next
daTa = da.T.dot(a) + a.T.dot(da)
s02 = numpy.zeros(daTa.shape[0], float)
s02[:len(s0)] = s0 ** 2
for j in range(s02.shape[0]):
for i in range(k):
if i == j:
continue
ds2 = s02[i] - s02[j]
if abs(ds2) < rcond * abs(s02[i] + s02[j]) or ds2 == 0:
continue
vT[i, :] += v0T[j, :] * v0T[j, :].dot(daTa.dot(v0T[i, :])) / ds2
return u[:,:k], s, vT[:k, :]
else:
return s | python | def svd(a, compute_uv=True, rcond=None):
""" svd decomposition of matrix ``a`` containing |GVar|\s.
Args:
a: Two-dimensional matrix/array of numbers
and/or :class:`gvar.GVar`\s.
compute_uv (bool): It ``True`` (default), returns
tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT``
where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1``
and ``vT @ vT.T = 1``, and ``s`` is the list of singular
values. Only ``s`` is returned if ``compute_uv=False``.
rcond (float): Singular values whose difference is smaller than
``rcond`` times their sum are assumed to be degenerate for
calculating variances for ``u`` and ``vT``.
Default (``rcond=None``) is ``max(M,N)`` times machine precision.
Returns:
Tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT``
where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1``
and ``vT @ vT.T = 1``, and ``s`` is the list of singular
values. If ``a.shape=(N,M)``, then ``u.shape=(N,K)``
and ``vT.shape=(K,M)`` where ``K`` is the number of
nonzero singular values (``len(s)==K``).
If ``compute_uv==False`` only ``s`` is returned.
Raises:
ValueError: If matrix is not two-dimensional.
"""
a = numpy.asarray(a)
if a.dtype != object:
return numpy.linalg.svd(a, compute_uv=compute_uv)
amean = gvar.mean(a)
if amean.ndim != 2:
raise ValueError(
'matrix must have dimension 2: actual shape = ' + str(a.shape)
)
if rcond is None:
rcond = numpy.finfo(float).eps * max(a.shape)
da = a - amean
u0,s0,v0T = numpy.linalg.svd(amean, compute_uv=True, full_matrices=True)
k = min(a.shape)
s = s0 + [
u0[:, i].dot(da.dot(v0T[i, :])) for i in range(k)
]
if compute_uv:
u = numpy.array(u0, dtype=object)
vT = numpy.array(v0T, dtype=object)
# u first
daaT = da.dot(a.T) + a.dot(da.T)
s02 = numpy.zeros(daaT.shape[0], float)
s02[:len(s0)] = s0 ** 2
for j in range(s02.shape[0]):
for i in range(k):
if i == j:
continue
ds2 = s02[i] - s02[j]
if abs(ds2) < rcond * abs(s02[i] + s02[j]) or ds2 == 0:
continue
u[:, i] += u0[:, j] * u0[:, j].dot(daaT.dot(u0[:, i])) / ds2
# v next
daTa = da.T.dot(a) + a.T.dot(da)
s02 = numpy.zeros(daTa.shape[0], float)
s02[:len(s0)] = s0 ** 2
for j in range(s02.shape[0]):
for i in range(k):
if i == j:
continue
ds2 = s02[i] - s02[j]
if abs(ds2) < rcond * abs(s02[i] + s02[j]) or ds2 == 0:
continue
vT[i, :] += v0T[j, :] * v0T[j, :].dot(daTa.dot(v0T[i, :])) / ds2
return u[:,:k], s, vT[:k, :]
else:
return s | [
"def",
"svd",
"(",
"a",
",",
"compute_uv",
"=",
"True",
",",
"rcond",
"=",
"None",
")",
":",
"a",
"=",
"numpy",
".",
"asarray",
"(",
"a",
")",
"if",
"a",
".",
"dtype",
"!=",
"object",
":",
"return",
"numpy",
".",
"linalg",
".",
"svd",
"(",
"a",... | svd decomposition of matrix ``a`` containing |GVar|\s.
Args:
a: Two-dimensional matrix/array of numbers
and/or :class:`gvar.GVar`\s.
compute_uv (bool): It ``True`` (default), returns
tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT``
where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1``
and ``vT @ vT.T = 1``, and ``s`` is the list of singular
values. Only ``s`` is returned if ``compute_uv=False``.
rcond (float): Singular values whose difference is smaller than
``rcond`` times their sum are assumed to be degenerate for
calculating variances for ``u`` and ``vT``.
Default (``rcond=None``) is ``max(M,N)`` times machine precision.
Returns:
Tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT``
where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1``
and ``vT @ vT.T = 1``, and ``s`` is the list of singular
values. If ``a.shape=(N,M)``, then ``u.shape=(N,K)``
and ``vT.shape=(K,M)`` where ``K`` is the number of
nonzero singular values (``len(s)==K``).
If ``compute_uv==False`` only ``s`` is returned.
Raises:
ValueError: If matrix is not two-dimensional. | [
"svd",
"decomposition",
"of",
"matrix",
"a",
"containing",
"|GVar|",
"\\",
"s",
"."
] | d6671697319eb6280de3793c9a1c2b616c6f2ae0 | https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/linalg.py#L151-L224 | train | 33,570 |
gplepage/gvar | src/gvar/linalg.py | inv | def inv(a):
""" Inverse of matrix ``a``.
Args:
a: Two-dimensional, square matrix/array of numbers
and/or :class:`gvar.GVar`\s.
Returns:
The inverse of matrix ``a``.
Raises:
ValueError: If matrix is not square and two-dimensional.
"""
amean = gvar.mean(a)
if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
raise ValueError('bad matrix shape: ' + str(a.shape))
da = a - amean
ainv = numpy.linalg.inv(amean)
return ainv - ainv.dot(da.dot(ainv)) | python | def inv(a):
""" Inverse of matrix ``a``.
Args:
a: Two-dimensional, square matrix/array of numbers
and/or :class:`gvar.GVar`\s.
Returns:
The inverse of matrix ``a``.
Raises:
ValueError: If matrix is not square and two-dimensional.
"""
amean = gvar.mean(a)
if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
raise ValueError('bad matrix shape: ' + str(a.shape))
da = a - amean
ainv = numpy.linalg.inv(amean)
return ainv - ainv.dot(da.dot(ainv)) | [
"def",
"inv",
"(",
"a",
")",
":",
"amean",
"=",
"gvar",
".",
"mean",
"(",
"a",
")",
"if",
"amean",
".",
"ndim",
"!=",
"2",
"or",
"amean",
".",
"shape",
"[",
"0",
"]",
"!=",
"amean",
".",
"shape",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"("... | Inverse of matrix ``a``.
Args:
a: Two-dimensional, square matrix/array of numbers
and/or :class:`gvar.GVar`\s.
Returns:
The inverse of matrix ``a``.
Raises:
ValueError: If matrix is not square and two-dimensional. | [
"Inverse",
"of",
"matrix",
"a",
"."
] | d6671697319eb6280de3793c9a1c2b616c6f2ae0 | https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/linalg.py#L296-L314 | train | 33,571 |
gplepage/gvar | src/gvar/__init__.py | ranseed | def ranseed(seed=None):
""" Seed random number generators with tuple ``seed``.
Argument ``seed`` is an integer or
a :class:`tuple` of integers that is used to seed
the random number generators used by :mod:`numpy` and
:mod:`random` (and therefore by :mod:`gvar`). Reusing
the same ``seed`` results in the same set of random numbers.
``ranseed`` generates its own seed when called without an argument
or with ``seed=None``. This seed is stored in ``ranseed.seed`` and
also returned by the function. The seed can be used to regenerate
the same set of random numbers at a later time.
Args:
seed (int, tuple, or None): Seed for generator. Generates a
random tuple if ``None``.
Returns:
The seed used to reseed the generator.
"""
if seed is None:
seed = numpy.random.randint(1, int(2e9), size=3)
try:
seed = tuple(seed)
except TypeError:
pass
numpy.random.seed(seed)
ranseed.seed = seed
return seed | python | def ranseed(seed=None):
""" Seed random number generators with tuple ``seed``.
Argument ``seed`` is an integer or
a :class:`tuple` of integers that is used to seed
the random number generators used by :mod:`numpy` and
:mod:`random` (and therefore by :mod:`gvar`). Reusing
the same ``seed`` results in the same set of random numbers.
``ranseed`` generates its own seed when called without an argument
or with ``seed=None``. This seed is stored in ``ranseed.seed`` and
also returned by the function. The seed can be used to regenerate
the same set of random numbers at a later time.
Args:
seed (int, tuple, or None): Seed for generator. Generates a
random tuple if ``None``.
Returns:
The seed used to reseed the generator.
"""
if seed is None:
seed = numpy.random.randint(1, int(2e9), size=3)
try:
seed = tuple(seed)
except TypeError:
pass
numpy.random.seed(seed)
ranseed.seed = seed
return seed | [
"def",
"ranseed",
"(",
"seed",
"=",
"None",
")",
":",
"if",
"seed",
"is",
"None",
":",
"seed",
"=",
"numpy",
".",
"random",
".",
"randint",
"(",
"1",
",",
"int",
"(",
"2e9",
")",
",",
"size",
"=",
"3",
")",
"try",
":",
"seed",
"=",
"tuple",
"... | Seed random number generators with tuple ``seed``.
Argument ``seed`` is an integer or
a :class:`tuple` of integers that is used to seed
the random number generators used by :mod:`numpy` and
:mod:`random` (and therefore by :mod:`gvar`). Reusing
the same ``seed`` results in the same set of random numbers.
``ranseed`` generates its own seed when called without an argument
or with ``seed=None``. This seed is stored in ``ranseed.seed`` and
also returned by the function. The seed can be used to regenerate
the same set of random numbers at a later time.
Args:
seed (int, tuple, or None): Seed for generator. Generates a
random tuple if ``None``.
Returns:
The seed used to reseed the generator. | [
"Seed",
"random",
"number",
"generators",
"with",
"tuple",
"seed",
"."
] | d6671697319eb6280de3793c9a1c2b616c6f2ae0 | https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/__init__.py#L146-L174 | train | 33,572 |
gplepage/gvar | src/gvar/__init__.py | erf | def erf(x):
""" Error function.
Works for floats, |GVar|\s, and :mod:`numpy` arrays.
"""
try:
return math.erf(x)
except TypeError:
pass
if isinstance(x, GVar):
f = math.erf(x.mean)
dfdx = 2. * math.exp(- x.mean ** 2) / math.sqrt(math.pi)
return gvar_function(x, f, dfdx)
else:
x = numpy.asarray(x)
ans = numpy.empty(x.shape, x.dtype)
for i in range(x.size):
try:
ans.flat[i] = erf(x.flat[i])
except TypeError:
xi = x.flat[i]
f = math.erf(xi.mean)
dfdx = 2. * math.exp(- xi.mean ** 2) / math.sqrt(math.pi)
ans.flat[i] = gvar_function(xi, f, dfdx)
return ans | python | def erf(x):
""" Error function.
Works for floats, |GVar|\s, and :mod:`numpy` arrays.
"""
try:
return math.erf(x)
except TypeError:
pass
if isinstance(x, GVar):
f = math.erf(x.mean)
dfdx = 2. * math.exp(- x.mean ** 2) / math.sqrt(math.pi)
return gvar_function(x, f, dfdx)
else:
x = numpy.asarray(x)
ans = numpy.empty(x.shape, x.dtype)
for i in range(x.size):
try:
ans.flat[i] = erf(x.flat[i])
except TypeError:
xi = x.flat[i]
f = math.erf(xi.mean)
dfdx = 2. * math.exp(- xi.mean ** 2) / math.sqrt(math.pi)
ans.flat[i] = gvar_function(xi, f, dfdx)
return ans | [
"def",
"erf",
"(",
"x",
")",
":",
"try",
":",
"return",
"math",
".",
"erf",
"(",
"x",
")",
"except",
"TypeError",
":",
"pass",
"if",
"isinstance",
"(",
"x",
",",
"GVar",
")",
":",
"f",
"=",
"math",
".",
"erf",
"(",
"x",
".",
"mean",
")",
"dfd... | Error function.
Works for floats, |GVar|\s, and :mod:`numpy` arrays. | [
"Error",
"function",
"."
] | d6671697319eb6280de3793c9a1c2b616c6f2ae0 | https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/__init__.py#L805-L829 | train | 33,573 |
gplepage/gvar | src/gvar/__init__.py | make_fake_data | def make_fake_data(g, fac=1.0):
""" Make fake data based on ``g``.
This function replaces the |GVar|\s in ``g`` by new |GVar|\s with similar
means and a similar covariance matrix, but multiplied by ``fac**2`` (so
standard deviations are ``fac`` times smaller). The changes are random.
The function was designed to create fake data for testing fitting
routines, where ``g`` is set equal to ``fitfcn(x, prior)`` and ``fac<1``
(e.g., set ``fac=0.1`` to get fit parameters whose standard deviations
are 10x smaller than those of the corresponding priors).
Args:
g (dict, array or gvar.GVar): The |GVar| or array of |GVar|\s,
or dictionary whose values are |GVar|\s or arrays of |GVar|\s that
from which the fake data is generated.
fac (float): Uncertainties are rescaled by ``fac`` in the fake data.
Returns:
A collection of |GVar|\s with the same layout as ``g`` but with
somewhat different means, and standard deviations rescaled by ``fac``.
"""
if hasattr(g, 'keys'):
if not isinstance(g, BufferDict):
g = BufferDict(g)
return BufferDict(g, buf=make_fake_data(g.buf, fac))
else:
g_shape = numpy.shape(g)
g_flat = numpy.array(g).flat
zero = numpy.zeros(len(g_flat), float)
dg = (2. ** -0.5) * gvar(zero, evalcov(g_flat))
dg *= fac
noise = gvar(zero, sdev(dg))
g_flat = mean(g_flat) + dg + noise + next(raniter(dg + noise))
return g_flat[0] if g_shape == () else g_flat.reshape(g_shape) | python | def make_fake_data(g, fac=1.0):
""" Make fake data based on ``g``.
This function replaces the |GVar|\s in ``g`` by new |GVar|\s with similar
means and a similar covariance matrix, but multiplied by ``fac**2`` (so
standard deviations are ``fac`` times smaller). The changes are random.
The function was designed to create fake data for testing fitting
routines, where ``g`` is set equal to ``fitfcn(x, prior)`` and ``fac<1``
(e.g., set ``fac=0.1`` to get fit parameters whose standard deviations
are 10x smaller than those of the corresponding priors).
Args:
g (dict, array or gvar.GVar): The |GVar| or array of |GVar|\s,
or dictionary whose values are |GVar|\s or arrays of |GVar|\s that
from which the fake data is generated.
fac (float): Uncertainties are rescaled by ``fac`` in the fake data.
Returns:
A collection of |GVar|\s with the same layout as ``g`` but with
somewhat different means, and standard deviations rescaled by ``fac``.
"""
if hasattr(g, 'keys'):
if not isinstance(g, BufferDict):
g = BufferDict(g)
return BufferDict(g, buf=make_fake_data(g.buf, fac))
else:
g_shape = numpy.shape(g)
g_flat = numpy.array(g).flat
zero = numpy.zeros(len(g_flat), float)
dg = (2. ** -0.5) * gvar(zero, evalcov(g_flat))
dg *= fac
noise = gvar(zero, sdev(dg))
g_flat = mean(g_flat) + dg + noise + next(raniter(dg + noise))
return g_flat[0] if g_shape == () else g_flat.reshape(g_shape) | [
"def",
"make_fake_data",
"(",
"g",
",",
"fac",
"=",
"1.0",
")",
":",
"if",
"hasattr",
"(",
"g",
",",
"'keys'",
")",
":",
"if",
"not",
"isinstance",
"(",
"g",
",",
"BufferDict",
")",
":",
"g",
"=",
"BufferDict",
"(",
"g",
")",
"return",
"BufferDict"... | Make fake data based on ``g``.
This function replaces the |GVar|\s in ``g`` by new |GVar|\s with similar
means and a similar covariance matrix, but multiplied by ``fac**2`` (so
standard deviations are ``fac`` times smaller). The changes are random.
The function was designed to create fake data for testing fitting
routines, where ``g`` is set equal to ``fitfcn(x, prior)`` and ``fac<1``
(e.g., set ``fac=0.1`` to get fit parameters whose standard deviations
are 10x smaller than those of the corresponding priors).
Args:
g (dict, array or gvar.GVar): The |GVar| or array of |GVar|\s,
or dictionary whose values are |GVar|\s or arrays of |GVar|\s that
from which the fake data is generated.
fac (float): Uncertainties are rescaled by ``fac`` in the fake data.
Returns:
A collection of |GVar|\s with the same layout as ``g`` but with
somewhat different means, and standard deviations rescaled by ``fac``. | [
"Make",
"fake",
"data",
"based",
"on",
"g",
"."
] | d6671697319eb6280de3793c9a1c2b616c6f2ae0 | https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/__init__.py#L1327-L1361 | train | 33,574 |
gplepage/gvar | src/gvar/__init__.py | PDF.p2x | def p2x(self, p):
""" Map parameters ``p`` to vector in x-space.
x-space is a vector space of dimension ``p.size``. Its axes are
in the directions specified by the eigenvectors of ``p``'s covariance
matrix, and distance along an axis is in units of the standard
deviation in that direction.
"""
if hasattr(p, 'keys'):
dp = BufferDict(p, keys=self.g.keys())._buf[:self.meanflat.size] - self.meanflat
else:
dp = numpy.asarray(p).reshape(-1) - self.meanflat
return self.vec_isig.dot(dp) | python | def p2x(self, p):
""" Map parameters ``p`` to vector in x-space.
x-space is a vector space of dimension ``p.size``. Its axes are
in the directions specified by the eigenvectors of ``p``'s covariance
matrix, and distance along an axis is in units of the standard
deviation in that direction.
"""
if hasattr(p, 'keys'):
dp = BufferDict(p, keys=self.g.keys())._buf[:self.meanflat.size] - self.meanflat
else:
dp = numpy.asarray(p).reshape(-1) - self.meanflat
return self.vec_isig.dot(dp) | [
"def",
"p2x",
"(",
"self",
",",
"p",
")",
":",
"if",
"hasattr",
"(",
"p",
",",
"'keys'",
")",
":",
"dp",
"=",
"BufferDict",
"(",
"p",
",",
"keys",
"=",
"self",
".",
"g",
".",
"keys",
"(",
")",
")",
".",
"_buf",
"[",
":",
"self",
".",
"meanf... | Map parameters ``p`` to vector in x-space.
x-space is a vector space of dimension ``p.size``. Its axes are
in the directions specified by the eigenvectors of ``p``'s covariance
matrix, and distance along an axis is in units of the standard
deviation in that direction. | [
"Map",
"parameters",
"p",
"to",
"vector",
"in",
"x",
"-",
"space",
"."
] | d6671697319eb6280de3793c9a1c2b616c6f2ae0 | https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/__init__.py#L896-L908 | train | 33,575 |
gplepage/gvar | src/gvar/__init__.py | PDFHistogram.count | def count(self, data):
""" Compute histogram of data.
Counts the number of elements from array ``data`` in each bin of the
histogram. Results are returned in an array, call it ``h``, of
length ``nbin+2`` where ``h[0]`` is the number of data elements
that fall below the range of the histogram, ``h[-1]``
(i.e., ``h[nbin+1]``) is the number that fall above the range,
and ``h[i]`` is the number in the ``i``-th bin for ``i=1...nbin``.
Argument ``data`` can also be a float, in which case the result is the
same as from ``histogram([data])``. Note that the expectation value of
``count(f(p))`` over parameter values ``p`` drawn from a random
distribution gives the probabilities for values of ``f(p)`` to fall
in each histogram bin. Dividing by the bin widths gives the average
probability density for random variable ``f(p)`` in each bin.
Bin intervals are closed on the left and open on the right,
except for the last interval which is closed on both ends.
"""
if isinstance(data, float) or isinstance(data, int):
hist = numpy.zeros(self.nbin + 2, float)
if data > self.bins[-1]:
hist[-1] = 1.
elif data < self.bins[0]:
hist[0] = 1.
elif data == self.bins[-1]:
if self.nbin > 1:
hist[-2] = 1.
else:
hist[numpy.searchsorted(self.bins, data, side='right')] = 1.
return hist
if numpy.ndim(data) != 1:
data = numpy.reshape(data, -1)
else:
data = numpy.asarray(data)
middle = numpy.histogram(data, self.bins)[0]
below = numpy.sum(data < self.bins[0])
above = numpy.sum(data > self.bins[-1])
return numpy.array([below] + middle.tolist() + [above], float) | python | def count(self, data):
""" Compute histogram of data.
Counts the number of elements from array ``data`` in each bin of the
histogram. Results are returned in an array, call it ``h``, of
length ``nbin+2`` where ``h[0]`` is the number of data elements
that fall below the range of the histogram, ``h[-1]``
(i.e., ``h[nbin+1]``) is the number that fall above the range,
and ``h[i]`` is the number in the ``i``-th bin for ``i=1...nbin``.
Argument ``data`` can also be a float, in which case the result is the
same as from ``histogram([data])``. Note that the expectation value of
``count(f(p))`` over parameter values ``p`` drawn from a random
distribution gives the probabilities for values of ``f(p)`` to fall
in each histogram bin. Dividing by the bin widths gives the average
probability density for random variable ``f(p)`` in each bin.
Bin intervals are closed on the left and open on the right,
except for the last interval which is closed on both ends.
"""
if isinstance(data, float) or isinstance(data, int):
hist = numpy.zeros(self.nbin + 2, float)
if data > self.bins[-1]:
hist[-1] = 1.
elif data < self.bins[0]:
hist[0] = 1.
elif data == self.bins[-1]:
if self.nbin > 1:
hist[-2] = 1.
else:
hist[numpy.searchsorted(self.bins, data, side='right')] = 1.
return hist
if numpy.ndim(data) != 1:
data = numpy.reshape(data, -1)
else:
data = numpy.asarray(data)
middle = numpy.histogram(data, self.bins)[0]
below = numpy.sum(data < self.bins[0])
above = numpy.sum(data > self.bins[-1])
return numpy.array([below] + middle.tolist() + [above], float) | [
"def",
"count",
"(",
"self",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"float",
")",
"or",
"isinstance",
"(",
"data",
",",
"int",
")",
":",
"hist",
"=",
"numpy",
".",
"zeros",
"(",
"self",
".",
"nbin",
"+",
"2",
",",
"float",
... | Compute histogram of data.
Counts the number of elements from array ``data`` in each bin of the
histogram. Results are returned in an array, call it ``h``, of
length ``nbin+2`` where ``h[0]`` is the number of data elements
that fall below the range of the histogram, ``h[-1]``
(i.e., ``h[nbin+1]``) is the number that fall above the range,
and ``h[i]`` is the number in the ``i``-th bin for ``i=1...nbin``.
Argument ``data`` can also be a float, in which case the result is the
same as from ``histogram([data])``. Note that the expectation value of
``count(f(p))`` over parameter values ``p`` drawn from a random
distribution gives the probabilities for values of ``f(p)`` to fall
in each histogram bin. Dividing by the bin widths gives the average
probability density for random variable ``f(p)`` in each bin.
Bin intervals are closed on the left and open on the right,
except for the last interval which is closed on both ends. | [
"Compute",
"histogram",
"of",
"data",
"."
] | d6671697319eb6280de3793c9a1c2b616c6f2ae0 | https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/__init__.py#L1018-L1057 | train | 33,576 |
gplepage/gvar | src/gvar/__init__.py | PDFHistogram.gaussian_pdf | def gaussian_pdf(x, g):
""" Gaussian probability density function at ``x`` for |GVar| ``g``. """
return (
numpy.exp(-(x - g.mean) ** 2 / 2. /g.var) /
numpy.sqrt(g.var * 2 * numpy.pi)
) | python | def gaussian_pdf(x, g):
""" Gaussian probability density function at ``x`` for |GVar| ``g``. """
return (
numpy.exp(-(x - g.mean) ** 2 / 2. /g.var) /
numpy.sqrt(g.var * 2 * numpy.pi)
) | [
"def",
"gaussian_pdf",
"(",
"x",
",",
"g",
")",
":",
"return",
"(",
"numpy",
".",
"exp",
"(",
"-",
"(",
"x",
"-",
"g",
".",
"mean",
")",
"**",
"2",
"/",
"2.",
"/",
"g",
".",
"var",
")",
"/",
"numpy",
".",
"sqrt",
"(",
"g",
".",
"var",
"*"... | Gaussian probability density function at ``x`` for |GVar| ``g``. | [
"Gaussian",
"probability",
"density",
"function",
"at",
"x",
"for",
"|GVar|",
"g",
"."
] | d6671697319eb6280de3793c9a1c2b616c6f2ae0 | https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/__init__.py#L1102-L1107 | train | 33,577 |
brandon-rhodes/python-sgp4 | sgp4/io.py | verify_checksum | def verify_checksum(*lines):
"""Verify the checksum of one or more TLE lines.
Raises `ValueError` if any of the lines fails its checksum, and
includes the failing line in the error message.
"""
for line in lines:
checksum = line[68:69]
if not checksum.isdigit():
continue
checksum = int(checksum)
computed = compute_checksum(line)
if checksum != computed:
complaint = ('TLE line gives its checksum as {}'
' but in fact tallies to {}:\n{}')
raise ValueError(complaint.format(checksum, computed, line)) | python | def verify_checksum(*lines):
"""Verify the checksum of one or more TLE lines.
Raises `ValueError` if any of the lines fails its checksum, and
includes the failing line in the error message.
"""
for line in lines:
checksum = line[68:69]
if not checksum.isdigit():
continue
checksum = int(checksum)
computed = compute_checksum(line)
if checksum != computed:
complaint = ('TLE line gives its checksum as {}'
' but in fact tallies to {}:\n{}')
raise ValueError(complaint.format(checksum, computed, line)) | [
"def",
"verify_checksum",
"(",
"*",
"lines",
")",
":",
"for",
"line",
"in",
"lines",
":",
"checksum",
"=",
"line",
"[",
"68",
":",
"69",
"]",
"if",
"not",
"checksum",
".",
"isdigit",
"(",
")",
":",
"continue",
"checksum",
"=",
"int",
"(",
"checksum",... | Verify the checksum of one or more TLE lines.
Raises `ValueError` if any of the lines fails its checksum, and
includes the failing line in the error message. | [
"Verify",
"the",
"checksum",
"of",
"one",
"or",
"more",
"TLE",
"lines",
"."
] | a1e19e32831d6814b3ab34f55b39b8520d291c4e | https://github.com/brandon-rhodes/python-sgp4/blob/a1e19e32831d6814b3ab34f55b39b8520d291c4e/sgp4/io.py#L234-L250 | train | 33,578 |
brandon-rhodes/python-sgp4 | sgp4/io.py | compute_checksum | def compute_checksum(line):
"""Compute the TLE checksum for the given line."""
return sum((int(c) if c.isdigit() else c == '-') for c in line[0:68]) % 10 | python | def compute_checksum(line):
"""Compute the TLE checksum for the given line."""
return sum((int(c) if c.isdigit() else c == '-') for c in line[0:68]) % 10 | [
"def",
"compute_checksum",
"(",
"line",
")",
":",
"return",
"sum",
"(",
"(",
"int",
"(",
"c",
")",
"if",
"c",
".",
"isdigit",
"(",
")",
"else",
"c",
"==",
"'-'",
")",
"for",
"c",
"in",
"line",
"[",
"0",
":",
"68",
"]",
")",
"%",
"10"
] | Compute the TLE checksum for the given line. | [
"Compute",
"the",
"TLE",
"checksum",
"for",
"the",
"given",
"line",
"."
] | a1e19e32831d6814b3ab34f55b39b8520d291c4e | https://github.com/brandon-rhodes/python-sgp4/blob/a1e19e32831d6814b3ab34f55b39b8520d291c4e/sgp4/io.py#L261-L263 | train | 33,579 |
def propagate(self, year, month=1, day=1, hour=0, minute=0, second=0.0):
    """Return the satellite's position and velocity vectors at a date/time.

    The calendar date is converted to a Julian date, expressed as minutes
    since this satellite's TLE epoch, and fed to the SGP4 propagator.
    """
    julian_date = jday(year, month, day, hour, minute, second)
    minutes_past_epoch = (julian_date - self.jdsatepoch) * minutes_per_day
    position, velocity = sgp4(self, minutes_past_epoch)
    return position, velocity
"""Return a position and velocity vector for a given date and time."""
j = jday(year, month, day, hour, minute, second)
m = (j - self.jdsatepoch) * minutes_per_day
r, v = sgp4(self, m)
return r, v | [
"def",
"propagate",
"(",
"self",
",",
"year",
",",
"month",
"=",
"1",
",",
"day",
"=",
"1",
",",
"hour",
"=",
"0",
",",
"minute",
"=",
"0",
",",
"second",
"=",
"0.0",
")",
":",
"j",
"=",
"jday",
"(",
"year",
",",
"month",
",",
"day",
",",
"... | Return a position and velocity vector for a given date and time. | [
"Return",
"a",
"position",
"and",
"velocity",
"vector",
"for",
"a",
"given",
"date",
"and",
"time",
"."
] | a1e19e32831d6814b3ab34f55b39b8520d291c4e | https://github.com/brandon-rhodes/python-sgp4/blob/a1e19e32831d6814b3ab34f55b39b8520d291c4e/sgp4/model.py#L44-L50 | train | 33,580 |
def name(self):
    """Connect to the sensor and return its device name as a string."""
    with self._bt_interface.connect(self._mac) as connection:
        raw_name = connection.read_handle(_HANDLE_READ_NAME)  # pylint: disable=no-member
        if not raw_name:
            raise BluetoothBackendException("Could not read NAME using handle %s"
                                            " from Mi Temp sensor %s" % (hex(_HANDLE_READ_NAME), self._mac))
        # The handle yields a sequence of byte values; decode them one by one.
        return ''.join(map(chr, raw_name))
"""Return the name of the sensor."""
with self._bt_interface.connect(self._mac) as connection:
name = connection.read_handle(_HANDLE_READ_NAME) # pylint: disable=no-member
if not name:
raise BluetoothBackendException("Could not read NAME using handle %s"
" from Mi Temp sensor %s" % (hex(_HANDLE_READ_NAME), self._mac))
return ''.join(chr(n) for n in name) | [
"def",
"name",
"(",
"self",
")",
":",
"with",
"self",
".",
"_bt_interface",
".",
"connect",
"(",
"self",
".",
"_mac",
")",
"as",
"connection",
":",
"name",
"=",
"connection",
".",
"read_handle",
"(",
"_HANDLE_READ_NAME",
")",
"# pylint: disable=no-member",
"... | Return the name of the sensor. | [
"Return",
"the",
"name",
"of",
"the",
"sensor",
"."
] | bd6ffed5bfd9a3a52dd8a4b96e896fa79b5c5f10 | https://github.com/ratcashdev/mitemp/blob/bd6ffed5bfd9a3a52dd8a4b96e896fa79b5c5f10/mitemp_bt/mitemp_bt_poller.py#L45-L53 | train | 33,581 |
def handleNotification(self, handle, raw_data):  # pylint: disable=unused-argument,invalid-name
    """Callback invoked by the bluepy backend during wait_for_notification.

    Decodes the notification payload into the cache, validates it, and
    stamps the last-read time (or schedules a 5-minute back-off when the
    data turned out to be unusable).
    """
    if raw_data is None:
        return
    self._cache = raw_data.decode("utf-8").strip(' \n\t')
    self._check_data()
    if self.cache_available():
        self._last_read = datetime.now()
    else:
        # Sensor misbehaved: wait 5 minutes before retrying instead of
        # hammering it on every poll.
        self._last_read = datetime.now() - self._cache_timeout + \
            timedelta(seconds=300)
""" gets called by the bluepy backend when using wait_for_notification
"""
if raw_data is None:
return
data = raw_data.decode("utf-8").strip(' \n\t')
self._cache = data
self._check_data()
if self.cache_available():
self._last_read = datetime.now()
else:
# If a sensor doesn't work, wait 5 minutes before retrying
self._last_read = datetime.now() - self._cache_timeout + \
timedelta(seconds=300) | [
"def",
"handleNotification",
"(",
"self",
",",
"handle",
",",
"raw_data",
")",
":",
"# pylint: disable=unused-argument,invalid-name",
"if",
"raw_data",
"is",
"None",
":",
"return",
"data",
"=",
"raw_data",
".",
"decode",
"(",
"\"utf-8\"",
")",
".",
"strip",
"(",... | gets called by the bluepy backend when using wait_for_notification | [
"gets",
"called",
"by",
"the",
"bluepy",
"backend",
"when",
"using",
"wait_for_notification"
] | bd6ffed5bfd9a3a52dd8a4b96e896fa79b5c5f10 | https://github.com/ratcashdev/mitemp/blob/bd6ffed5bfd9a3a52dd8a4b96e896fa79b5c5f10/mitemp_bt/mitemp_bt_poller.py#L188-L201 | train | 33,582 |
def add_journal(self, data):
    """Register a new journal with the ArticleMeta service.

    :param data: legacy SciELO Documents JSON Type 3 payload.
    :return: the stored journal, parsed from the service's JSON response.
    """
    response = self.dispatcher(
        'add_journal',
        data,
        self._admintoken
    )
    return json.loads(response)
"""
This method include new journals to the ArticleMeta.
data: legacy SciELO Documents JSON Type 3.
"""
journal = self.dispatcher(
'add_journal',
data,
self._admintoken
)
return json.loads(journal) | [
"def",
"add_journal",
"(",
"self",
",",
"data",
")",
":",
"journal",
"=",
"self",
".",
"dispatcher",
"(",
"'add_journal'",
",",
"data",
",",
"self",
".",
"_admintoken",
")",
"return",
"json",
".",
"loads",
"(",
"journal",
")"
] | This method include new journals to the ArticleMeta.
data: legacy SciELO Documents JSON Type 3. | [
"This",
"method",
"include",
"new",
"journals",
"to",
"the",
"ArticleMeta",
"."
] | 7ff87a615951bfdcc6fd535ce7f7c65065f64caa | https://github.com/scieloorg/articlemetaapi/blob/7ff87a615951bfdcc6fd535ce7f7c65065f64caa/articlemeta/client.py#L689-L702 | train | 33,583 |
def watermark_process():
    """Handle a watermark form POST: validate the upload, save it to the
    uploads folder, apply the watermark, and return the resulting file.

    Any validation failure (wrong method, missing/empty/unsupported file)
    aborts with HTTP 403.
    """
    # Only accept POSTed forms
    if request.method != 'POST':
        abort(403)
    # The form must carry a 'pdf' file part
    if 'pdf' not in request.files:
        abort(403)
    uploaded = request.files['pdf']
    # Browsers submit an empty filename when no file was chosen; also reject
    # disallowed file types.
    if uploaded.filename == '':
        abort(403)
    if not allowed_file(uploaded.filename):
        abort(403)
    params = {
        'address': request.form['address'],
        'town': request.form['town'],
        'state': request.form['state'],
    }
    # Persist the upload (creating the uploads directory on first use)
    destination = os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(uploaded.filename))
    if not os.path.exists(app.config['UPLOAD_FOLDER']):
        os.mkdir(app.config['UPLOAD_FOLDER'])
    uploaded.save(destination)
    # Watermark the saved copy and serve the result back to the client
    watermarked = apply_watermark(destination, params)
    return send_from_directory(app.config['UPLOAD_FOLDER'], os.path.basename(watermarked))
"""Apply a watermark to a PDF file."""
# Redirect to watermark page that contains form
if not request.method == 'POST':
abort(403)
# Check if the post request has the file part
if 'pdf' not in request.files:
abort(403)
# Retrieve PDF file and parameters
file = request.files['pdf']
# If user does not select file, browser also submit an empty part without filename
if file.filename == '':
abort(403)
# Check if the file is an allowed file type
if not allowed_file(file.filename):
abort(403)
params = {
'address': request.form['address'],
'town': request.form['town'],
'state': request.form['state'],
}
# Save file to uploads folder
filename = secure_filename(file.filename)
file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# Make uploads directory if it does not exist
if not os.path.exists(app.config['UPLOAD_FOLDER']):
os.mkdir(app.config['UPLOAD_FOLDER'])
file.save(file_path)
# Create new watermarked file and return file path
watermarked = apply_watermark(file_path, params)
return send_from_directory(app.config['UPLOAD_FOLDER'], os.path.basename(watermarked)) | [
"def",
"watermark_process",
"(",
")",
":",
"# Redirect to watermark page that contains form",
"if",
"not",
"request",
".",
"method",
"==",
"'POST'",
":",
"abort",
"(",
"403",
")",
"# Check if the post request has the file part",
"if",
"'pdf'",
"not",
"in",
"request",
... | Apply a watermark to a PDF file. | [
"Apply",
"a",
"watermark",
"to",
"a",
"PDF",
"file",
"."
] | 993421cc087eefefe01ff09afabd893bcc2718ec | https://github.com/mrstephenneal/pdfconduit/blob/993421cc087eefefe01ff09afabd893bcc2718ec/pdf/api/routes.py#L39-L78 | train | 33,584 |
def slicer(document, first_page=None, last_page=None, suffix='sliced', tempdir=None):
    """Slice a PDF document down to a range of pages.

    :param document: path to the source PDF
    :param first_page: 1-indexed first page to keep (default: first page)
    :param last_page: 1-indexed last page to keep (default: last page)
    :param suffix: suffix for the output name when no tempdir is given
    :param tempdir: when set, write output to a NamedTemporaryFile there
    :return: path to the sliced PDF
    """
    # Choose the output path: tempdir > suffixed sibling > anonymous temp file
    if tempdir:
        with NamedTemporaryFile(suffix='.pdf', dir=tempdir, delete=False) as temp:
            output = temp.name
    elif suffix:
        output = os.path.join(os.path.dirname(document), add_suffix(document, suffix))
    else:
        with NamedTemporaryFile(suffix='.pdf') as temp:
            output = temp.name
    pages = Info(document).pages
    # Reindex the 1-based user selection into 0-based slice bounds.
    # Bug fix: the original `first_page - 1 if not None else None` tested
    # the constant `not None` (always True), so first_page=None — the
    # documented default — crashed with a TypeError before reaching here.
    first_page = first_page - 1 if first_page is not None else 0
    last_page = last_page if last_page is not None else pages
    # Validate page range by comparing selection to number of pages in PDF document
    invalid = 'Number of pages: ' + str(pages) + ' ----> Page Range Input: ' + str(first_page) + '-' + str(last_page)
    assert first_page <= last_page <= pages, invalid
    pdf = PdfFileReader(document)
    writer = PdfFileWriter()
    for page in list(range(pdf.getNumPages()))[first_page:last_page]:
        writer.addPage(pdf.getPage(page))
    with open(output, 'wb') as out:
        writer.write(out)
    return output
"""Slice a PDF document to remove pages."""
# Set output file name
if tempdir:
with NamedTemporaryFile(suffix='.pdf', dir=tempdir, delete=False) as temp:
output = temp.name
elif suffix:
output = os.path.join(os.path.dirname(document), add_suffix(document, suffix))
else:
with NamedTemporaryFile(suffix='.pdf') as temp:
output = temp.name
# Reindex page selections for simple user input
first_page = first_page - 1 if not None else None
# Validate page range by comparing selection to number of pages in PDF document
pages = Info(document).pages
invalid = 'Number of pages: ' + str(pages) + ' ----> Page Range Input: ' + str(first_page) + '-' + str(last_page)
assert first_page <= last_page <= pages, invalid
pdf = PdfFileReader(document)
writer = PdfFileWriter()
pages = list(range(pdf.getNumPages()))[first_page:last_page]
for page in pages:
writer.addPage(pdf.getPage(page))
with open(output, 'wb') as out:
writer.write(out)
return output | [
"def",
"slicer",
"(",
"document",
",",
"first_page",
"=",
"None",
",",
"last_page",
"=",
"None",
",",
"suffix",
"=",
"'sliced'",
",",
"tempdir",
"=",
"None",
")",
":",
"# Set output file name",
"if",
"tempdir",
":",
"with",
"NamedTemporaryFile",
"(",
"suffix... | Slice a PDF document to remove pages. | [
"Slice",
"a",
"PDF",
"document",
"to",
"remove",
"pages",
"."
] | 993421cc087eefefe01ff09afabd893bcc2718ec | https://github.com/mrstephenneal/pdfconduit/blob/993421cc087eefefe01ff09afabd893bcc2718ec/pdf/transform/slice.py#L11-L40 | train | 33,585 |
def _reader(path, password, prompt):
        """Open a PDF with PdfFileReader and decrypt it if it is encrypted.

        :param path: file path, or an already-constructed PdfFileReader
        :param password: decryption password; falsy to first try a blank one
        :param prompt: when True, interactively ask the user for a password
        :return: the (possibly decrypted) PdfFileReader, or False when the
            document stays encrypted and no password could be obtained
        """
        pdf = PdfFileReader(path) if not isinstance(path, PdfFileReader) else path
        # Check that PDF is encrypted
        if pdf.isEncrypted:
            # Check that password is none
            if not password:
                pdf.decrypt('')
            # Try and decrypt PDF using no password, prompt for password
            # NOTE(review): this branch is a sibling of the `if not password`
            # block above, so it also runs when a password WAS supplied; with
            # prompt=False that returns False before the supplied password is
            # ever tried — confirm the intended nesting against callers.
            if pdf.isEncrypted and prompt:
                print('No password has been given for encrypted PDF ', path)
                password = input('Enter Password: ')
            else:
                return False
            pdf.decrypt(password)
        return pdf
"""Read PDF and decrypt if encrypted."""
pdf = PdfFileReader(path) if not isinstance(path, PdfFileReader) else path
# Check that PDF is encrypted
if pdf.isEncrypted:
# Check that password is none
if not password:
pdf.decrypt('')
# Try and decrypt PDF using no password, prompt for password
if pdf.isEncrypted and prompt:
print('No password has been given for encrypted PDF ', path)
password = input('Enter Password: ')
else:
return False
pdf.decrypt(password)
return pdf | [
"def",
"_reader",
"(",
"path",
",",
"password",
",",
"prompt",
")",
":",
"pdf",
"=",
"PdfFileReader",
"(",
"path",
")",
"if",
"not",
"isinstance",
"(",
"path",
",",
"PdfFileReader",
")",
"else",
"path",
"# Check that PDF is encrypted",
"if",
"pdf",
".",
"i... | Read PDF and decrypt if encrypted. | [
"Read",
"PDF",
"and",
"decrypt",
"if",
"encrypted",
"."
] | 993421cc087eefefe01ff09afabd893bcc2718ec | https://github.com/mrstephenneal/pdfconduit/blob/993421cc087eefefe01ff09afabd893bcc2718ec/pdf/utils/info.py#L10-L25 | train | 33,586 |
mrstephenneal/pdfconduit | pdf/utils/info.py | Info._resolved_objects | def _resolved_objects(pdf, xobject):
"""Retrieve rotatation info."""
return [pdf.getPage(i).get(xobject) for i in range(pdf.getNumPages())][0] | python | def _resolved_objects(pdf, xobject):
"""Retrieve rotatation info."""
return [pdf.getPage(i).get(xobject) for i in range(pdf.getNumPages())][0] | [
"def",
"_resolved_objects",
"(",
"pdf",
",",
"xobject",
")",
":",
"return",
"[",
"pdf",
".",
"getPage",
"(",
"i",
")",
".",
"get",
"(",
"xobject",
")",
"for",
"i",
"in",
"range",
"(",
"pdf",
".",
"getNumPages",
"(",
")",
")",
"]",
"[",
"0",
"]"
] | Retrieve rotatation info. | [
"Retrieve",
"rotatation",
"info",
"."
] | 993421cc087eefefe01ff09afabd893bcc2718ec | https://github.com/mrstephenneal/pdfconduit/blob/993421cc087eefefe01ff09afabd893bcc2718ec/pdf/utils/info.py#L28-L30 | train | 33,587 |
def resources(self):
    """Return the page object for every page of the underlying PDF."""
    page_count = self.pdf.getNumPages()
    return [self.pdf.getPage(index) for index in range(page_count)]
"""Retrieve contents of each page of PDF"""
return [self.pdf.getPage(i) for i in range(self.pdf.getNumPages())] | [
"def",
"resources",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"pdf",
".",
"getPage",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"pdf",
".",
"getNumPages",
"(",
")",
")",
"]"
] | Retrieve contents of each page of PDF | [
"Retrieve",
"contents",
"of",
"each",
"page",
"of",
"PDF"
] | 993421cc087eefefe01ff09afabd893bcc2718ec | https://github.com/mrstephenneal/pdfconduit/blob/993421cc087eefefe01ff09afabd893bcc2718ec/pdf/utils/info.py#L52-L54 | train | 33,588 |
def security(self):
    """Return the PDF's resolved security objects flattened into one dict.

    Entries from later objects overwrite identical keys from earlier ones,
    matching plain dict-comprehension semantics.
    """
    merged = {}
    for _ident, obj in self.pdf.resolvedObjects.items():
        merged.update(obj)
    return merged
"""Print security object information for a pdf document"""
return {k: v for i in self.pdf.resolvedObjects.items() for k, v in i[1].items()} | [
"def",
"security",
"(",
"self",
")",
":",
"return",
"{",
"k",
":",
"v",
"for",
"i",
"in",
"self",
".",
"pdf",
".",
"resolvedObjects",
".",
"items",
"(",
")",
"for",
"k",
",",
"v",
"in",
"i",
"[",
"1",
"]",
".",
"items",
"(",
")",
"}"
] | Print security object information for a pdf document | [
"Print",
"security",
"object",
"information",
"for",
"a",
"pdf",
"document"
] | 993421cc087eefefe01ff09afabd893bcc2718ec | https://github.com/mrstephenneal/pdfconduit/blob/993421cc087eefefe01ff09afabd893bcc2718ec/pdf/utils/info.py#L57-L59 | train | 33,589 |
def _make_request(self, url, params=None, opener=None):
    """
    Configure a HTTP request, fire it off and return the response body.

    :param url: absolute URL to request
    :param params: optional encoded payload; when present the request
        becomes a POST
    :param opener: optional urllib handler class (e.g. a multipart handler
        for uploads); defaults to plain urlopen
    :raises DoesNotExistError: on HTTP 404
    :raises CredentialsFailedError: on HTTP 401
    """
    # Create the request object (params only included when provided)
    args = [i for i in [url, params] if i]
    request = urllib.request.Request(*args)
    # If the client has credentials, include them as a Basic auth header.
    if self.username and self.password:
        credentials = '%s:%s' % (self.username, self.password)
        # Fix: base64.encodestring was deprecated since 3.1 and REMOVED in
        # Python 3.9; b64encode yields the same value with no trailing
        # newline, so the old .replace("\n", "") is no longer needed.
        encoded_credentials = base64.b64encode(
            credentials.encode("utf-8")
        ).decode("utf-8")
        header = 'Basic %s' % encoded_credentials
        request.add_header('Authorization', header)
    # If the request provides a custom opener, like the upload request,
    # which relies on a multipart request, it is applied here.
    if opener:
        opener = urllib.request.build_opener(opener)
        request_method = opener.open
    else:
        request_method = urllib.request.urlopen
    # Make the request, translating HTTP failures into package exceptions
    try:
        response = request_method(request)
    except Exception:
        e = sys.exc_info()[1]
        if getattr(e, 'code', None) == 404:
            raise DoesNotExistError("The resource you've requested does \
not exist or is unavailable without the proper credentials.")
        elif getattr(e, 'code', None) == 401:
            raise CredentialsFailedError("The resource you've requested \
requires proper credentials.")
        else:
            raise e
    # Read the response and return it
    return response.read()
"""
Configure a HTTP request, fire it off and return the response.
"""
# Create the request object
args = [i for i in [url, params] if i]
request = urllib.request.Request(*args)
# If the client has credentials, include them as a header
if self.username and self.password:
credentials = '%s:%s' % (self.username, self.password)
encoded_credentials = base64.encodestring(
credentials.encode("utf-8")
).decode("utf-8").replace("\n", "")
header = 'Basic %s' % encoded_credentials
request.add_header('Authorization', header)
# If the request provides a custom opener, like the upload request,
# which relies on a multipart request, it is applied here.
if opener:
opener = urllib.request.build_opener(opener)
request_method = opener.open
else:
request_method = urllib.request.urlopen
# Make the request
try:
response = request_method(request)
except Exception:
e = sys.exc_info()[1]
if getattr(e, 'code', None) == 404:
raise DoesNotExistError("The resource you've requested does \
not exist or is unavailable without the proper credentials.")
elif getattr(e, 'code', None) == 401:
raise CredentialsFailedError("The resource you've requested \
requires proper credentials.")
else:
raise e
# Read the response and return it
return response.read() | [
"def",
"_make_request",
"(",
"self",
",",
"url",
",",
"params",
"=",
"None",
",",
"opener",
"=",
"None",
")",
":",
"# Create the request object",
"args",
"=",
"[",
"i",
"for",
"i",
"in",
"[",
"url",
",",
"params",
"]",
"if",
"i",
"]",
"request",
"=",... | Configure a HTTP request, fire it off and return the response. | [
"Configure",
"a",
"HTTP",
"request",
"fire",
"it",
"off",
"and",
"return",
"the",
"response",
"."
] | 0d7f42cbf1edf5c61fca37ed846362cba4abfd76 | https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L53-L89 | train | 33,590 |
def put(self, method, params):
    """
    Post changes back to DocumentCloud, simulating an HTTP PUT.

    DocumentCloud expects a POST carrying ``_method=put`` plus Rails-style
    array/hash parameters, which plain urlencode cannot produce — hence the
    manual suffix building below.
    """
    # DocumentCloud simulates PUT via a POST with this marker field.
    params['_method'] = 'put'
    if params.get("document_ids", None):
        # Array params are appended by hand in the form DocumentCloud
        # expects: &document_ids[]=a&document_ids[]=b&document_ids[]=c
        document_ids = params.pop("document_ids")
        encoded = urllib.parse.urlencode(params, doseq=True)
        encoded += "".join(
            '&document_ids[]=%s' % id for id in document_ids
        )
    elif params.get("data", None):
        # Hash params become &data[key]=value, with both sides quoted
        data = params.pop("data")
        encoded = urllib.parse.urlencode(params, doseq=True)
        encoded += "".join(
            '&data[%s]=%s' % (
                urllib.parse.quote_plus(key.encode("utf-8")),
                urllib.parse.quote_plus(value.encode("utf-8"))
            )
            for key, value in data.items()
        )
    else:
        # No special cases: vanilla urlencode is enough
        encoded = urllib.parse.urlencode(params, doseq=True)
    # Fire the request
    self._make_request(
        self.BASE_URI + method,
        encoded.encode("utf-8"),
    )
"""
Post changes back to DocumentCloud
"""
# Prepare the params, first by adding a custom command to
# simulate a PUT request even though we are actually POSTing.
# This is something DocumentCloud expects.
params['_method'] = 'put'
# Some special case handling of the document_ids list, if it exists
if params.get("document_ids", None):
# Pull the document_ids out of the params
document_ids = params.get("document_ids")
del params['document_ids']
params = urllib.parse.urlencode(params, doseq=True)
# These need to be specially formatted in the style documentcloud
# expects arrays. The example they provide is:
# ?document_ids[]=28-boumediene&document_ids[]=\
# 207-academy&document_ids[]=30-insider-trading
params += "".join([
'&document_ids[]=%s' % id for id in document_ids
])
# More special case handler of key/value data tags, if they exist
elif params.get("data", None):
# Pull them out of the dict
data = params.get("data")
del params['data']
params = urllib.parse.urlencode(params, doseq=True)
# Format them in the style documentcloud expects
# ?data['foo']=bar&data['tit']=tat
params += "".join([
'&data[%s]=%s' % (
urllib.parse.quote_plus(key.encode("utf-8")),
urllib.parse.quote_plus(value.encode("utf-8"))
) for key, value in
data.items()
])
else:
# Otherwise, we can just use the vanilla urllib prep method
params = urllib.parse.urlencode(params, doseq=True)
# Make the request
self._make_request(
self.BASE_URI + method,
params.encode("utf-8"),
) | [
"def",
"put",
"(",
"self",
",",
"method",
",",
"params",
")",
":",
"# Prepare the params, first by adding a custom command to",
"# simulate a PUT request even though we are actually POSTing.",
"# This is something DocumentCloud expects.",
"params",
"[",
"'_method'",
"]",
"=",
"'p... | Post changes back to DocumentCloud | [
"Post",
"changes",
"back",
"to",
"DocumentCloud"
] | 0d7f42cbf1edf5c61fca37ed846362cba4abfd76 | https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L92-L136 | train | 33,591 |
def fetch(self, method, params=None):
    """
    GET an API endpoint and return its JSON payload as a Python object.
    """
    # Encode query params only when some were supplied
    if params:
        params = urllib.parse.urlencode(params, doseq=True).encode("utf-8")
    raw = self._make_request(
        self.BASE_URI + method,
        params,
    )
    # Decode the JSON body into a dict and hand it back
    return json.loads(raw.decode("utf-8"))
"""
Fetch an url.
"""
# Encode params if they exist
if params:
params = urllib.parse.urlencode(params, doseq=True).encode("utf-8")
content = self._make_request(
self.BASE_URI + method,
params,
)
# Convert its JSON to a Python dictionary and return
return json.loads(content.decode("utf-8")) | [
"def",
"fetch",
"(",
"self",
",",
"method",
",",
"params",
"=",
"None",
")",
":",
"# Encode params if they exist",
"if",
"params",
":",
"params",
"=",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"params",
",",
"doseq",
"=",
"True",
")",
".",
"encode",... | Fetch an url. | [
"Fetch",
"an",
"url",
"."
] | 0d7f42cbf1edf5c61fca37ed846362cba4abfd76 | https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L138-L150 | train | 33,592 |
datadesk/python-documentcloud | documentcloud/__init__.py | DocumentClient._get_search_page | def _get_search_page(
self,
query,
page,
per_page=1000,
mentions=3,
data=False,
):
"""
Retrieve one page of search results from the DocumentCloud API.
"""
if mentions > 10:
raise ValueError("You cannot search for more than 10 mentions")
params = {
'q': query,
'page': page,
'per_page': per_page,
'mentions': mentions,
}
if data:
params['data'] = 'true'
response = self.fetch('search.json', params)
return response.get("documents") | python | def _get_search_page(
self,
query,
page,
per_page=1000,
mentions=3,
data=False,
):
"""
Retrieve one page of search results from the DocumentCloud API.
"""
if mentions > 10:
raise ValueError("You cannot search for more than 10 mentions")
params = {
'q': query,
'page': page,
'per_page': per_page,
'mentions': mentions,
}
if data:
params['data'] = 'true'
response = self.fetch('search.json', params)
return response.get("documents") | [
"def",
"_get_search_page",
"(",
"self",
",",
"query",
",",
"page",
",",
"per_page",
"=",
"1000",
",",
"mentions",
"=",
"3",
",",
"data",
"=",
"False",
",",
")",
":",
"if",
"mentions",
">",
"10",
":",
"raise",
"ValueError",
"(",
"\"You cannot search for m... | Retrieve one page of search results from the DocumentCloud API. | [
"Retrieve",
"one",
"page",
"of",
"search",
"results",
"from",
"the",
"DocumentCloud",
"API",
"."
] | 0d7f42cbf1edf5c61fca37ed846362cba4abfd76 | https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L195-L217 | train | 33,593 |
def search(self, query, page=None, per_page=1000, mentions=3, data=False):
    """
    Retrieve all Document objects that match a search query.

    When *page* is given only that page of results is fetched; otherwise
    pages are fetched one after another until the API returns an empty
    batch.

    Example usage:
        >> documentcloud.documents.search('salazar')
    """
    if page:
        # Caller restricted the search to a single page
        document_list = self._get_search_page(
            query,
            page=page,
            per_page=per_page,
            mentions=mentions,
            data=data,
        )
    else:
        # Walk every page until the API runs dry
        document_list = []
        page_number = 1
        while True:
            batch = self._get_search_page(
                query,
                page=page_number,
                per_page=per_page,
                mentions=mentions,
                data=data,
            )
            if not batch:
                break
            document_list += batch
            page_number += 1
    # Wrap the raw JSON dicts in Document objects, carrying the connection
    obj_list = []
    for doc in document_list:
        doc['_connection'] = self._connection
        obj_list.append(Document(doc))
    return obj_list
"""
Retrieve all objects that make a search query.
Will loop through all pages that match unless you provide
the number of pages you'd like to restrict the search to.
Example usage:
>> documentcloud.documents.search('salazar')
"""
# If the user provides a page, search it and stop there
if page:
document_list = self._get_search_page(
query,
page=page,
per_page=per_page,
mentions=mentions,
data=data,
)
# If the user doesn't provide a page keep looping until you have
# everything
else:
page = 1
document_list = []
# Loop through all the search pages and fetch everything
while True:
results = self._get_search_page(
query,
page=page,
per_page=per_page,
mentions=mentions,
data=data,
)
if results:
document_list += results
page += 1
else:
break
# Convert the JSON objects from the API into Python objects
obj_list = []
for doc in document_list:
doc['_connection'] = self._connection
obj = Document(doc)
obj_list.append(obj)
# Pass it back out
return obj_list | [
"def",
"search",
"(",
"self",
",",
"query",
",",
"page",
"=",
"None",
",",
"per_page",
"=",
"1000",
",",
"mentions",
"=",
"3",
",",
"data",
"=",
"False",
")",
":",
"# If the user provides a page, search it and stop there",
"if",
"page",
":",
"document_list",
... | Retrieve all objects that make a search query.
Will loop through all pages that match unless you provide
the number of pages you'd like to restrict the search to.
Example usage:
>> documentcloud.documents.search('salazar') | [
"Retrieve",
"all",
"objects",
"that",
"make",
"a",
"search",
"query",
"."
] | 0d7f42cbf1edf5c61fca37ed846362cba4abfd76 | https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L219-L265 | train | 33,594 |
def get(self, id):
    """
    Retrieve a particular document using its unique identifier.

    Example usage:
        >> documentcloud.documents.get('71072-oir-final-report')
    """
    payload = self.fetch('documents/%s.json' % id).get("document")
    # Attach the live connection so the Document can make follow-up calls
    payload['_connection'] = self._connection
    return Document(payload)
"""
Retrieve a particular document using it's unique identifier.
Example usage:
>> documentcloud.documents.get('71072-oir-final-report')
"""
data = self.fetch('documents/%s.json' % id).get("document")
data['_connection'] = self._connection
return Document(data) | [
"def",
"get",
"(",
"self",
",",
"id",
")",
":",
"data",
"=",
"self",
".",
"fetch",
"(",
"'documents/%s.json'",
"%",
"id",
")",
".",
"get",
"(",
"\"document\"",
")",
"data",
"[",
"'_connection'",
"]",
"=",
"self",
".",
"_connection",
"return",
"Document... | Retrieve a particular document using it's unique identifier.
Example usage:
>> documentcloud.documents.get('71072-oir-final-report') | [
"Retrieve",
"a",
"particular",
"document",
"using",
"it",
"s",
"unique",
"identifier",
"."
] | 0d7f42cbf1edf5c61fca37ed846362cba4abfd76 | https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L267-L277 | train | 33,595 |
def upload(
        self, pdf, title=None, source=None, description=None,
        related_article=None, published_url=None, access='private',
        project=None, data=None, secure=False, force_ocr=False
    ):
        """
        Upload a PDF or other image file to DocumentCloud.
        You can submit either a pdf opened as a file object, a path
        to a pdf file, or a URL to a remote pdf.
        Example usage:
            # From a file path
            >> documentcloud.documents.upload(
            >>     "/home/ben/sample.pdf",
            >>     "sample title"
            >>)
            # From a file object
            >> pdf = open(path, 'rb')
            >> documentcloud.documents.upload(pdf, "sample title")
        Returns the document that's created as a Document object.

        :param pdf: open file object, local file path, or URL string
        :param title: document title; defaults to the file's base name
        :param access: DocumentCloud access level (default 'private')
        :param data: dict of key/value tags; keys are validated
        :raises ValueError: when the file exceeds the API's 400MB cap

        Based on code developed by Mitchell Kotler and
        refined by Christopher Groskopf.
        """
        # Required pdf parameter: branch on file object vs URL vs local path
        if hasattr(pdf, 'read'):
            try:
                size = os.fstat(pdf.fileno()).st_size
            except Exception:
                # Not a real OS file (e.g. BytesIO): skip the size check
                size = 0
            params = {'file': pdf}
            opener = MultipartPostHandler
        elif self.is_url(pdf):
            size = 0
            params = {'file': pdf}
            opener = PostHandler  # URL uploads don't need MultiPart
        else:
            size = os.path.getsize(pdf)
            # NOTE(review): this handle is never explicitly closed; the
            # opener reads it during the request — confirm cleanup policy.
            params = {'file': open(pdf, 'rb')}
            opener = MultipartPostHandler
        # Enforce file size limit of the DocumentCloud API
        if size >= 399999999:
            raise ValueError("The pdf you have submitted is over the \
DocumentCloud API's 400MB file size limit. Split it into smaller pieces \
and try again.")
        # Optional parameters
        if title:
            params['title'] = title
        else:
            # Set it to the file name (base name without extension)
            if hasattr(pdf, 'read'):
                params['title'] = pdf.name.split(os.sep)[-1].split(".")[0]
            else:
                params['title'] = pdf.split(os.sep)[-1].split(".")[0]
        if source:
            params['source'] = source
        if description:
            params['description'] = description
        if related_article:
            params['related_article'] = related_article
        if published_url:
            params['published_url'] = published_url
        if access:
            params['access'] = access
        if project:
            params['project'] = project
        if data:
            # Validate each tag key before flattening into data[key] params
            for key, value in list(data.items()):
                is_valid_data_keyword(key)
                params['data[%s]' % key] = value
        if secure:
            params['secure'] = 'true'
        if force_ocr:
            params['force_ocr'] = 'true'
        # Make the request
        response = self._make_request(
            self.BASE_URI + 'upload.json',
            params,
            opener=opener
        )
        # Pull the id from the response (slug prefix before the first dash)
        response_id = json.loads(response.decode("utf-8"))['id'].split("-")[0]
        # Get the document and return it
        return self.get(response_id)
self, pdf, title=None, source=None, description=None,
related_article=None, published_url=None, access='private',
project=None, data=None, secure=False, force_ocr=False
):
"""
Upload a PDF or other image file to DocumentCloud.
You can submit either a pdf opened as a file object or a path
to a pdf file.
Example usage:
# From a file path
>> documentcloud.documents.upload(
>> "/home/ben/sample.pdf",
>> "sample title"
>>)
# From a file object
>> pdf = open(path, 'rb')
>> documentcloud.documents.upload(pdf, "sample title")
Returns the document that's created as a Document object.
Based on code developed by Mitchell Kotler and
refined by Christopher Groskopf.
"""
# Required pdf parameter
if hasattr(pdf, 'read'):
try:
size = os.fstat(pdf.fileno()).st_size
except Exception:
size = 0
params = {'file': pdf}
opener = MultipartPostHandler
elif self.is_url(pdf):
size = 0
params = {'file': pdf}
opener = PostHandler # URL uploads don't need MultiPart
else:
size = os.path.getsize(pdf)
params = {'file': open(pdf, 'rb')}
opener = MultipartPostHandler
# Enforce file size limit of the DocumentCloud API
if size >= 399999999:
raise ValueError("The pdf you have submitted is over the \
DocumentCloud API's 400MB file size limit. Split it into smaller pieces \
and try again.")
# Optional parameters
if title:
params['title'] = title
else:
# Set it to the file name
if hasattr(pdf, 'read'):
params['title'] = pdf.name.split(os.sep)[-1].split(".")[0]
else:
params['title'] = pdf.split(os.sep)[-1].split(".")[0]
if source:
params['source'] = source
if description:
params['description'] = description
if related_article:
params['related_article'] = related_article
if published_url:
params['published_url'] = published_url
if access:
params['access'] = access
if project:
params['project'] = project
if data:
for key, value in list(data.items()):
is_valid_data_keyword(key)
params['data[%s]' % key] = value
if secure:
params['secure'] = 'true'
if force_ocr:
params['force_ocr'] = 'true'
# Make the request
response = self._make_request(
self.BASE_URI + 'upload.json',
params,
opener=opener
)
# Pull the id from the response
response_id = json.loads(response.decode("utf-8"))['id'].split("-")[0]
# Get the document and return it
return self.get(response_id) | [
"def",
"upload",
"(",
"self",
",",
"pdf",
",",
"title",
"=",
"None",
",",
"source",
"=",
"None",
",",
"description",
"=",
"None",
",",
"related_article",
"=",
"None",
",",
"published_url",
"=",
"None",
",",
"access",
"=",
"'private'",
",",
"project",
"... | Upload a PDF or other image file to DocumentCloud.
You can submit either a pdf opened as a file object or a path
to a pdf file.
Example usage:
# From a file path
>> documentcloud.documents.upload(
>> "/home/ben/sample.pdf",
>> "sample title"
>>)
# From a file object
>> pdf = open(path, 'rb')
>> documentcloud.documents.upload(pdf, "sample title")
Returns the document that's created as a Document object.
Based on code developed by Mitchell Kotler and
refined by Christopher Groskopf. | [
"Upload",
"a",
"PDF",
"or",
"other",
"image",
"file",
"to",
"DocumentCloud",
"."
] | 0d7f42cbf1edf5c61fca37ed846362cba4abfd76 | https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L280-L367 | train | 33,596 |
datadesk/python-documentcloud | documentcloud/__init__.py | DocumentClient.upload_directory | def upload_directory(
self, path, source=None, description=None,
related_article=None, published_url=None, access='private',
project=None, data=None, secure=False, force_ocr=False
):
"""
Uploads all the PDFs in the provided directory.
Example usage:
>> documentcloud.documents.upload_directory("/home/ben/pdfs/")
Returns a list of the documents created during the upload.
Based on code developed by Mitchell Kotler and refined
by Christopher Groskopf.
"""
# Loop through the path and get all the files
path_list = []
for (dirpath, dirname, filenames) in os.walk(path):
path_list.extend([
os.path.join(dirpath, i) for i in filenames
if i.lower().endswith(".pdf")
])
# Upload all the pdfs
obj_list = []
for pdf_path in path_list:
obj = self.upload(
pdf_path, source=source, description=description,
related_article=related_article, published_url=published_url,
access=access, project=project, data=data, secure=secure,
force_ocr=force_ocr
)
obj_list.append(obj)
# Pass back the list of documents
return obj_list | python | def upload_directory(
self, path, source=None, description=None,
related_article=None, published_url=None, access='private',
project=None, data=None, secure=False, force_ocr=False
):
"""
Uploads all the PDFs in the provided directory.
Example usage:
>> documentcloud.documents.upload_directory("/home/ben/pdfs/")
Returns a list of the documents created during the upload.
Based on code developed by Mitchell Kotler and refined
by Christopher Groskopf.
"""
# Loop through the path and get all the files
path_list = []
for (dirpath, dirname, filenames) in os.walk(path):
path_list.extend([
os.path.join(dirpath, i) for i in filenames
if i.lower().endswith(".pdf")
])
# Upload all the pdfs
obj_list = []
for pdf_path in path_list:
obj = self.upload(
pdf_path, source=source, description=description,
related_article=related_article, published_url=published_url,
access=access, project=project, data=data, secure=secure,
force_ocr=force_ocr
)
obj_list.append(obj)
# Pass back the list of documents
return obj_list | [
"def",
"upload_directory",
"(",
"self",
",",
"path",
",",
"source",
"=",
"None",
",",
"description",
"=",
"None",
",",
"related_article",
"=",
"None",
",",
"published_url",
"=",
"None",
",",
"access",
"=",
"'private'",
",",
"project",
"=",
"None",
",",
"... | Uploads all the PDFs in the provided directory.
Example usage:
>> documentcloud.documents.upload_directory("/home/ben/pdfs/")
Returns a list of the documents created during the upload.
Based on code developed by Mitchell Kotler and refined
by Christopher Groskopf. | [
"Uploads",
"all",
"the",
"PDFs",
"in",
"the",
"provided",
"directory",
"."
] | 0d7f42cbf1edf5c61fca37ed846362cba4abfd76 | https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L370-L405 | train | 33,597 |
datadesk/python-documentcloud | documentcloud/__init__.py | ProjectClient.all | def all(self):
"""
Retrieve all your projects. Requires authentication.
Example usage:
>> documentcloud.projects.all()
"""
project_list = self.fetch('projects.json').get("projects")
obj_list = []
for proj in project_list:
proj['_connection'] = self._connection
proj = Project(proj)
obj_list.append(proj)
return obj_list | python | def all(self):
"""
Retrieve all your projects. Requires authentication.
Example usage:
>> documentcloud.projects.all()
"""
project_list = self.fetch('projects.json').get("projects")
obj_list = []
for proj in project_list:
proj['_connection'] = self._connection
proj = Project(proj)
obj_list.append(proj)
return obj_list | [
"def",
"all",
"(",
"self",
")",
":",
"project_list",
"=",
"self",
".",
"fetch",
"(",
"'projects.json'",
")",
".",
"get",
"(",
"\"projects\"",
")",
"obj_list",
"=",
"[",
"]",
"for",
"proj",
"in",
"project_list",
":",
"proj",
"[",
"'_connection'",
"]",
"... | Retrieve all your projects. Requires authentication.
Example usage:
>> documentcloud.projects.all() | [
"Retrieve",
"all",
"your",
"projects",
".",
"Requires",
"authentication",
"."
] | 0d7f42cbf1edf5c61fca37ed846362cba4abfd76 | https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L430-L444 | train | 33,598 |
datadesk/python-documentcloud | documentcloud/__init__.py | ProjectClient.get | def get(self, id=None, title=None):
"""
Retrieve a particular project using its unique identifier or
it's title.
But not both.
Example usage:
>> documentcloud.projects.get('arizona-shootings')
"""
# Make sure the kwargs are kosher
if id and title:
raise ValueError("You can only retrieve a Project by id or \
title, not by both")
elif not id and not title:
raise ValueError("You must provide an id or a title to \
make a request.")
# Pull the hits
if id:
hit_list = [i for i in self.all() if str(i.id) == str(id)]
elif title:
hit_list = [
i for i in self.all() if
i.title.lower().strip() == title.lower().strip()
]
# Throw an error if there's more than one hit.
if len(hit_list) > 1:
raise DuplicateObjectError("There is more than one project that \
matches your request.")
# Try to pull the first hit
try:
return hit_list[0]
except IndexError:
# If it's not there, you know to throw this error.
raise DoesNotExistError("The resource you've requested does not \
exist or is unavailable without the proper credentials.") | python | def get(self, id=None, title=None):
"""
Retrieve a particular project using its unique identifier or
it's title.
But not both.
Example usage:
>> documentcloud.projects.get('arizona-shootings')
"""
# Make sure the kwargs are kosher
if id and title:
raise ValueError("You can only retrieve a Project by id or \
title, not by both")
elif not id and not title:
raise ValueError("You must provide an id or a title to \
make a request.")
# Pull the hits
if id:
hit_list = [i for i in self.all() if str(i.id) == str(id)]
elif title:
hit_list = [
i for i in self.all() if
i.title.lower().strip() == title.lower().strip()
]
# Throw an error if there's more than one hit.
if len(hit_list) > 1:
raise DuplicateObjectError("There is more than one project that \
matches your request.")
# Try to pull the first hit
try:
return hit_list[0]
except IndexError:
# If it's not there, you know to throw this error.
raise DoesNotExistError("The resource you've requested does not \
exist or is unavailable without the proper credentials.") | [
"def",
"get",
"(",
"self",
",",
"id",
"=",
"None",
",",
"title",
"=",
"None",
")",
":",
"# Make sure the kwargs are kosher",
"if",
"id",
"and",
"title",
":",
"raise",
"ValueError",
"(",
"\"You can only retrieve a Project by id or \\\n title, not by both\"... | Retrieve a particular project using its unique identifier or
it's title.
But not both.
Example usage:
>> documentcloud.projects.get('arizona-shootings') | [
"Retrieve",
"a",
"particular",
"project",
"using",
"its",
"unique",
"identifier",
"or",
"it",
"s",
"title",
"."
] | 0d7f42cbf1edf5c61fca37ed846362cba4abfd76 | https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L446-L482 | train | 33,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.