id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
7,200
mdavidsaver/p4p
src/p4p/nt/enum.py
NTEnum.assign
def assign(self, V, py): """Store python value in Value """ if isinstance(py, (bytes, unicode)): for i,C in enumerate(V['value.choices'] or self._choices): if py==C: V['value.index'] = i return # attempt to parse as integer V['value.index'] = py
python
def assign(self, V, py): if isinstance(py, (bytes, unicode)): for i,C in enumerate(V['value.choices'] or self._choices): if py==C: V['value.index'] = i return # attempt to parse as integer V['value.index'] = py
[ "def", "assign", "(", "self", ",", "V", ",", "py", ")", ":", "if", "isinstance", "(", "py", ",", "(", "bytes", ",", "unicode", ")", ")", ":", "for", "i", ",", "C", "in", "enumerate", "(", "V", "[", "'value.choices'", "]", "or", "self", ".", "_c...
Store python value in Value
[ "Store", "python", "value", "in", "Value" ]
c5e45eac01edfdad9cc2857bc283c7f2695802b8
https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/enum.py#L94-L104
7,201
mdavidsaver/p4p
src/p4p/disect.py
periodic
def periodic(period=60.0, file=sys.stderr): """Start a daemon thread which will periodically print GC stats :param period: Update period in seconds :param file: A writable file-like object """ import threading import time S = _StatsThread(period=period, file=file) T = threading.Thread(target=S) T.daemon = True T.start()
python
def periodic(period=60.0, file=sys.stderr): import threading import time S = _StatsThread(period=period, file=file) T = threading.Thread(target=S) T.daemon = True T.start()
[ "def", "periodic", "(", "period", "=", "60.0", ",", "file", "=", "sys", ".", "stderr", ")", ":", "import", "threading", "import", "time", "S", "=", "_StatsThread", "(", "period", "=", "period", ",", "file", "=", "file", ")", "T", "=", "threading", "....
Start a daemon thread which will periodically print GC stats :param period: Update period in seconds :param file: A writable file-like object
[ "Start", "a", "daemon", "thread", "which", "will", "periodically", "print", "GC", "stats" ]
c5e45eac01edfdad9cc2857bc283c7f2695802b8
https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/disect.py#L142-L153
7,202
mdavidsaver/p4p
src/p4p/disect.py
StatsDelta.collect
def collect(self, file=sys.stderr): """Collect stats and print results to file :param file: A writable file-like object """ cur = gcstats() Ncur = len(cur) if self.stats is not None and file is not None: prev = self.stats Nprev = self.ntypes # may be less than len(prev) if Ncur != Nprev: print("# Types %d -> %d" % (Nprev, Ncur), file=file) Scur, Sprev, first = set(cur), set(prev), True for T in Scur - Sprev: # new types if first: print('New Types', file=file) first = False print(' ', T, cur[T], file=file) first = True for T in Sprev - Scur: # collected types if first: print('Cleaned Types', file=file) first = False print(' ', T, -prev[T], file=file) first = True for T in Scur & Sprev: if cur[T] == prev[T]: continue if first: print('Known Types', file=file) first = False print(' ', T, cur[T], 'delta', cur[T] - prev[T], file=file) else: # first call print("All Types", file=file) for T, C in cur.items(): print(' ', T, C, file=file) self.stats, self.ntypes = cur, len(cur)
python
def collect(self, file=sys.stderr): cur = gcstats() Ncur = len(cur) if self.stats is not None and file is not None: prev = self.stats Nprev = self.ntypes # may be less than len(prev) if Ncur != Nprev: print("# Types %d -> %d" % (Nprev, Ncur), file=file) Scur, Sprev, first = set(cur), set(prev), True for T in Scur - Sprev: # new types if first: print('New Types', file=file) first = False print(' ', T, cur[T], file=file) first = True for T in Sprev - Scur: # collected types if first: print('Cleaned Types', file=file) first = False print(' ', T, -prev[T], file=file) first = True for T in Scur & Sprev: if cur[T] == prev[T]: continue if first: print('Known Types', file=file) first = False print(' ', T, cur[T], 'delta', cur[T] - prev[T], file=file) else: # first call print("All Types", file=file) for T, C in cur.items(): print(' ', T, C, file=file) self.stats, self.ntypes = cur, len(cur)
[ "def", "collect", "(", "self", ",", "file", "=", "sys", ".", "stderr", ")", ":", "cur", "=", "gcstats", "(", ")", "Ncur", "=", "len", "(", "cur", ")", "if", "self", ".", "stats", "is", "not", "None", "and", "file", "is", "not", "None", ":", "pr...
Collect stats and print results to file :param file: A writable file-like object
[ "Collect", "stats", "and", "print", "results", "to", "file" ]
c5e45eac01edfdad9cc2857bc283c7f2695802b8
https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/disect.py#L34-L76
7,203
eyeseast/propublica-congress
congress/members.py
MembersClient.filter
def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs): """ Takes a chamber and Congress, OR state and district, returning a list of members """ check_chamber(chamber) kwargs.update(chamber=chamber, congress=congress) if 'state' in kwargs and 'district' in kwargs: path = ("members/{chamber}/{state}/{district}/" "current.json").format(**kwargs) elif 'state' in kwargs: path = ("members/{chamber}/{state}/" "current.json").format(**kwargs) else: path = ("{congress}/{chamber}/" "members.json").format(**kwargs) return self.fetch(path, parse=lambda r: r['results'])
python
def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs): check_chamber(chamber) kwargs.update(chamber=chamber, congress=congress) if 'state' in kwargs and 'district' in kwargs: path = ("members/{chamber}/{state}/{district}/" "current.json").format(**kwargs) elif 'state' in kwargs: path = ("members/{chamber}/{state}/" "current.json").format(**kwargs) else: path = ("{congress}/{chamber}/" "members.json").format(**kwargs) return self.fetch(path, parse=lambda r: r['results'])
[ "def", "filter", "(", "self", ",", "chamber", ",", "congress", "=", "CURRENT_CONGRESS", ",", "*", "*", "kwargs", ")", ":", "check_chamber", "(", "chamber", ")", "kwargs", ".", "update", "(", "chamber", "=", "chamber", ",", "congress", "=", "congress", ")...
Takes a chamber and Congress, OR state and district, returning a list of members
[ "Takes", "a", "chamber", "and", "Congress", "OR", "state", "and", "district", "returning", "a", "list", "of", "members" ]
03e519341063c5703080b4723112f1831816c77e
https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/members.py#L12-L33
7,204
eyeseast/propublica-congress
congress/members.py
MembersClient.bills
def bills(self, member_id, type='introduced'): "Same as BillsClient.by_member" path = "members/{0}/bills/{1}.json".format(member_id, type) return self.fetch(path)
python
def bills(self, member_id, type='introduced'): "Same as BillsClient.by_member" path = "members/{0}/bills/{1}.json".format(member_id, type) return self.fetch(path)
[ "def", "bills", "(", "self", ",", "member_id", ",", "type", "=", "'introduced'", ")", ":", "path", "=", "\"members/{0}/bills/{1}.json\"", ".", "format", "(", "member_id", ",", "type", ")", "return", "self", ".", "fetch", "(", "path", ")" ]
Same as BillsClient.by_member
[ "Same", "as", "BillsClient", ".", "by_member" ]
03e519341063c5703080b4723112f1831816c77e
https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/members.py#L35-L38
7,205
eyeseast/propublica-congress
congress/members.py
MembersClient.compare
def compare(self, first, second, chamber, type='votes', congress=CURRENT_CONGRESS): """ See how often two members voted together in a given Congress. Takes two member IDs, a chamber and a Congress number. """ check_chamber(chamber) path = "members/{first}/{type}/{second}/{congress}/{chamber}.json" path = path.format(first=first, second=second, type=type, congress=congress, chamber=chamber) return self.fetch(path)
python
def compare(self, first, second, chamber, type='votes', congress=CURRENT_CONGRESS): check_chamber(chamber) path = "members/{first}/{type}/{second}/{congress}/{chamber}.json" path = path.format(first=first, second=second, type=type, congress=congress, chamber=chamber) return self.fetch(path)
[ "def", "compare", "(", "self", ",", "first", ",", "second", ",", "chamber", ",", "type", "=", "'votes'", ",", "congress", "=", "CURRENT_CONGRESS", ")", ":", "check_chamber", "(", "chamber", ")", "path", "=", "\"members/{first}/{type}/{second}/{congress}/{chamber}....
See how often two members voted together in a given Congress. Takes two member IDs, a chamber and a Congress number.
[ "See", "how", "often", "two", "members", "voted", "together", "in", "a", "given", "Congress", ".", "Takes", "two", "member", "IDs", "a", "chamber", "and", "a", "Congress", "number", "." ]
03e519341063c5703080b4723112f1831816c77e
https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/members.py#L51-L60
7,206
eyeseast/propublica-congress
congress/bills.py
BillsClient.upcoming
def upcoming(self, chamber, congress=CURRENT_CONGRESS): "Shortcut for upcoming bills" path = "bills/upcoming/{chamber}.json".format(chamber=chamber) return self.fetch(path)
python
def upcoming(self, chamber, congress=CURRENT_CONGRESS): "Shortcut for upcoming bills" path = "bills/upcoming/{chamber}.json".format(chamber=chamber) return self.fetch(path)
[ "def", "upcoming", "(", "self", ",", "chamber", ",", "congress", "=", "CURRENT_CONGRESS", ")", ":", "path", "=", "\"bills/upcoming/{chamber}.json\"", ".", "format", "(", "chamber", "=", "chamber", ")", "return", "self", ".", "fetch", "(", "path", ")" ]
Shortcut for upcoming bills
[ "Shortcut", "for", "upcoming", "bills" ]
03e519341063c5703080b4723112f1831816c77e
https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/bills.py#L66-L69
7,207
eyeseast/propublica-congress
congress/votes.py
VotesClient.by_month
def by_month(self, chamber, year=None, month=None): """ Return votes for a single month, defaulting to the current month. """ check_chamber(chamber) now = datetime.datetime.now() year = year or now.year month = month or now.month path = "{chamber}/votes/{year}/{month}.json".format( chamber=chamber, year=year, month=month) return self.fetch(path, parse=lambda r: r['results'])
python
def by_month(self, chamber, year=None, month=None): check_chamber(chamber) now = datetime.datetime.now() year = year or now.year month = month or now.month path = "{chamber}/votes/{year}/{month}.json".format( chamber=chamber, year=year, month=month) return self.fetch(path, parse=lambda r: r['results'])
[ "def", "by_month", "(", "self", ",", "chamber", ",", "year", "=", "None", ",", "month", "=", "None", ")", ":", "check_chamber", "(", "chamber", ")", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "year", "=", "year", "or", "now", "....
Return votes for a single month, defaulting to the current month.
[ "Return", "votes", "for", "a", "single", "month", "defaulting", "to", "the", "current", "month", "." ]
03e519341063c5703080b4723112f1831816c77e
https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/votes.py#L10-L22
7,208
eyeseast/propublica-congress
congress/votes.py
VotesClient.by_range
def by_range(self, chamber, start, end): """ Return votes cast in a chamber between two dates, up to one month apart. """ check_chamber(chamber) start, end = parse_date(start), parse_date(end) if start > end: start, end = end, start path = "{chamber}/votes/{start:%Y-%m-%d}/{end:%Y-%m-%d}.json".format( chamber=chamber, start=start, end=end) return self.fetch(path, parse=lambda r: r['results'])
python
def by_range(self, chamber, start, end): check_chamber(chamber) start, end = parse_date(start), parse_date(end) if start > end: start, end = end, start path = "{chamber}/votes/{start:%Y-%m-%d}/{end:%Y-%m-%d}.json".format( chamber=chamber, start=start, end=end) return self.fetch(path, parse=lambda r: r['results'])
[ "def", "by_range", "(", "self", ",", "chamber", ",", "start", ",", "end", ")", ":", "check_chamber", "(", "chamber", ")", "start", ",", "end", "=", "parse_date", "(", "start", ")", ",", "parse_date", "(", "end", ")", "if", "start", ">", "end", ":", ...
Return votes cast in a chamber between two dates, up to one month apart.
[ "Return", "votes", "cast", "in", "a", "chamber", "between", "two", "dates", "up", "to", "one", "month", "apart", "." ]
03e519341063c5703080b4723112f1831816c77e
https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/votes.py#L24-L37
7,209
eyeseast/propublica-congress
congress/votes.py
VotesClient.by_date
def by_date(self, chamber, date): "Return votes cast in a chamber on a single day" date = parse_date(date) return self.by_range(chamber, date, date)
python
def by_date(self, chamber, date): "Return votes cast in a chamber on a single day" date = parse_date(date) return self.by_range(chamber, date, date)
[ "def", "by_date", "(", "self", ",", "chamber", ",", "date", ")", ":", "date", "=", "parse_date", "(", "date", ")", "return", "self", ".", "by_range", "(", "chamber", ",", "date", ",", "date", ")" ]
Return votes cast in a chamber on a single day
[ "Return", "votes", "cast", "in", "a", "chamber", "on", "a", "single", "day" ]
03e519341063c5703080b4723112f1831816c77e
https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/votes.py#L39-L42
7,210
eyeseast/propublica-congress
congress/votes.py
VotesClient.today
def today(self, chamber): "Return today's votes in a given chamber" now = datetime.date.today() return self.by_range(chamber, now, now)
python
def today(self, chamber): "Return today's votes in a given chamber" now = datetime.date.today() return self.by_range(chamber, now, now)
[ "def", "today", "(", "self", ",", "chamber", ")", ":", "now", "=", "datetime", ".", "date", ".", "today", "(", ")", "return", "self", ".", "by_range", "(", "chamber", ",", "now", ",", "now", ")" ]
Return today's votes in a given chamber
[ "Return", "today", "s", "votes", "in", "a", "given", "chamber" ]
03e519341063c5703080b4723112f1831816c77e
https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/votes.py#L44-L47
7,211
eyeseast/propublica-congress
congress/votes.py
VotesClient.nominations
def nominations(self, congress=CURRENT_CONGRESS): "Return votes on nominations from a given Congress" path = "{congress}/nominations.json".format(congress=congress) return self.fetch(path)
python
def nominations(self, congress=CURRENT_CONGRESS): "Return votes on nominations from a given Congress" path = "{congress}/nominations.json".format(congress=congress) return self.fetch(path)
[ "def", "nominations", "(", "self", ",", "congress", "=", "CURRENT_CONGRESS", ")", ":", "path", "=", "\"{congress}/nominations.json\"", ".", "format", "(", "congress", "=", "congress", ")", "return", "self", ".", "fetch", "(", "path", ")" ]
Return votes on nominations from a given Congress
[ "Return", "votes", "on", "nominations", "from", "a", "given", "Congress" ]
03e519341063c5703080b4723112f1831816c77e
https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/votes.py#L86-L89
7,212
eyeseast/propublica-congress
congress/client.py
Client.fetch
def fetch(self, path, parse=lambda r: r['results'][0]): """ Make an API request, with authentication. This method can be used directly to fetch new endpoints or customize parsing. :: >>> from congress import Congress >>> client = Congress() >>> senate = client.fetch('115/senate/members.json') >>> print(senate['num_results']) 101 """ url = self.BASE_URI + path headers = {'X-API-Key': self.apikey} log.debug(url) resp, content = self.http.request(url, headers=headers) content = u(content) content = json.loads(content) # handle errors if not content.get('status') == 'OK': if "errors" in content and content['errors'][0]['error'] == "Record not found": raise NotFound(path) if content.get('status') == '404': raise NotFound(path) raise CongressError(content, resp, url) if callable(parse): content = parse(content) return content
python
def fetch(self, path, parse=lambda r: r['results'][0]): url = self.BASE_URI + path headers = {'X-API-Key': self.apikey} log.debug(url) resp, content = self.http.request(url, headers=headers) content = u(content) content = json.loads(content) # handle errors if not content.get('status') == 'OK': if "errors" in content and content['errors'][0]['error'] == "Record not found": raise NotFound(path) if content.get('status') == '404': raise NotFound(path) raise CongressError(content, resp, url) if callable(parse): content = parse(content) return content
[ "def", "fetch", "(", "self", ",", "path", ",", "parse", "=", "lambda", "r", ":", "r", "[", "'results'", "]", "[", "0", "]", ")", ":", "url", "=", "self", ".", "BASE_URI", "+", "path", "headers", "=", "{", "'X-API-Key'", ":", "self", ".", "apikey"...
Make an API request, with authentication. This method can be used directly to fetch new endpoints or customize parsing. :: >>> from congress import Congress >>> client = Congress() >>> senate = client.fetch('115/senate/members.json') >>> print(senate['num_results']) 101
[ "Make", "an", "API", "request", "with", "authentication", "." ]
03e519341063c5703080b4723112f1831816c77e
https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/client.py#L31-L70
7,213
eyeseast/propublica-congress
congress/utils.py
parse_date
def parse_date(s): """ Parse a date using dateutil.parser.parse if available, falling back to datetime.datetime.strptime if not """ if isinstance(s, (datetime.datetime, datetime.date)): return s try: from dateutil.parser import parse except ImportError: parse = lambda d: datetime.datetime.strptime(d, "%Y-%m-%d") return parse(s)
python
def parse_date(s): if isinstance(s, (datetime.datetime, datetime.date)): return s try: from dateutil.parser import parse except ImportError: parse = lambda d: datetime.datetime.strptime(d, "%Y-%m-%d") return parse(s)
[ "def", "parse_date", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "(", "datetime", ".", "datetime", ",", "datetime", ".", "date", ")", ")", ":", "return", "s", "try", ":", "from", "dateutil", ".", "parser", "import", "parse", "except", "Imp...
Parse a date using dateutil.parser.parse if available, falling back to datetime.datetime.strptime if not
[ "Parse", "a", "date", "using", "dateutil", ".", "parser", ".", "parse", "if", "available", "falling", "back", "to", "datetime", ".", "datetime", ".", "strptime", "if", "not" ]
03e519341063c5703080b4723112f1831816c77e
https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/utils.py#L40-L51
7,214
sparklingpandas/sparklingpandas
sparklingpandas/groupby.py
GroupBy._prep_spark_sql_groupby
def _prep_spark_sql_groupby(self): """Used Spark SQL group approach""" # Strip the index info non_index_columns = filter(lambda x: x not in self._prdd._index_names, self._prdd._column_names()) self._grouped_spark_sql = (self._prdd.to_spark_sql() .select(non_index_columns) .groupBy(self._by)) self._columns = filter(lambda x: x != self._by, non_index_columns)
python
def _prep_spark_sql_groupby(self): # Strip the index info non_index_columns = filter(lambda x: x not in self._prdd._index_names, self._prdd._column_names()) self._grouped_spark_sql = (self._prdd.to_spark_sql() .select(non_index_columns) .groupBy(self._by)) self._columns = filter(lambda x: x != self._by, non_index_columns)
[ "def", "_prep_spark_sql_groupby", "(", "self", ")", ":", "# Strip the index info", "non_index_columns", "=", "filter", "(", "lambda", "x", ":", "x", "not", "in", "self", ".", "_prdd", ".", "_index_names", ",", "self", ".", "_prdd", ".", "_column_names", "(", ...
Used Spark SQL group approach
[ "Used", "Spark", "SQL", "group", "approach" ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L54-L63
7,215
sparklingpandas/sparklingpandas
sparklingpandas/groupby.py
GroupBy._prep_pandas_groupby
def _prep_pandas_groupby(self): """Prepare the old school pandas group by based approach.""" myargs = self._myargs mykwargs = self._mykwargs def extract_keys(groupedFrame): for key, group in groupedFrame: yield (key, group) def group_and_extract(frame): return extract_keys(frame.groupby(*myargs, **mykwargs)) self._baseRDD = self._prdd._rdd() self._distributedRDD = self._baseRDD.flatMap(group_and_extract) self._mergedRDD = self._sortIfNeeded( self._group(self._distributedRDD))
python
def _prep_pandas_groupby(self): myargs = self._myargs mykwargs = self._mykwargs def extract_keys(groupedFrame): for key, group in groupedFrame: yield (key, group) def group_and_extract(frame): return extract_keys(frame.groupby(*myargs, **mykwargs)) self._baseRDD = self._prdd._rdd() self._distributedRDD = self._baseRDD.flatMap(group_and_extract) self._mergedRDD = self._sortIfNeeded( self._group(self._distributedRDD))
[ "def", "_prep_pandas_groupby", "(", "self", ")", ":", "myargs", "=", "self", ".", "_myargs", "mykwargs", "=", "self", ".", "_mykwargs", "def", "extract_keys", "(", "groupedFrame", ")", ":", "for", "key", ",", "group", "in", "groupedFrame", ":", "yield", "(...
Prepare the old school pandas group by based approach.
[ "Prepare", "the", "old", "school", "pandas", "group", "by", "based", "approach", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L65-L80
7,216
sparklingpandas/sparklingpandas
sparklingpandas/groupby.py
GroupBy._group
def _group(self, rdd): """Group together the values with the same key.""" return rdd.reduceByKey(lambda x, y: x.append(y))
python
def _group(self, rdd): return rdd.reduceByKey(lambda x, y: x.append(y))
[ "def", "_group", "(", "self", ",", "rdd", ")", ":", "return", "rdd", ".", "reduceByKey", "(", "lambda", "x", ",", "y", ":", "x", ".", "append", "(", "y", ")", ")" ]
Group together the values with the same key.
[ "Group", "together", "the", "values", "with", "the", "same", "key", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L89-L91
7,217
sparklingpandas/sparklingpandas
sparklingpandas/groupby.py
GroupBy.ngroups
def ngroups(self): """Number of groups.""" if self._can_use_new_school(): return self._grouped_spark_sql.count() self._prep_pandas_groupby() return self._mergedRDD.count()
python
def ngroups(self): if self._can_use_new_school(): return self._grouped_spark_sql.count() self._prep_pandas_groupby() return self._mergedRDD.count()
[ "def", "ngroups", "(", "self", ")", ":", "if", "self", ".", "_can_use_new_school", "(", ")", ":", "return", "self", ".", "_grouped_spark_sql", ".", "count", "(", ")", "self", ".", "_prep_pandas_groupby", "(", ")", "return", "self", ".", "_mergedRDD", ".", ...
Number of groups.
[ "Number", "of", "groups", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L129-L134
7,218
sparklingpandas/sparklingpandas
sparklingpandas/groupby.py
GroupBy.sum
def sum(self): """Compute the sum for each group.""" if self._can_use_new_school(): self._prep_spark_sql_groupby() import pyspark.sql.functions as func return self._use_aggregation(func.sum) self._prep_pandas_groupby() myargs = self._myargs mykwargs = self._mykwargs def create_combiner(x): return x.groupby(*myargs, **mykwargs).sum() def merge_value(x, y): return pd.concat([x, create_combiner(y)]) def merge_combiner(x, y): return x + y rddOfSum = self._sortIfNeeded(self._distributedRDD.combineByKey( create_combiner, merge_value, merge_combiner)).values() return DataFrame.fromDataFrameRDD(rddOfSum, self.sql_ctx)
python
def sum(self): if self._can_use_new_school(): self._prep_spark_sql_groupby() import pyspark.sql.functions as func return self._use_aggregation(func.sum) self._prep_pandas_groupby() myargs = self._myargs mykwargs = self._mykwargs def create_combiner(x): return x.groupby(*myargs, **mykwargs).sum() def merge_value(x, y): return pd.concat([x, create_combiner(y)]) def merge_combiner(x, y): return x + y rddOfSum = self._sortIfNeeded(self._distributedRDD.combineByKey( create_combiner, merge_value, merge_combiner)).values() return DataFrame.fromDataFrameRDD(rddOfSum, self.sql_ctx)
[ "def", "sum", "(", "self", ")", ":", "if", "self", ".", "_can_use_new_school", "(", ")", ":", "self", ".", "_prep_spark_sql_groupby", "(", ")", "import", "pyspark", ".", "sql", ".", "functions", "as", "func", "return", "self", ".", "_use_aggregation", "(",...
Compute the sum for each group.
[ "Compute", "the", "sum", "for", "each", "group", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L180-L203
7,219
sparklingpandas/sparklingpandas
sparklingpandas/groupby.py
GroupBy._create_exprs_using_func
def _create_exprs_using_func(self, f, columns): """Create aggregate expressions using the provided function with the result coming back as the original column name.""" expressions = map(lambda c: f(c).alias(c), self._columns) return expressions
python
def _create_exprs_using_func(self, f, columns): expressions = map(lambda c: f(c).alias(c), self._columns) return expressions
[ "def", "_create_exprs_using_func", "(", "self", ",", "f", ",", "columns", ")", ":", "expressions", "=", "map", "(", "lambda", "c", ":", "f", "(", "c", ")", ".", "alias", "(", "c", ")", ",", "self", ".", "_columns", ")", "return", "expressions" ]
Create aggregate expressions using the provided function with the result coming back as the original column name.
[ "Create", "aggregate", "expressions", "using", "the", "provided", "function", "with", "the", "result", "coming", "back", "as", "the", "original", "column", "name", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L205-L210
7,220
sparklingpandas/sparklingpandas
sparklingpandas/groupby.py
GroupBy._use_aggregation
def _use_aggregation(self, agg, columns=None): """Compute the result using the aggregation function provided. The aggregation name must also be provided so we can strip of the extra name that Spark SQL adds.""" if not columns: columns = self._columns from pyspark.sql import functions as F aggs = map(lambda column: agg(column).alias(column), self._columns) aggRdd = self._grouped_spark_sql.agg(*aggs) df = DataFrame.from_schema_rdd(aggRdd, self._by) return df
python
def _use_aggregation(self, agg, columns=None): if not columns: columns = self._columns from pyspark.sql import functions as F aggs = map(lambda column: agg(column).alias(column), self._columns) aggRdd = self._grouped_spark_sql.agg(*aggs) df = DataFrame.from_schema_rdd(aggRdd, self._by) return df
[ "def", "_use_aggregation", "(", "self", ",", "agg", ",", "columns", "=", "None", ")", ":", "if", "not", "columns", ":", "columns", "=", "self", ".", "_columns", "from", "pyspark", ".", "sql", "import", "functions", "as", "F", "aggs", "=", "map", "(", ...
Compute the result using the aggregation function provided. The aggregation name must also be provided so we can strip of the extra name that Spark SQL adds.
[ "Compute", "the", "result", "using", "the", "aggregation", "function", "provided", ".", "The", "aggregation", "name", "must", "also", "be", "provided", "so", "we", "can", "strip", "of", "the", "extra", "name", "that", "Spark", "SQL", "adds", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L287-L297
7,221
sparklingpandas/sparklingpandas
sparklingpandas/groupby.py
GroupBy._regroup_mergedRDD
def _regroup_mergedRDD(self): """A common pattern is we want to call groupby again on the dataframes so we can use the groupby functions. """ myargs = self._myargs mykwargs = self._mykwargs self._prep_pandas_groupby() def regroup(df): return df.groupby(*myargs, **mykwargs) return self._mergedRDD.mapValues(regroup)
python
def _regroup_mergedRDD(self): myargs = self._myargs mykwargs = self._mykwargs self._prep_pandas_groupby() def regroup(df): return df.groupby(*myargs, **mykwargs) return self._mergedRDD.mapValues(regroup)
[ "def", "_regroup_mergedRDD", "(", "self", ")", ":", "myargs", "=", "self", ".", "_myargs", "mykwargs", "=", "self", ".", "_mykwargs", "self", ".", "_prep_pandas_groupby", "(", ")", "def", "regroup", "(", "df", ")", ":", "return", "df", ".", "groupby", "(...
A common pattern is we want to call groupby again on the dataframes so we can use the groupby functions.
[ "A", "common", "pattern", "is", "we", "want", "to", "call", "groupby", "again", "on", "the", "dataframes", "so", "we", "can", "use", "the", "groupby", "functions", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L353-L364
7,222
sparklingpandas/sparklingpandas
sparklingpandas/groupby.py
GroupBy.nth
def nth(self, n, *args, **kwargs): """Take the nth element of each grouby.""" # TODO: Stop collecting the entire frame for each key. self._prep_pandas_groupby() myargs = self._myargs mykwargs = self._mykwargs nthRDD = self._regroup_mergedRDD().mapValues( lambda r: r.nth( n, *args, **kwargs)).values() return DataFrame.fromDataFrameRDD(nthRDD, self.sql_ctx)
python
def nth(self, n, *args, **kwargs): # TODO: Stop collecting the entire frame for each key. self._prep_pandas_groupby() myargs = self._myargs mykwargs = self._mykwargs nthRDD = self._regroup_mergedRDD().mapValues( lambda r: r.nth( n, *args, **kwargs)).values() return DataFrame.fromDataFrameRDD(nthRDD, self.sql_ctx)
[ "def", "nth", "(", "self", ",", "n", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO: Stop collecting the entire frame for each key.", "self", ".", "_prep_pandas_groupby", "(", ")", "myargs", "=", "self", ".", "_myargs", "mykwargs", "=", "self", ...
Take the nth element of each grouby.
[ "Take", "the", "nth", "element", "of", "each", "grouby", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L366-L375
7,223
sparklingpandas/sparklingpandas
sparklingpandas/groupby.py
GroupBy.apply
def apply(self, func, *args, **kwargs): """Apply the provided function and combine the results together in the same way as apply from groupby in pandas. This returns a DataFrame. """ self._prep_pandas_groupby() def key_by_index(data): """Key each row by its index. """ # TODO: Is there a better way to do this? for key, row in data.iterrows(): yield (key, pd.DataFrame.from_dict( dict([(key, row)]), orient='index')) myargs = self._myargs mykwargs = self._mykwargs regroupedRDD = self._distributedRDD.mapValues( lambda data: data.groupby(*myargs, **mykwargs)) appliedRDD = regroupedRDD.map( lambda key_data: key_data[1].apply(func, *args, **kwargs)) reKeyedRDD = appliedRDD.flatMap(key_by_index) dataframe = self._sortIfNeeded(reKeyedRDD).values() return DataFrame.fromDataFrameRDD(dataframe, self.sql_ctx)
python
def apply(self, func, *args, **kwargs): self._prep_pandas_groupby() def key_by_index(data): """Key each row by its index. """ # TODO: Is there a better way to do this? for key, row in data.iterrows(): yield (key, pd.DataFrame.from_dict( dict([(key, row)]), orient='index')) myargs = self._myargs mykwargs = self._mykwargs regroupedRDD = self._distributedRDD.mapValues( lambda data: data.groupby(*myargs, **mykwargs)) appliedRDD = regroupedRDD.map( lambda key_data: key_data[1].apply(func, *args, **kwargs)) reKeyedRDD = appliedRDD.flatMap(key_by_index) dataframe = self._sortIfNeeded(reKeyedRDD).values() return DataFrame.fromDataFrameRDD(dataframe, self.sql_ctx)
[ "def", "apply", "(", "self", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_prep_pandas_groupby", "(", ")", "def", "key_by_index", "(", "data", ")", ":", "\"\"\"Key each row by its index.\n \"\"\"", "# TODO: Is there ...
Apply the provided function and combine the results together in the same way as apply from groupby in pandas. This returns a DataFrame.
[ "Apply", "the", "provided", "function", "and", "combine", "the", "results", "together", "in", "the", "same", "way", "as", "apply", "from", "groupby", "in", "pandas", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L398-L422
7,224
sparklingpandas/sparklingpandas
sparklingpandas/custom_functions.py
_create_function
def _create_function(name, doc=""): """ Create a function for aggregator by name""" def _(col): spark_ctx = SparkContext._active_spark_context java_ctx = (getattr(spark_ctx._jvm.com.sparklingpandas.functions, name) (col._java_ctx if isinstance(col, Column) else col)) return Column(java_ctx) _.__name__ = name _.__doc__ = doc return _
python
def _create_function(name, doc=""): def _(col): spark_ctx = SparkContext._active_spark_context java_ctx = (getattr(spark_ctx._jvm.com.sparklingpandas.functions, name) (col._java_ctx if isinstance(col, Column) else col)) return Column(java_ctx) _.__name__ = name _.__doc__ = doc return _
[ "def", "_create_function", "(", "name", ",", "doc", "=", "\"\"", ")", ":", "def", "_", "(", "col", ")", ":", "spark_ctx", "=", "SparkContext", ".", "_active_spark_context", "java_ctx", "=", "(", "getattr", "(", "spark_ctx", ".", "_jvm", ".", "com", ".", ...
Create a function for aggregator by name
[ "Create", "a", "function", "for", "aggregator", "by", "name" ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/custom_functions.py#L10-L20
7,225
sparklingpandas/sparklingpandas
sparklingpandas/pstatcounter.py
PStatCounter.merge
def merge(self, frame): """ Add another DataFrame to the PStatCounter. """ for column, values in frame.iteritems(): # Temporary hack, fix later counter = self._counters.get(column) for value in values: if counter is not None: counter.merge(value)
python
def merge(self, frame): for column, values in frame.iteritems(): # Temporary hack, fix later counter = self._counters.get(column) for value in values: if counter is not None: counter.merge(value)
[ "def", "merge", "(", "self", ",", "frame", ")", ":", "for", "column", ",", "values", "in", "frame", ".", "iteritems", "(", ")", ":", "# Temporary hack, fix later", "counter", "=", "self", ".", "_counters", ".", "get", "(", "column", ")", "for", "value", ...
Add another DataFrame to the PStatCounter.
[ "Add", "another", "DataFrame", "to", "the", "PStatCounter", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/pstatcounter.py#L58-L67
7,226
sparklingpandas/sparklingpandas
sparklingpandas/pstatcounter.py
PStatCounter.merge_pstats
def merge_pstats(self, other): """ Merge all of the stats counters of the other PStatCounter with our counters. """ if not isinstance(other, PStatCounter): raise Exception("Can only merge PStatcounters!") for column, counter in self._counters.items(): other_counter = other._counters.get(column) self._counters[column] = counter.mergeStats(other_counter) return self
python
def merge_pstats(self, other): if not isinstance(other, PStatCounter): raise Exception("Can only merge PStatcounters!") for column, counter in self._counters.items(): other_counter = other._counters.get(column) self._counters[column] = counter.mergeStats(other_counter) return self
[ "def", "merge_pstats", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "PStatCounter", ")", ":", "raise", "Exception", "(", "\"Can only merge PStatcounters!\"", ")", "for", "column", ",", "counter", "in", "self", ".", "_cou...
Merge all of the stats counters of the other PStatCounter with our counters.
[ "Merge", "all", "of", "the", "stats", "counters", "of", "the", "other", "PStatCounter", "with", "our", "counters", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/pstatcounter.py#L69-L81
7,227
sparklingpandas/sparklingpandas
sparklingpandas/dataframe.py
_update_index_on_df
def _update_index_on_df(df, index_names): """Helper function to restore index information after collection. Doesn't use self so we can serialize this.""" if index_names: df = df.set_index(index_names) # Remove names from unnamed indexes index_names = _denormalize_index_names(index_names) df.index.names = index_names return df
python
def _update_index_on_df(df, index_names): if index_names: df = df.set_index(index_names) # Remove names from unnamed indexes index_names = _denormalize_index_names(index_names) df.index.names = index_names return df
[ "def", "_update_index_on_df", "(", "df", ",", "index_names", ")", ":", "if", "index_names", ":", "df", "=", "df", ".", "set_index", "(", "index_names", ")", "# Remove names from unnamed indexes", "index_names", "=", "_denormalize_index_names", "(", "index_names", ")...
Helper function to restore index information after collection. Doesn't use self so we can serialize this.
[ "Helper", "function", "to", "restore", "index", "information", "after", "collection", ".", "Doesn", "t", "use", "self", "so", "we", "can", "serialize", "this", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L272-L280
7,228
sparklingpandas/sparklingpandas
sparklingpandas/dataframe.py
DataFrame._rdd
def _rdd(self): """Return an RDD of Panda DataFrame objects. This can be expensive especially if we don't do a narrow transformation after and get it back to Spark SQL land quickly.""" columns = self._schema_rdd.columns index_names = self._index_names def fromRecords(records): if not records: return [] else: loaded_df = pd.DataFrame.from_records([records], columns=columns) indexed_df = _update_index_on_df(loaded_df, index_names) return [indexed_df] return self._schema_rdd.rdd.flatMap(fromRecords)
python
def _rdd(self): columns = self._schema_rdd.columns index_names = self._index_names def fromRecords(records): if not records: return [] else: loaded_df = pd.DataFrame.from_records([records], columns=columns) indexed_df = _update_index_on_df(loaded_df, index_names) return [indexed_df] return self._schema_rdd.rdd.flatMap(fromRecords)
[ "def", "_rdd", "(", "self", ")", ":", "columns", "=", "self", ".", "_schema_rdd", ".", "columns", "index_names", "=", "self", ".", "_index_names", "def", "fromRecords", "(", "records", ")", ":", "if", "not", "records", ":", "return", "[", "]", "else", ...
Return an RDD of Panda DataFrame objects. This can be expensive especially if we don't do a narrow transformation after and get it back to Spark SQL land quickly.
[ "Return", "an", "RDD", "of", "Panda", "DataFrame", "objects", ".", "This", "can", "be", "expensive", "especially", "if", "we", "don", "t", "do", "a", "narrow", "transformation", "after", "and", "get", "it", "back", "to", "Spark", "SQL", "land", "quickly", ...
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L43-L59
7,229
sparklingpandas/sparklingpandas
sparklingpandas/dataframe.py
DataFrame._column_names
def _column_names(self): """Return the column names""" index_names = set(_normalize_index_names(self._index_names)) column_names = [col_name for col_name in self._schema_rdd.columns if col_name not in index_names] return column_names
python
def _column_names(self): index_names = set(_normalize_index_names(self._index_names)) column_names = [col_name for col_name in self._schema_rdd.columns if col_name not in index_names] return column_names
[ "def", "_column_names", "(", "self", ")", ":", "index_names", "=", "set", "(", "_normalize_index_names", "(", "self", ".", "_index_names", ")", ")", "column_names", "=", "[", "col_name", "for", "col_name", "in", "self", ".", "_schema_rdd", ".", "columns", "i...
Return the column names
[ "Return", "the", "column", "names" ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L61-L66
7,230
sparklingpandas/sparklingpandas
sparklingpandas/dataframe.py
DataFrame._evil_apply_with_dataframes
def _evil_apply_with_dataframes(self, func, preserves_cols=False): """Convert the underlying SchmeaRDD to an RDD of DataFrames. apply the provide function and convert the result back. This is hella slow.""" source_rdd = self._rdd() result_rdd = func(source_rdd) # By default we don't know what the columns & indexes are so we let # from_rdd_of_dataframes look at the first partition to determine them. column_idxs = None if preserves_cols: index_names = self._index_names # Remove indexes from the columns columns = self._schema_rdd.columns[len(self._index_names):] column_idxs = (columns, index_names) return self.from_rdd_of_dataframes( result_rdd, column_idxs=column_idxs)
python
def _evil_apply_with_dataframes(self, func, preserves_cols=False): source_rdd = self._rdd() result_rdd = func(source_rdd) # By default we don't know what the columns & indexes are so we let # from_rdd_of_dataframes look at the first partition to determine them. column_idxs = None if preserves_cols: index_names = self._index_names # Remove indexes from the columns columns = self._schema_rdd.columns[len(self._index_names):] column_idxs = (columns, index_names) return self.from_rdd_of_dataframes( result_rdd, column_idxs=column_idxs)
[ "def", "_evil_apply_with_dataframes", "(", "self", ",", "func", ",", "preserves_cols", "=", "False", ")", ":", "source_rdd", "=", "self", ".", "_rdd", "(", ")", "result_rdd", "=", "func", "(", "source_rdd", ")", "# By default we don't know what the columns & indexes...
Convert the underlying SchmeaRDD to an RDD of DataFrames. apply the provide function and convert the result back. This is hella slow.
[ "Convert", "the", "underlying", "SchmeaRDD", "to", "an", "RDD", "of", "DataFrames", ".", "apply", "the", "provide", "function", "and", "convert", "the", "result", "back", ".", "This", "is", "hella", "slow", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L68-L83
7,231
sparklingpandas/sparklingpandas
sparklingpandas/dataframe.py
DataFrame._first_as_df
def _first_as_df(self): """Gets the first row as a Panda's DataFrame. Useful for functions like dtypes & ftypes""" columns = self._schema_rdd.columns df = pd.DataFrame.from_records( [self._schema_rdd.first()], columns=self._schema_rdd.columns) df = _update_index_on_df(df, self._index_names) return df
python
def _first_as_df(self): columns = self._schema_rdd.columns df = pd.DataFrame.from_records( [self._schema_rdd.first()], columns=self._schema_rdd.columns) df = _update_index_on_df(df, self._index_names) return df
[ "def", "_first_as_df", "(", "self", ")", ":", "columns", "=", "self", ".", "_schema_rdd", ".", "columns", "df", "=", "pd", ".", "DataFrame", ".", "from_records", "(", "[", "self", ".", "_schema_rdd", ".", "first", "(", ")", "]", ",", "columns", "=", ...
Gets the first row as a Panda's DataFrame. Useful for functions like dtypes & ftypes
[ "Gets", "the", "first", "row", "as", "a", "Panda", "s", "DataFrame", ".", "Useful", "for", "functions", "like", "dtypes", "&", "ftypes" ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L85-L93
7,232
sparklingpandas/sparklingpandas
sparklingpandas/dataframe.py
DataFrame.fromDataFrameRDD
def fromDataFrameRDD(cls, rdd, sql_ctx): """Construct a DataFrame from an RDD of DataFrames. No checking or validation occurs.""" result = DataFrame(None, sql_ctx) return result.from_rdd_of_dataframes(rdd)
python
def fromDataFrameRDD(cls, rdd, sql_ctx): result = DataFrame(None, sql_ctx) return result.from_rdd_of_dataframes(rdd)
[ "def", "fromDataFrameRDD", "(", "cls", ",", "rdd", ",", "sql_ctx", ")", ":", "result", "=", "DataFrame", "(", "None", ",", "sql_ctx", ")", "return", "result", ".", "from_rdd_of_dataframes", "(", "rdd", ")" ]
Construct a DataFrame from an RDD of DataFrames. No checking or validation occurs.
[ "Construct", "a", "DataFrame", "from", "an", "RDD", "of", "DataFrames", ".", "No", "checking", "or", "validation", "occurs", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L145-L149
7,233
sparklingpandas/sparklingpandas
sparklingpandas/dataframe.py
DataFrame.applymap
def applymap(self, f, **kwargs): """Return a new DataFrame by applying a function to each element of each Panda DataFrame.""" def transform_rdd(rdd): return rdd.map(lambda data: data.applymap(f), **kwargs) return self._evil_apply_with_dataframes(transform_rdd, preserves_cols=True)
python
def applymap(self, f, **kwargs): def transform_rdd(rdd): return rdd.map(lambda data: data.applymap(f), **kwargs) return self._evil_apply_with_dataframes(transform_rdd, preserves_cols=True)
[ "def", "applymap", "(", "self", ",", "f", ",", "*", "*", "kwargs", ")", ":", "def", "transform_rdd", "(", "rdd", ")", ":", "return", "rdd", ".", "map", "(", "lambda", "data", ":", "data", ".", "applymap", "(", "f", ")", ",", "*", "*", "kwargs", ...
Return a new DataFrame by applying a function to each element of each Panda DataFrame.
[ "Return", "a", "new", "DataFrame", "by", "applying", "a", "function", "to", "each", "element", "of", "each", "Panda", "DataFrame", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L165-L171
7,234
sparklingpandas/sparklingpandas
sparklingpandas/dataframe.py
DataFrame.groupby
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True, squeeze=False): """Returns a groupby on the schema rdd. This returns a GroupBy object. Note that grouping by a column name will be faster than most other options due to implementation.""" from sparklingpandas.groupby import GroupBy return GroupBy(self, by=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, squeeze=squeeze)
python
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True, squeeze=False): from sparklingpandas.groupby import GroupBy return GroupBy(self, by=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, squeeze=squeeze)
[ "def", "groupby", "(", "self", ",", "by", "=", "None", ",", "axis", "=", "0", ",", "level", "=", "None", ",", "as_index", "=", "True", ",", "sort", "=", "True", ",", "group_keys", "=", "True", ",", "squeeze", "=", "False", ")", ":", "from", "spar...
Returns a groupby on the schema rdd. This returns a GroupBy object. Note that grouping by a column name will be faster than most other options due to implementation.
[ "Returns", "a", "groupby", "on", "the", "schema", "rdd", ".", "This", "returns", "a", "GroupBy", "object", ".", "Note", "that", "grouping", "by", "a", "column", "name", "will", "be", "faster", "than", "most", "other", "options", "due", "to", "implementatio...
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L181-L188
7,235
sparklingpandas/sparklingpandas
sparklingpandas/dataframe.py
DataFrame.collect
def collect(self): """Collect the elements in an DataFrame and concatenate the partition.""" local_df = self._schema_rdd.toPandas() correct_idx_df = _update_index_on_df(local_df, self._index_names) return correct_idx_df
python
def collect(self): local_df = self._schema_rdd.toPandas() correct_idx_df = _update_index_on_df(local_df, self._index_names) return correct_idx_df
[ "def", "collect", "(", "self", ")", ":", "local_df", "=", "self", ".", "_schema_rdd", ".", "toPandas", "(", ")", "correct_idx_df", "=", "_update_index_on_df", "(", "local_df", ",", "self", ".", "_index_names", ")", "return", "correct_idx_df" ]
Collect the elements in an DataFrame and concatenate the partition.
[ "Collect", "the", "elements", "in", "an", "DataFrame", "and", "concatenate", "the", "partition", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L233-L238
7,236
sparklingpandas/sparklingpandas
sparklingpandas/prdd.py
PRDD.applymap
def applymap(self, func, **kwargs): """Return a new PRDD by applying a function to each element of each pandas DataFrame.""" return self.from_rdd( self._rdd.map(lambda data: data.applymap(func), **kwargs))
python
def applymap(self, func, **kwargs): return self.from_rdd( self._rdd.map(lambda data: data.applymap(func), **kwargs))
[ "def", "applymap", "(", "self", ",", "func", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "from_rdd", "(", "self", ".", "_rdd", ".", "map", "(", "lambda", "data", ":", "data", ".", "applymap", "(", "func", ")", ",", "*", "*", "kwargs...
Return a new PRDD by applying a function to each element of each pandas DataFrame.
[ "Return", "a", "new", "PRDD", "by", "applying", "a", "function", "to", "each", "element", "of", "each", "pandas", "DataFrame", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/prdd.py#L49-L53
7,237
sparklingpandas/sparklingpandas
sparklingpandas/prdd.py
PRDD.collect
def collect(self): """Collect the elements in an PRDD and concatenate the partition.""" # The order of the frame order appends is based on the implementation # of reduce which calls our function with # f(valueToBeAdded, accumulator) so we do our reduce implementation. def append_frames(frame_a, frame_b): return frame_a.append(frame_b) return self._custom_rdd_reduce(append_frames)
python
def collect(self): # The order of the frame order appends is based on the implementation # of reduce which calls our function with # f(valueToBeAdded, accumulator) so we do our reduce implementation. def append_frames(frame_a, frame_b): return frame_a.append(frame_b) return self._custom_rdd_reduce(append_frames)
[ "def", "collect", "(", "self", ")", ":", "# The order of the frame order appends is based on the implementation", "# of reduce which calls our function with", "# f(valueToBeAdded, accumulator) so we do our reduce implementation.", "def", "append_frames", "(", "frame_a", ",", "frame_b", ...
Collect the elements in an PRDD and concatenate the partition.
[ "Collect", "the", "elements", "in", "an", "PRDD", "and", "concatenate", "the", "partition", "." ]
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/prdd.py#L108-L115
7,238
sparklingpandas/sparklingpandas
sparklingpandas/prdd.py
PRDD._custom_rdd_reduce
def _custom_rdd_reduce(self, reduce_func): """Provides a custom RDD reduce which preserves ordering if the RDD has been sorted. This is useful for us because we need this functionality as many pandas operations support sorting the results. The standard reduce in PySpark does not have this property. Note that when PySpark no longer does partition reduces locally this code will also need to be updated.""" def accumulating_iter(iterator): acc = None for obj in iterator: if acc is None: acc = obj else: acc = reduce_func(acc, obj) if acc is not None: yield acc vals = self._rdd.mapPartitions(accumulating_iter).collect() return reduce(accumulating_iter, vals)
python
def _custom_rdd_reduce(self, reduce_func): def accumulating_iter(iterator): acc = None for obj in iterator: if acc is None: acc = obj else: acc = reduce_func(acc, obj) if acc is not None: yield acc vals = self._rdd.mapPartitions(accumulating_iter).collect() return reduce(accumulating_iter, vals)
[ "def", "_custom_rdd_reduce", "(", "self", ",", "reduce_func", ")", ":", "def", "accumulating_iter", "(", "iterator", ")", ":", "acc", "=", "None", "for", "obj", "in", "iterator", ":", "if", "acc", "is", "None", ":", "acc", "=", "obj", "else", ":", "acc...
Provides a custom RDD reduce which preserves ordering if the RDD has been sorted. This is useful for us because we need this functionality as many pandas operations support sorting the results. The standard reduce in PySpark does not have this property. Note that when PySpark no longer does partition reduces locally this code will also need to be updated.
[ "Provides", "a", "custom", "RDD", "reduce", "which", "preserves", "ordering", "if", "the", "RDD", "has", "been", "sorted", ".", "This", "is", "useful", "for", "us", "because", "we", "need", "this", "functionality", "as", "many", "pandas", "operations", "supp...
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/prdd.py#L117-L134
7,239
sparklingpandas/sparklingpandas
sparklingpandas/pcontext.py
PSparkContext.read_csv
def read_csv(self, file_path, use_whole_file=False, names=None, skiprows=0, *args, **kwargs): """Read a CSV file in and parse it into Pandas DataFrames. By default, the first row from the first partition of that data is parsed and used as the column names for the data from. If no 'names' param is provided we parse the first row of the first partition of data and use it for column names. Parameters ---------- file_path: string Path to input. Any valid file path in Spark works here, eg: 'file:///my/path/in/local/file/system' or 'hdfs:/user/juliet/' use_whole_file: boolean Whether of not to use the whole file. names: list of strings, optional skiprows: integer, optional indicates how many rows of input to skip. This will only be applied to the first partition of the data (so if #skiprows > #row in first partition this will not work). Generally this shouldn't be an issue for small values of skiprows. No other value of header is supported. All additional parameters available in pandas.read_csv() are usable here. Returns ------- A SparklingPandas DataFrame that contains the data from the specified file. """ def csv_file(partition_number, files): # pylint: disable=unexpected-keyword-arg file_count = 0 for _, contents in files: # Only skip lines on the first file if partition_number == 0 and file_count == 0 and _skiprows > 0: yield pandas.read_csv( sio(contents), *args, header=None, names=mynames, skiprows=_skiprows, **kwargs) else: file_count += 1 yield pandas.read_csv( sio(contents), *args, header=None, names=mynames, **kwargs) def csv_rows(partition_number, rows): # pylint: disable=unexpected-keyword-arg in_str = "\n".join(rows) if partition_number == 0: return iter([ pandas.read_csv( sio(in_str), *args, header=None, names=mynames, skiprows=_skiprows, **kwargs)]) else: # could use .iterows instead? 
return iter([pandas.read_csv(sio(in_str), *args, header=None, names=mynames, **kwargs)]) # If we need to peak at the first partition and determine the column # names mynames = None _skiprows = skiprows if names: mynames = names else: # In the future we could avoid this expensive call. first_line = self.spark_ctx.textFile(file_path).first() frame = pandas.read_csv(sio(first_line), **kwargs) # pylint sees frame as a tuple despite it being a DataFrame mynames = list(frame.columns) _skiprows += 1 # Do the actual load if use_whole_file: return self.from_pandas_rdd( self.spark_ctx.wholeTextFiles(file_path) .mapPartitionsWithIndex(csv_file)) else: return self.from_pandas_rdd( self.spark_ctx.textFile(file_path) .mapPartitionsWithIndex(csv_rows))
python
def read_csv(self, file_path, use_whole_file=False, names=None, skiprows=0, *args, **kwargs): def csv_file(partition_number, files): # pylint: disable=unexpected-keyword-arg file_count = 0 for _, contents in files: # Only skip lines on the first file if partition_number == 0 and file_count == 0 and _skiprows > 0: yield pandas.read_csv( sio(contents), *args, header=None, names=mynames, skiprows=_skiprows, **kwargs) else: file_count += 1 yield pandas.read_csv( sio(contents), *args, header=None, names=mynames, **kwargs) def csv_rows(partition_number, rows): # pylint: disable=unexpected-keyword-arg in_str = "\n".join(rows) if partition_number == 0: return iter([ pandas.read_csv( sio(in_str), *args, header=None, names=mynames, skiprows=_skiprows, **kwargs)]) else: # could use .iterows instead? return iter([pandas.read_csv(sio(in_str), *args, header=None, names=mynames, **kwargs)]) # If we need to peak at the first partition and determine the column # names mynames = None _skiprows = skiprows if names: mynames = names else: # In the future we could avoid this expensive call. first_line = self.spark_ctx.textFile(file_path).first() frame = pandas.read_csv(sio(first_line), **kwargs) # pylint sees frame as a tuple despite it being a DataFrame mynames = list(frame.columns) _skiprows += 1 # Do the actual load if use_whole_file: return self.from_pandas_rdd( self.spark_ctx.wholeTextFiles(file_path) .mapPartitionsWithIndex(csv_file)) else: return self.from_pandas_rdd( self.spark_ctx.textFile(file_path) .mapPartitionsWithIndex(csv_rows))
[ "def", "read_csv", "(", "self", ",", "file_path", ",", "use_whole_file", "=", "False", ",", "names", "=", "None", ",", "skiprows", "=", "0", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "csv_file", "(", "partition_number", ",", "files", ...
Read a CSV file in and parse it into Pandas DataFrames. By default, the first row from the first partition of that data is parsed and used as the column names for the data from. If no 'names' param is provided we parse the first row of the first partition of data and use it for column names. Parameters ---------- file_path: string Path to input. Any valid file path in Spark works here, eg: 'file:///my/path/in/local/file/system' or 'hdfs:/user/juliet/' use_whole_file: boolean Whether of not to use the whole file. names: list of strings, optional skiprows: integer, optional indicates how many rows of input to skip. This will only be applied to the first partition of the data (so if #skiprows > #row in first partition this will not work). Generally this shouldn't be an issue for small values of skiprows. No other value of header is supported. All additional parameters available in pandas.read_csv() are usable here. Returns ------- A SparklingPandas DataFrame that contains the data from the specified file.
[ "Read", "a", "CSV", "file", "in", "and", "parse", "it", "into", "Pandas", "DataFrames", ".", "By", "default", "the", "first", "row", "from", "the", "first", "partition", "of", "that", "data", "is", "parsed", "and", "used", "as", "the", "column", "names",...
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/pcontext.py#L68-L155
7,240
sparklingpandas/sparklingpandas
sparklingpandas/pcontext.py
PSparkContext.read_json
def read_json(self, file_path, *args, **kwargs): """Read a json file in and parse it into Pandas DataFrames. If no names is provided we use the first row for the names. Currently, it is not possible to skip the first n rows of a file. Headers are provided in the json file and not specified separately. Parameters ---------- file_path: string Path to input. Any valid file path in Spark works here, eg: 'my/path/in/local/file/system' or 'hdfs:/user/juliet/' Other than skipRows, all additional parameters available in pandas.read_csv() are usable here. Returns ------- A SparklingPandas DataFrame that contains the data from the specified file. """ def json_file_to_df(files): """ Transforms a JSON file into a list of data""" for _, contents in files: yield pandas.read_json(sio(contents), *args, **kwargs) return self.from_pandas_rdd(self.spark_ctx.wholeTextFiles(file_path) .mapPartitions(json_file_to_df))
python
def read_json(self, file_path, *args, **kwargs): def json_file_to_df(files): """ Transforms a JSON file into a list of data""" for _, contents in files: yield pandas.read_json(sio(contents), *args, **kwargs) return self.from_pandas_rdd(self.spark_ctx.wholeTextFiles(file_path) .mapPartitions(json_file_to_df))
[ "def", "read_json", "(", "self", ",", "file_path", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "json_file_to_df", "(", "files", ")", ":", "\"\"\" Transforms a JSON file into a list of data\"\"\"", "for", "_", ",", "contents", "in", "files", ":"...
Read a json file in and parse it into Pandas DataFrames. If no names is provided we use the first row for the names. Currently, it is not possible to skip the first n rows of a file. Headers are provided in the json file and not specified separately. Parameters ---------- file_path: string Path to input. Any valid file path in Spark works here, eg: 'my/path/in/local/file/system' or 'hdfs:/user/juliet/' Other than skipRows, all additional parameters available in pandas.read_csv() are usable here. Returns ------- A SparklingPandas DataFrame that contains the data from the specified file.
[ "Read", "a", "json", "file", "in", "and", "parse", "it", "into", "Pandas", "DataFrames", ".", "If", "no", "names", "is", "provided", "we", "use", "the", "first", "row", "for", "the", "names", ".", "Currently", "it", "is", "not", "possible", "to", "skip...
7d549df4348c979042b683c355aa778fc6d3a768
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/pcontext.py#L303-L329
7,241
zimeon/iiif
iiif/manipulator_gen.py
IIIFManipulatorGen.do_first
def do_first(self): """Load generator, set size. We take the generator module name from self.srcfile so that this manipulator will work with different generators in a similar way to how the ordinary generators work with different images """ # Load generator module and create instance if we haven't already if (not self.srcfile): raise IIIFError(text=("No generator specified")) if (not self.gen): try: (name, ext) = os.path.splitext(self.srcfile) (pack, mod) = os.path.split(name) module_name = 'iiif.generators.' + mod try: module = sys.modules[module_name] except KeyError: self.logger.debug( "Loading generator module %s" % (module_name)) # Would be nice to use importlib but this is available only # in python 2.7 and higher pack = __import__(module_name) # returns iiif package module = getattr(pack.generators, mod) self.gen = module.PixelGen() except ImportError: raise IIIFError( text=("Failed to load generator %s" % (str(self.srcfile)))) (self.width, self.height) = self.gen.size
python
def do_first(self): # Load generator module and create instance if we haven't already if (not self.srcfile): raise IIIFError(text=("No generator specified")) if (not self.gen): try: (name, ext) = os.path.splitext(self.srcfile) (pack, mod) = os.path.split(name) module_name = 'iiif.generators.' + mod try: module = sys.modules[module_name] except KeyError: self.logger.debug( "Loading generator module %s" % (module_name)) # Would be nice to use importlib but this is available only # in python 2.7 and higher pack = __import__(module_name) # returns iiif package module = getattr(pack.generators, mod) self.gen = module.PixelGen() except ImportError: raise IIIFError( text=("Failed to load generator %s" % (str(self.srcfile)))) (self.width, self.height) = self.gen.size
[ "def", "do_first", "(", "self", ")", ":", "# Load generator module and create instance if we haven't already", "if", "(", "not", "self", ".", "srcfile", ")", ":", "raise", "IIIFError", "(", "text", "=", "(", "\"No generator specified\"", ")", ")", "if", "(", "not"...
Load generator, set size. We take the generator module name from self.srcfile so that this manipulator will work with different generators in a similar way to how the ordinary generators work with different images
[ "Load", "generator", "set", "size", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_gen.py#L31-L60
7,242
zimeon/iiif
iiif/manipulator_gen.py
IIIFManipulatorGen.do_region
def do_region(self, x, y, w, h): """Record region.""" if (x is None): self.rx = 0 self.ry = 0 self.rw = self.width self.rh = self.height else: self.rx = x self.ry = y self.rw = w self.rh = h
python
def do_region(self, x, y, w, h): if (x is None): self.rx = 0 self.ry = 0 self.rw = self.width self.rh = self.height else: self.rx = x self.ry = y self.rw = w self.rh = h
[ "def", "do_region", "(", "self", ",", "x", ",", "y", ",", "w", ",", "h", ")", ":", "if", "(", "x", "is", "None", ")", ":", "self", ".", "rx", "=", "0", "self", ".", "ry", "=", "0", "self", ".", "rw", "=", "self", ".", "width", "self", "."...
Record region.
[ "Record", "region", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_gen.py#L62-L73
7,243
zimeon/iiif
iiif/manipulator_gen.py
IIIFManipulatorGen.do_size
def do_size(self, w, h): """Record size.""" if (w is None): self.sw = self.rw self.sh = self.rh else: self.sw = w self.sh = h # Now we have region and size, generate the image image = Image.new("RGB", (self.sw, self.sh), self.gen.background_color) for y in range(0, self.sh): for x in range(0, self.sw): ix = int((x * self.rw) // self.sw + self.rx) iy = int((y * self.rh) // self.sh + self.ry) color = self.gen.pixel(ix, iy) if (color is not None): image.putpixel((x, y), color) self.image = image
python
def do_size(self, w, h): if (w is None): self.sw = self.rw self.sh = self.rh else: self.sw = w self.sh = h # Now we have region and size, generate the image image = Image.new("RGB", (self.sw, self.sh), self.gen.background_color) for y in range(0, self.sh): for x in range(0, self.sw): ix = int((x * self.rw) // self.sw + self.rx) iy = int((y * self.rh) // self.sh + self.ry) color = self.gen.pixel(ix, iy) if (color is not None): image.putpixel((x, y), color) self.image = image
[ "def", "do_size", "(", "self", ",", "w", ",", "h", ")", ":", "if", "(", "w", "is", "None", ")", ":", "self", ".", "sw", "=", "self", ".", "rw", "self", ".", "sh", "=", "self", ".", "rh", "else", ":", "self", ".", "sw", "=", "w", "self", "...
Record size.
[ "Record", "size", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_gen.py#L75-L92
7,244
zimeon/iiif
iiif/auth_basic.py
IIIFAuthBasic.login_handler
def login_handler(self, config=None, prefix=None, **args): """HTTP Basic login handler. Respond with 401 and WWW-Authenticate header if there are no credentials or bad credentials. If there are credentials then simply check for username equal to password for validity. """ headers = {} headers['Access-control-allow-origin'] = '*' headers['Content-type'] = 'text/html' auth = request.authorization if (auth and auth.username == auth.password): return self.set_cookie_close_window_response( "valid-http-basic-login") else: headers['WWW-Authenticate'] = ( 'Basic realm="HTTP-Basic-Auth at %s (u=p to login)"' % (self.name)) return make_response("", 401, headers)
python
def login_handler(self, config=None, prefix=None, **args): headers = {} headers['Access-control-allow-origin'] = '*' headers['Content-type'] = 'text/html' auth = request.authorization if (auth and auth.username == auth.password): return self.set_cookie_close_window_response( "valid-http-basic-login") else: headers['WWW-Authenticate'] = ( 'Basic realm="HTTP-Basic-Auth at %s (u=p to login)"' % (self.name)) return make_response("", 401, headers)
[ "def", "login_handler", "(", "self", ",", "config", "=", "None", ",", "prefix", "=", "None", ",", "*", "*", "args", ")", ":", "headers", "=", "{", "}", "headers", "[", "'Access-control-allow-origin'", "]", "=", "'*'", "headers", "[", "'Content-type'", "]...
HTTP Basic login handler. Respond with 401 and WWW-Authenticate header if there are no credentials or bad credentials. If there are credentials then simply check for username equal to password for validity.
[ "HTTP", "Basic", "login", "handler", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_basic.py#L26-L44
7,245
zimeon/iiif
iiif/auth_clickthrough.py
IIIFAuthClickthrough.login_service_description
def login_service_description(self): """Clickthrough login service description. The login service description _MUST_ include the token service description. Additionally, for a clickthroudh loginThe authentication pattern is indicated via the profile URI which is built using self.auth_pattern. """ desc = super(IIIFAuthClickthrough, self).login_service_description() desc['confirmLabel'] = self.confirm_label return desc
python
def login_service_description(self): desc = super(IIIFAuthClickthrough, self).login_service_description() desc['confirmLabel'] = self.confirm_label return desc
[ "def", "login_service_description", "(", "self", ")", ":", "desc", "=", "super", "(", "IIIFAuthClickthrough", ",", "self", ")", ".", "login_service_description", "(", ")", "desc", "[", "'confirmLabel'", "]", "=", "self", ".", "confirm_label", "return", "desc" ]
Clickthrough login service description. The login service description _MUST_ include the token service description. Additionally, for a clickthroudh loginThe authentication pattern is indicated via the profile URI which is built using self.auth_pattern.
[ "Clickthrough", "login", "service", "description", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_clickthrough.py#L22-L31
7,246
zimeon/iiif
iiif/request.py
IIIFRequest.clear
def clear(self): """Clear all data that might pertain to an individual IIIF URL. Does not change/reset the baseurl or API version which might be useful in a sequence of calls. """ # API parameters self.identifier = None self.region = None self.size = None self.rotation = None self.quality = None self.format = None self.info = None # Derived data and flags self.region_full = False self.region_square = False self.region_pct = False self.region_xywh = None # (x,y,w,h) self.size_full = False self.size_max = False # new in 2.1 self.size_pct = None self.size_bang = None self.size_wh = None # (w,h) self.rotation_mirror = False self.rotation_deg = 0.0
python
def clear(self): # API parameters self.identifier = None self.region = None self.size = None self.rotation = None self.quality = None self.format = None self.info = None # Derived data and flags self.region_full = False self.region_square = False self.region_pct = False self.region_xywh = None # (x,y,w,h) self.size_full = False self.size_max = False # new in 2.1 self.size_pct = None self.size_bang = None self.size_wh = None # (w,h) self.rotation_mirror = False self.rotation_deg = 0.0
[ "def", "clear", "(", "self", ")", ":", "# API parameters", "self", ".", "identifier", "=", "None", "self", ".", "region", "=", "None", "self", ".", "size", "=", "None", "self", ".", "rotation", "=", "None", "self", ".", "quality", "=", "None", "self", ...
Clear all data that might pertain to an individual IIIF URL. Does not change/reset the baseurl or API version which might be useful in a sequence of calls.
[ "Clear", "all", "data", "that", "might", "pertain", "to", "an", "individual", "IIIF", "URL", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L83-L108
7,247
zimeon/iiif
iiif/request.py
IIIFRequest.api_version
def api_version(self, v): """Set the api_version and associated configurations.""" self._api_version = v if (self._api_version >= '2.0'): self.default_quality = 'default' self.allowed_qualities = ['default', 'color', 'bitonal', 'gray'] else: # versions 1.0 and 1.1 self.default_quality = 'native' self.allowed_qualities = ['native', 'color', 'bitonal', 'grey']
python
def api_version(self, v): self._api_version = v if (self._api_version >= '2.0'): self.default_quality = 'default' self.allowed_qualities = ['default', 'color', 'bitonal', 'gray'] else: # versions 1.0 and 1.1 self.default_quality = 'native' self.allowed_qualities = ['native', 'color', 'bitonal', 'grey']
[ "def", "api_version", "(", "self", ",", "v", ")", ":", "self", ".", "_api_version", "=", "v", "if", "(", "self", ".", "_api_version", ">=", "'2.0'", ")", ":", "self", ".", "default_quality", "=", "'default'", "self", ".", "allowed_qualities", "=", "[", ...
Set the api_version and associated configurations.
[ "Set", "the", "api_version", "and", "associated", "configurations", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L116-L124
7,248
zimeon/iiif
iiif/request.py
IIIFRequest.url
def url(self, **params): """Build a URL path for image or info request. An IIIF Image request with parameterized form is assumed unless the info parameter is specified, in which case an Image Information request URI is constructred. """ self._setattrs(**params) path = self.baseurl + self.quote(self.identifier) + "/" if (self.info): # info request path += "info" format = self.format if self.format else "json" else: # region if self.region: region = self.region elif self.region_xywh: region = "%d,%d,%d,%d" % tuple(self.region_xywh) else: region = "full" # size if self.size: size = self.size elif self.size_wh: if (self.size_wh[0] is None): size = ",%d" % (self.size_wh[1]) elif (self.size_wh[1] is None): size = "%d," % (self.size_wh[0]) else: size = "%d,%d" % (self.size_wh[0], self.size_wh[1]) elif (self.size_max and self.api_version >= '2.1'): size = 'max' else: size = "full" # rotation and quality rotation = self.rotation if self.rotation else "0" quality = self.quality if self.quality else self.default_quality # parameterized form path += self.quote(region) + "/" +\ self.quote(size) + "/" +\ self.quote(rotation) + "/" +\ self.quote(quality) format = self.format if (format): path += "." + format return(path)
python
def url(self, **params): self._setattrs(**params) path = self.baseurl + self.quote(self.identifier) + "/" if (self.info): # info request path += "info" format = self.format if self.format else "json" else: # region if self.region: region = self.region elif self.region_xywh: region = "%d,%d,%d,%d" % tuple(self.region_xywh) else: region = "full" # size if self.size: size = self.size elif self.size_wh: if (self.size_wh[0] is None): size = ",%d" % (self.size_wh[1]) elif (self.size_wh[1] is None): size = "%d," % (self.size_wh[0]) else: size = "%d,%d" % (self.size_wh[0], self.size_wh[1]) elif (self.size_max and self.api_version >= '2.1'): size = 'max' else: size = "full" # rotation and quality rotation = self.rotation if self.rotation else "0" quality = self.quality if self.quality else self.default_quality # parameterized form path += self.quote(region) + "/" +\ self.quote(size) + "/" +\ self.quote(rotation) + "/" +\ self.quote(quality) format = self.format if (format): path += "." + format return(path)
[ "def", "url", "(", "self", ",", "*", "*", "params", ")", ":", "self", ".", "_setattrs", "(", "*", "*", "params", ")", "path", "=", "self", ".", "baseurl", "+", "self", ".", "quote", "(", "self", ".", "identifier", ")", "+", "\"/\"", "if", "(", ...
Build a URL path for image or info request. An IIIF Image request with parameterized form is assumed unless the info parameter is specified, in which case an Image Information request URI is constructred.
[ "Build", "a", "URL", "path", "for", "image", "or", "info", "request", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L148-L194
7,249
zimeon/iiif
iiif/request.py
IIIFRequest.parse_url
def parse_url(self, url): """Parse an IIIF API URL path and each component. Will parse a URL or URL path that accords with either the parametrized or info request forms. Will raise an IIIFRequestError on failure. A wrapper for the split_url() and parse_parameters() methods. Note that behavior of split_url() depends on whether self.identifier is set. """ self.split_url(url) if (not self.info): self.parse_parameters() return(self)
python
def parse_url(self, url): self.split_url(url) if (not self.info): self.parse_parameters() return(self)
[ "def", "parse_url", "(", "self", ",", "url", ")", ":", "self", ".", "split_url", "(", "url", ")", "if", "(", "not", "self", ".", "info", ")", ":", "self", ".", "parse_parameters", "(", ")", "return", "(", "self", ")" ]
Parse an IIIF API URL path and each component. Will parse a URL or URL path that accords with either the parametrized or info request forms. Will raise an IIIFRequestError on failure. A wrapper for the split_url() and parse_parameters() methods. Note that behavior of split_url() depends on whether self.identifier is set.
[ "Parse", "an", "IIIF", "API", "URL", "path", "and", "each", "component", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L196-L210
7,250
zimeon/iiif
iiif/request.py
IIIFRequest.split_url
def split_url(self, url): """Parse an IIIF API URL path into components. Will parse a URL or URL path that accords with either the parametrized or info API forms. Will raise an IIIFRequestError on failure. If self.identifier is set then url is assumed not to include the identifier. """ # clear data first identifier = self.identifier self.clear() # url must start with baseurl if set (including slash) if (self.baseurl is not None): (path, num) = re.subn('^' + self.baseurl, '', url, 1) if (num != 1): raise IIIFRequestError( text="Request URL does not start with base URL") url = path # Break up by path segments, count to decide format segs = url.split('/') if (identifier is not None): segs.insert(0, identifier) elif (self.allow_slashes_in_identifier): segs = self._allow_slashes_in_identifier_munger(segs) # Now have segments with identifier as first if (len(segs) > 5): raise IIIFRequestPathError( text="Request URL (%s) has too many path segments" % url) elif (len(segs) == 5): self.identifier = urlunquote(segs[0]) self.region = urlunquote(segs[1]) self.size = urlunquote(segs[2]) self.rotation = urlunquote(segs[3]) self.quality = self.strip_format(urlunquote(segs[4])) self.info = False elif (len(segs) == 2): self.identifier = urlunquote(segs[0]) info_name = self.strip_format(urlunquote(segs[1])) if (info_name != "info"): raise IIIFRequestError( text="Bad name for Image Information") if (self.api_version == '1.0'): if (self.format not in ['json', 'xml']): raise IIIFRequestError( text="Invalid format for Image Information (json and xml allowed)") elif (self.format != 'json'): raise IIIFRequestError( text="Invalid format for Image Information (only json allowed)") self.info = True elif (len(segs) == 1): self.identifier = urlunquote(segs[0]) raise IIIFRequestBaseURI() else: raise IIIFRequestPathError( text="Bad number of path segments in request") return(self)
python
def split_url(self, url): # clear data first identifier = self.identifier self.clear() # url must start with baseurl if set (including slash) if (self.baseurl is not None): (path, num) = re.subn('^' + self.baseurl, '', url, 1) if (num != 1): raise IIIFRequestError( text="Request URL does not start with base URL") url = path # Break up by path segments, count to decide format segs = url.split('/') if (identifier is not None): segs.insert(0, identifier) elif (self.allow_slashes_in_identifier): segs = self._allow_slashes_in_identifier_munger(segs) # Now have segments with identifier as first if (len(segs) > 5): raise IIIFRequestPathError( text="Request URL (%s) has too many path segments" % url) elif (len(segs) == 5): self.identifier = urlunquote(segs[0]) self.region = urlunquote(segs[1]) self.size = urlunquote(segs[2]) self.rotation = urlunquote(segs[3]) self.quality = self.strip_format(urlunquote(segs[4])) self.info = False elif (len(segs) == 2): self.identifier = urlunquote(segs[0]) info_name = self.strip_format(urlunquote(segs[1])) if (info_name != "info"): raise IIIFRequestError( text="Bad name for Image Information") if (self.api_version == '1.0'): if (self.format not in ['json', 'xml']): raise IIIFRequestError( text="Invalid format for Image Information (json and xml allowed)") elif (self.format != 'json'): raise IIIFRequestError( text="Invalid format for Image Information (only json allowed)") self.info = True elif (len(segs) == 1): self.identifier = urlunquote(segs[0]) raise IIIFRequestBaseURI() else: raise IIIFRequestPathError( text="Bad number of path segments in request") return(self)
[ "def", "split_url", "(", "self", ",", "url", ")", ":", "# clear data first", "identifier", "=", "self", ".", "identifier", "self", ".", "clear", "(", ")", "# url must start with baseurl if set (including slash)", "if", "(", "self", ".", "baseurl", "is", "not", "...
Parse an IIIF API URL path into components. Will parse a URL or URL path that accords with either the parametrized or info API forms. Will raise an IIIFRequestError on failure. If self.identifier is set then url is assumed not to include the identifier.
[ "Parse", "an", "IIIF", "API", "URL", "path", "into", "components", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L228-L285
7,251
zimeon/iiif
iiif/request.py
IIIFRequest.strip_format
def strip_format(self, str_and_format): """Look for optional .fmt at end of URI. The format must start with letter. Note that we want to catch the case of a dot and no format (format='') which is different from no dot (format=None) Sets self.format as side effect, returns possibly modified string """ m = re.match("(.+)\.([a-zA-Z]\w*)$", str_and_format) if (m): # There is a format string at end, chop off and store str_and_format = m.group(1) self.format = (m.group(2) if (m.group(2) is not None) else '') return(str_and_format)
python
def strip_format(self, str_and_format): m = re.match("(.+)\.([a-zA-Z]\w*)$", str_and_format) if (m): # There is a format string at end, chop off and store str_and_format = m.group(1) self.format = (m.group(2) if (m.group(2) is not None) else '') return(str_and_format)
[ "def", "strip_format", "(", "self", ",", "str_and_format", ")", ":", "m", "=", "re", ".", "match", "(", "\"(.+)\\.([a-zA-Z]\\w*)$\"", ",", "str_and_format", ")", "if", "(", "m", ")", ":", "# There is a format string at end, chop off and store", "str_and_format", "="...
Look for optional .fmt at end of URI. The format must start with letter. Note that we want to catch the case of a dot and no format (format='') which is different from no dot (format=None) Sets self.format as side effect, returns possibly modified string
[ "Look", "for", "optional", ".", "fmt", "at", "end", "of", "URI", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L287-L301
7,252
zimeon/iiif
iiif/request.py
IIIFRequest.parse_parameters
def parse_parameters(self): """Parse the parameters of an Image Information request. Will throw an IIIFRequestError on failure, set attributes on success. Care is taken not to change any of the artibutes which store path components. All parsed values are stored in new attributes. """ self.parse_region() self.parse_size() self.parse_rotation() self.parse_quality() self.parse_format()
python
def parse_parameters(self): self.parse_region() self.parse_size() self.parse_rotation() self.parse_quality() self.parse_format()
[ "def", "parse_parameters", "(", "self", ")", ":", "self", ".", "parse_region", "(", ")", "self", ".", "parse_size", "(", ")", "self", ".", "parse_rotation", "(", ")", "self", ".", "parse_quality", "(", ")", "self", ".", "parse_format", "(", ")" ]
Parse the parameters of an Image Information request. Will throw an IIIFRequestError on failure, set attributes on success. Care is taken not to change any of the artibutes which store path components. All parsed values are stored in new attributes.
[ "Parse", "the", "parameters", "of", "an", "Image", "Information", "request", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L303-L315
7,253
zimeon/iiif
iiif/request.py
IIIFRequest.parse_region
def parse_region(self): """Parse the region component of the path. /full/ -> self.region_full = True (test this first) /square/ -> self.region_square = True (test this second) /x,y,w,h/ -> self.region_xywh = (x,y,w,h) /pct:x,y,w,h/ -> self.region_xywh and self.region_pct = True Will throw errors if the parameters are illegal according to the specification but does not know about and thus cannot do any tests against any image being manipulated. """ self.region_full = False self.region_square = False self.region_pct = False if (self.region is None or self.region == 'full'): self.region_full = True return if (self.api_version >= '2.1' and self.region == 'square'): self.region_square = True return xywh = self.region pct_match = re.match('pct:(.*)$', self.region) if (pct_match): xywh = pct_match.group(1) self.region_pct = True # Now whether this was pct: or now, we expect 4 values... str_values = xywh.split(',', 5) if (len(str_values) != 4): raise IIIFRequestError( code=400, parameter="region", text="Bad number of values in region specification, " "must be x,y,w,h but got %d value(s) from '%s'" % (len(str_values), xywh)) values = [] for str_value in str_values: # Must be either integer (not pct) or interger/float (pct) if (pct_match): try: # This is rather more permissive that the iiif spec value = float(str_value) except ValueError: raise IIIFRequestError( parameter="region", text="Bad floating point value for percentage in " "region (%s)." % str_value) if (value > 100.0): raise IIIFRequestError( parameter="region", text="Percentage over value over 100.0 in region " "(%s)." % str_value) else: try: value = int(str_value) except ValueError: raise IIIFRequestError( parameter="region", text="Bad integer value in region (%s)." % str_value) if (value < 0): raise IIIFRequestError( parameter="region", text="Negative values not allowed in region (%s)." 
% str_value) values.append(value) # Zero size region is w or h are zero (careful that they may be float) if (values[2] == 0.0 or values[3] == 0.0): raise IIIFZeroSizeError( code=400, parameter="region", text="Zero size region specified (%s))." % xywh) self.region_xywh = values
python
def parse_region(self): self.region_full = False self.region_square = False self.region_pct = False if (self.region is None or self.region == 'full'): self.region_full = True return if (self.api_version >= '2.1' and self.region == 'square'): self.region_square = True return xywh = self.region pct_match = re.match('pct:(.*)$', self.region) if (pct_match): xywh = pct_match.group(1) self.region_pct = True # Now whether this was pct: or now, we expect 4 values... str_values = xywh.split(',', 5) if (len(str_values) != 4): raise IIIFRequestError( code=400, parameter="region", text="Bad number of values in region specification, " "must be x,y,w,h but got %d value(s) from '%s'" % (len(str_values), xywh)) values = [] for str_value in str_values: # Must be either integer (not pct) or interger/float (pct) if (pct_match): try: # This is rather more permissive that the iiif spec value = float(str_value) except ValueError: raise IIIFRequestError( parameter="region", text="Bad floating point value for percentage in " "region (%s)." % str_value) if (value > 100.0): raise IIIFRequestError( parameter="region", text="Percentage over value over 100.0 in region " "(%s)." % str_value) else: try: value = int(str_value) except ValueError: raise IIIFRequestError( parameter="region", text="Bad integer value in region (%s)." % str_value) if (value < 0): raise IIIFRequestError( parameter="region", text="Negative values not allowed in region (%s)." % str_value) values.append(value) # Zero size region is w or h are zero (careful that they may be float) if (values[2] == 0.0 or values[3] == 0.0): raise IIIFZeroSizeError( code=400, parameter="region", text="Zero size region specified (%s))." % xywh) self.region_xywh = values
[ "def", "parse_region", "(", "self", ")", ":", "self", ".", "region_full", "=", "False", "self", ".", "region_square", "=", "False", "self", ".", "region_pct", "=", "False", "if", "(", "self", ".", "region", "is", "None", "or", "self", ".", "region", "=...
Parse the region component of the path. /full/ -> self.region_full = True (test this first) /square/ -> self.region_square = True (test this second) /x,y,w,h/ -> self.region_xywh = (x,y,w,h) /pct:x,y,w,h/ -> self.region_xywh and self.region_pct = True Will throw errors if the parameters are illegal according to the specification but does not know about and thus cannot do any tests against any image being manipulated.
[ "Parse", "the", "region", "component", "of", "the", "path", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L317-L386
7,254
zimeon/iiif
iiif/request.py
IIIFRequest.parse_size
def parse_size(self, size=None): """Parse the size component of the path. /full/ -> self.size_full = True /max/ -> self.size_mac = True (2.1 and up) /w,/ -> self.size_wh = (w,None) /,h/ -> self.size_wh = (None,h) /w,h/ -> self.size_wh = (w,h) /pct:p/ -> self.size_pct = p /!w,h/ -> self.size_wh = (w,h), self.size_bang = True Expected use: (w,h) = iiif.size_to_apply(region_w,region_h) if (q is None): # full image else: # scale to w by h Returns (None,None) if no scaling is required. """ if (size is not None): self.size = size self.size_pct = None self.size_bang = False self.size_full = False self.size_wh = (None, None) if (self.size is None or self.size == 'full'): self.size_full = True return elif (self.size == 'max' and self.api_version >= '2.1'): self.size_max = True return pct_match = re.match('pct:(.*)$', self.size) if (pct_match is not None): pct_str = pct_match.group(1) try: self.size_pct = float(pct_str) except ValueError: raise IIIFRequestError( code=400, parameter="size", text="Percentage size value must be a number, got " "'%s'." % (pct_str)) # Note that Image API specificaton places no upper limit on # size so none is implemented here. if (self.size_pct < 0.0): raise IIIFRequestError( code=400, parameter="size", text="Base size percentage, must be > 0.0, got %f." % (self.size_pct)) else: if (self.size[0] == '!'): # Have "!w,h" form size_no_bang = self.size[1:] (mw, mh) = self._parse_w_comma_h(size_no_bang, 'size') if (mw is None or mh is None): raise IIIFRequestError( code=400, parameter="size", text="Illegal size requested: both w,h must be " "specified in !w,h requests.") self.size_wh = (mw, mh) self.size_bang = True else: # Must now be "w,h", "w," or ",h" self.size_wh = self._parse_w_comma_h(self.size, 'size') # Sanity check w,h (w, h) = self.size_wh if ((w is not None and w <= 0) or (h is not None and h <= 0)): raise IIIFZeroSizeError( code=400, parameter='size', text="Size parameters request zero size result image.")
python
def parse_size(self, size=None): if (size is not None): self.size = size self.size_pct = None self.size_bang = False self.size_full = False self.size_wh = (None, None) if (self.size is None or self.size == 'full'): self.size_full = True return elif (self.size == 'max' and self.api_version >= '2.1'): self.size_max = True return pct_match = re.match('pct:(.*)$', self.size) if (pct_match is not None): pct_str = pct_match.group(1) try: self.size_pct = float(pct_str) except ValueError: raise IIIFRequestError( code=400, parameter="size", text="Percentage size value must be a number, got " "'%s'." % (pct_str)) # Note that Image API specificaton places no upper limit on # size so none is implemented here. if (self.size_pct < 0.0): raise IIIFRequestError( code=400, parameter="size", text="Base size percentage, must be > 0.0, got %f." % (self.size_pct)) else: if (self.size[0] == '!'): # Have "!w,h" form size_no_bang = self.size[1:] (mw, mh) = self._parse_w_comma_h(size_no_bang, 'size') if (mw is None or mh is None): raise IIIFRequestError( code=400, parameter="size", text="Illegal size requested: both w,h must be " "specified in !w,h requests.") self.size_wh = (mw, mh) self.size_bang = True else: # Must now be "w,h", "w," or ",h" self.size_wh = self._parse_w_comma_h(self.size, 'size') # Sanity check w,h (w, h) = self.size_wh if ((w is not None and w <= 0) or (h is not None and h <= 0)): raise IIIFZeroSizeError( code=400, parameter='size', text="Size parameters request zero size result image.")
[ "def", "parse_size", "(", "self", ",", "size", "=", "None", ")", ":", "if", "(", "size", "is", "not", "None", ")", ":", "self", ".", "size", "=", "size", "self", ".", "size_pct", "=", "None", "self", ".", "size_bang", "=", "False", "self", ".", "...
Parse the size component of the path. /full/ -> self.size_full = True /max/ -> self.size_mac = True (2.1 and up) /w,/ -> self.size_wh = (w,None) /,h/ -> self.size_wh = (None,h) /w,h/ -> self.size_wh = (w,h) /pct:p/ -> self.size_pct = p /!w,h/ -> self.size_wh = (w,h), self.size_bang = True Expected use: (w,h) = iiif.size_to_apply(region_w,region_h) if (q is None): # full image else: # scale to w by h Returns (None,None) if no scaling is required.
[ "Parse", "the", "size", "component", "of", "the", "path", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L388-L457
7,255
zimeon/iiif
iiif/request.py
IIIFRequest._parse_w_comma_h
def _parse_w_comma_h(self, whstr, param): """Utility to parse "w,h" "w," or ",h" values. Returns (w,h) where w,h are either None or ineteger. Will throw a ValueError if there is a problem with one or both. """ try: (wstr, hstr) = whstr.split(',', 2) w = self._parse_non_negative_int(wstr, 'w') h = self._parse_non_negative_int(hstr, 'h') except ValueError as e: raise IIIFRequestError( code=400, parameter=param, text="Illegal %s value (%s)." % (param, str(e))) if (w is None and h is None): raise IIIFRequestError( code=400, parameter=param, text="Must specify at least one of w,h for %s." % (param)) return(w, h)
python
def _parse_w_comma_h(self, whstr, param): try: (wstr, hstr) = whstr.split(',', 2) w = self._parse_non_negative_int(wstr, 'w') h = self._parse_non_negative_int(hstr, 'h') except ValueError as e: raise IIIFRequestError( code=400, parameter=param, text="Illegal %s value (%s)." % (param, str(e))) if (w is None and h is None): raise IIIFRequestError( code=400, parameter=param, text="Must specify at least one of w,h for %s." % (param)) return(w, h)
[ "def", "_parse_w_comma_h", "(", "self", ",", "whstr", ",", "param", ")", ":", "try", ":", "(", "wstr", ",", "hstr", ")", "=", "whstr", ".", "split", "(", "','", ",", "2", ")", "w", "=", "self", ".", "_parse_non_negative_int", "(", "wstr", ",", "'w'...
Utility to parse "w,h" "w," or ",h" values. Returns (w,h) where w,h are either None or ineteger. Will throw a ValueError if there is a problem with one or both.
[ "Utility", "to", "parse", "w", "h", "w", "or", "h", "values", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L459-L477
7,256
zimeon/iiif
iiif/request.py
IIIFRequest.parse_quality
def parse_quality(self): """Check quality paramater. Sets self.quality_val based on simple substitution of 'native' for default. Checks for the three valid values else throws an IIIFRequestError. """ if (self.quality is None): self.quality_val = self.default_quality elif (self.quality not in self.allowed_qualities): raise IIIFRequestError( code=400, parameter="quality", text="The quality parameter must be '%s', got '%s'." % ("', '".join(self.allowed_qualities), self.quality)) else: self.quality_val = self.quality
python
def parse_quality(self): if (self.quality is None): self.quality_val = self.default_quality elif (self.quality not in self.allowed_qualities): raise IIIFRequestError( code=400, parameter="quality", text="The quality parameter must be '%s', got '%s'." % ("', '".join(self.allowed_qualities), self.quality)) else: self.quality_val = self.quality
[ "def", "parse_quality", "(", "self", ")", ":", "if", "(", "self", ".", "quality", "is", "None", ")", ":", "self", ".", "quality_val", "=", "self", ".", "default_quality", "elif", "(", "self", ".", "quality", "not", "in", "self", ".", "allowed_qualities",...
Check quality paramater. Sets self.quality_val based on simple substitution of 'native' for default. Checks for the three valid values else throws an IIIFRequestError.
[ "Check", "quality", "paramater", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L531-L546
7,257
zimeon/iiif
iiif/request.py
IIIFRequest.parse_format
def parse_format(self): """Check format parameter. All formats values listed in the specification are lowercase alphanumeric value commonly used as file extensions. To leave opportunity for extension here just do a limited sanity check on characters and length. """ if (self.format is not None and not re.match(r'''\w{1,20}$''', self.format)): raise IIIFRequestError( parameter='format', text='Bad format parameter')
python
def parse_format(self): if (self.format is not None and not re.match(r'''\w{1,20}$''', self.format)): raise IIIFRequestError( parameter='format', text='Bad format parameter')
[ "def", "parse_format", "(", "self", ")", ":", "if", "(", "self", ".", "format", "is", "not", "None", "and", "not", "re", ".", "match", "(", "r'''\\w{1,20}$'''", ",", "self", ".", "format", ")", ")", ":", "raise", "IIIFRequestError", "(", "parameter", "...
Check format parameter. All formats values listed in the specification are lowercase alphanumeric value commonly used as file extensions. To leave opportunity for extension here just do a limited sanity check on characters and length.
[ "Check", "format", "parameter", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L548-L560
7,258
zimeon/iiif
iiif/request.py
IIIFRequest.is_scaled_full_image
def is_scaled_full_image(self): """True if this request is for a scaled full image. To be used to determine whether this request should be used in the set of `sizes` specificed in the Image Information. """ return(self.region_full and self.size_wh[0] is not None and self.size_wh[1] is not None and not self.size_bang and self.rotation_deg == 0.0 and self.quality == self.default_quality and self.format == 'jpg')
python
def is_scaled_full_image(self): return(self.region_full and self.size_wh[0] is not None and self.size_wh[1] is not None and not self.size_bang and self.rotation_deg == 0.0 and self.quality == self.default_quality and self.format == 'jpg')
[ "def", "is_scaled_full_image", "(", "self", ")", ":", "return", "(", "self", ".", "region_full", "and", "self", ".", "size_wh", "[", "0", "]", "is", "not", "None", "and", "self", ".", "size_wh", "[", "1", "]", "is", "not", "None", "and", "not", "self...
True if this request is for a scaled full image. To be used to determine whether this request should be used in the set of `sizes` specificed in the Image Information.
[ "True", "if", "this", "request", "is", "for", "a", "scaled", "full", "image", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/request.py#L562-L574
7,259
zimeon/iiif
iiif_reference_server.py
create_reference_server_flask_app
def create_reference_server_flask_app(cfg): """Create referece server Flask application with one or more IIIF handlers.""" # Create Flask app app = Flask(__name__) Flask.secret_key = "SECRET_HERE" app.debug = cfg.debug # Install request handlers client_prefixes = dict() for api_version in cfg.api_versions: handler_config = Config(cfg) handler_config.api_version = api_version handler_config.klass_name = 'pil' handler_config.auth_type = 'none' # Set same prefix on local server as expected on iiif.io handler_config.prefix = "api/image/%s/example/reference" % (api_version) handler_config.client_prefix = handler_config.prefix add_handler(app, handler_config) return app
python
def create_reference_server_flask_app(cfg): # Create Flask app app = Flask(__name__) Flask.secret_key = "SECRET_HERE" app.debug = cfg.debug # Install request handlers client_prefixes = dict() for api_version in cfg.api_versions: handler_config = Config(cfg) handler_config.api_version = api_version handler_config.klass_name = 'pil' handler_config.auth_type = 'none' # Set same prefix on local server as expected on iiif.io handler_config.prefix = "api/image/%s/example/reference" % (api_version) handler_config.client_prefix = handler_config.prefix add_handler(app, handler_config) return app
[ "def", "create_reference_server_flask_app", "(", "cfg", ")", ":", "# Create Flask app", "app", "=", "Flask", "(", "__name__", ")", "Flask", ".", "secret_key", "=", "\"SECRET_HERE\"", "app", ".", "debug", "=", "cfg", ".", "debug", "# Install request handlers", "cli...
Create referece server Flask application with one or more IIIF handlers.
[ "Create", "referece", "server", "Flask", "application", "with", "one", "or", "more", "IIIF", "handlers", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif_reference_server.py#L59-L76
7,260
zimeon/iiif
iiif/info.py
IIIFInfo.id
def id(self): """id property based on server_and_prefix and identifier.""" id = '' if (self.server_and_prefix is not None and self.server_and_prefix != ''): id += self.server_and_prefix + '/' if (self.identifier is not None): id += self.identifier return id
python
def id(self): id = '' if (self.server_and_prefix is not None and self.server_and_prefix != ''): id += self.server_and_prefix + '/' if (self.identifier is not None): id += self.identifier return id
[ "def", "id", "(", "self", ")", ":", "id", "=", "''", "if", "(", "self", ".", "server_and_prefix", "is", "not", "None", "and", "self", ".", "server_and_prefix", "!=", "''", ")", ":", "id", "+=", "self", ".", "server_and_prefix", "+", "'/'", "if", "(",...
id property based on server_and_prefix and identifier.
[ "id", "property", "based", "on", "server_and_prefix", "and", "identifier", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L227-L235
7,261
zimeon/iiif
iiif/info.py
IIIFInfo.id
def id(self, value): """Split into server_and_prefix and identifier.""" i = value.rfind('/') if (i > 0): self.server_and_prefix = value[:i] self.identifier = value[(i + 1):] elif (i == 0): self.server_and_prefix = '' self.identifier = value[(i + 1):] else: self.server_and_prefix = '' self.identifier = value
python
def id(self, value): i = value.rfind('/') if (i > 0): self.server_and_prefix = value[:i] self.identifier = value[(i + 1):] elif (i == 0): self.server_and_prefix = '' self.identifier = value[(i + 1):] else: self.server_and_prefix = '' self.identifier = value
[ "def", "id", "(", "self", ",", "value", ")", ":", "i", "=", "value", ".", "rfind", "(", "'/'", ")", "if", "(", "i", ">", "0", ")", ":", "self", ".", "server_and_prefix", "=", "value", "[", ":", "i", "]", "self", ".", "identifier", "=", "value",...
Split into server_and_prefix and identifier.
[ "Split", "into", "server_and_prefix", "and", "identifier", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L238-L249
7,262
zimeon/iiif
iiif/info.py
IIIFInfo.set_version_info
def set_version_info(self, api_version=None): """Set up normal values for given api_version. Will use current value of self.api_version if a version number is not specified in the call. Will raise an IIIFInfoError """ if (api_version is None): api_version = self.api_version if (api_version not in CONF): raise IIIFInfoError("Unknown API version %s" % (api_version)) self.params = CONF[api_version]['params'] self.array_params = CONF[api_version]['array_params'] self.complex_params = CONF[api_version]['complex_params'] for a in ('context', 'compliance_prefix', 'compliance_suffix', 'protocol', 'required_params'): if (a in CONF[api_version]): self._setattr(a, CONF[api_version][a])
python
def set_version_info(self, api_version=None): if (api_version is None): api_version = self.api_version if (api_version not in CONF): raise IIIFInfoError("Unknown API version %s" % (api_version)) self.params = CONF[api_version]['params'] self.array_params = CONF[api_version]['array_params'] self.complex_params = CONF[api_version]['complex_params'] for a in ('context', 'compliance_prefix', 'compliance_suffix', 'protocol', 'required_params'): if (a in CONF[api_version]): self._setattr(a, CONF[api_version][a])
[ "def", "set_version_info", "(", "self", ",", "api_version", "=", "None", ")", ":", "if", "(", "api_version", "is", "None", ")", ":", "api_version", "=", "self", ".", "api_version", "if", "(", "api_version", "not", "in", "CONF", ")", ":", "raise", "IIIFIn...
Set up normal values for given api_version. Will use current value of self.api_version if a version number is not specified in the call. Will raise an IIIFInfoError
[ "Set", "up", "normal", "values", "for", "given", "api_version", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L251-L267
7,263
zimeon/iiif
iiif/info.py
IIIFInfo.compliance
def compliance(self, value): """Set the compliance profile URI.""" if (self.api_version < '2.0'): self.profile = value else: try: self.profile[0] = value except AttributeError: # handle case where profile not initialized as array self.profile = [value]
python
def compliance(self, value): if (self.api_version < '2.0'): self.profile = value else: try: self.profile[0] = value except AttributeError: # handle case where profile not initialized as array self.profile = [value]
[ "def", "compliance", "(", "self", ",", "value", ")", ":", "if", "(", "self", ".", "api_version", "<", "'2.0'", ")", ":", "self", ".", "profile", "=", "value", "else", ":", "try", ":", "self", ".", "profile", "[", "0", "]", "=", "value", "except", ...
Set the compliance profile URI.
[ "Set", "the", "compliance", "profile", "URI", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L284-L293
7,264
zimeon/iiif
iiif/info.py
IIIFInfo.level
def level(self): """Extract level number from compliance profile URI. Returns integer level number or raises IIIFInfoError """ m = re.match( self.compliance_prefix + r'(\d)' + self.compliance_suffix + r'$', self.compliance) if (m): return int(m.group(1)) raise IIIFInfoError( "Bad compliance profile URI, failed to extract level number")
python
def level(self): m = re.match( self.compliance_prefix + r'(\d)' + self.compliance_suffix + r'$', self.compliance) if (m): return int(m.group(1)) raise IIIFInfoError( "Bad compliance profile URI, failed to extract level number")
[ "def", "level", "(", "self", ")", ":", "m", "=", "re", ".", "match", "(", "self", ".", "compliance_prefix", "+", "r'(\\d)'", "+", "self", ".", "compliance_suffix", "+", "r'$'", ",", "self", ".", "compliance", ")", "if", "(", "m", ")", ":", "return", ...
Extract level number from compliance profile URI. Returns integer level number or raises IIIFInfoError
[ "Extract", "level", "number", "from", "compliance", "profile", "URI", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L296-L310
7,265
zimeon/iiif
iiif/info.py
IIIFInfo.level
def level(self, value): """Build profile URI from level. Level should be an integer 0,1,2 """ self.compliance = self.compliance_prefix + \ ("%d" % value) + self.compliance_suffix
python
def level(self, value): self.compliance = self.compliance_prefix + \ ("%d" % value) + self.compliance_suffix
[ "def", "level", "(", "self", ",", "value", ")", ":", "self", ".", "compliance", "=", "self", ".", "compliance_prefix", "+", "(", "\"%d\"", "%", "value", ")", "+", "self", ".", "compliance_suffix" ]
Build profile URI from level. Level should be an integer 0,1,2
[ "Build", "profile", "URI", "from", "level", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L313-L319
7,266
zimeon/iiif
iiif/info.py
IIIFInfo.add_service
def add_service(self, service): """Add a service description. Handles transition from self.service=None, self.service=dict for a single service, and then self.service=[dict,dict,...] for multiple """ if (self.service is None): self.service = service elif (isinstance(self.service, dict)): self.service = [self.service, service] else: self.service.append(service)
python
def add_service(self, service): if (self.service is None): self.service = service elif (isinstance(self.service, dict)): self.service = [self.service, service] else: self.service.append(service)
[ "def", "add_service", "(", "self", ",", "service", ")", ":", "if", "(", "self", ".", "service", "is", "None", ")", ":", "self", ".", "service", "=", "service", "elif", "(", "isinstance", "(", "self", ".", "service", ",", "dict", ")", ")", ":", "sel...
Add a service description. Handles transition from self.service=None, self.service=dict for a single service, and then self.service=[dict,dict,...] for multiple
[ "Add", "a", "service", "description", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L391-L402
7,267
zimeon/iiif
iiif/info.py
IIIFInfo.validate
def validate(self): """Validate this object as Image API data. Raise IIIFInfoError with helpful message if not valid. """ errors = [] for param in self.required_params: if (not hasattr(self, param) or getattr(self, param) is None): errors.append("missing %s parameter" % (param)) if (len(errors) > 0): raise IIIFInfoError("Bad data for info.json: " + ", ".join(errors)) return True
python
def validate(self): errors = [] for param in self.required_params: if (not hasattr(self, param) or getattr(self, param) is None): errors.append("missing %s parameter" % (param)) if (len(errors) > 0): raise IIIFInfoError("Bad data for info.json: " + ", ".join(errors)) return True
[ "def", "validate", "(", "self", ")", ":", "errors", "=", "[", "]", "for", "param", "in", "self", ".", "required_params", ":", "if", "(", "not", "hasattr", "(", "self", ",", "param", ")", "or", "getattr", "(", "self", ",", "param", ")", "is", "None"...
Validate this object as Image API data. Raise IIIFInfoError with helpful message if not valid.
[ "Validate", "this", "object", "as", "Image", "API", "data", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L417-L428
7,268
zimeon/iiif
iiif/info.py
IIIFInfo.as_json
def as_json(self, validate=True): """Return JSON serialization. Will raise IIIFInfoError if insufficient parameters are present to have a valid info.json response (unless validate is False). """ if (validate): self.validate() json_dict = {} if (self.api_version > '1.0'): json_dict['@context'] = self.context params_to_write = set(self.params) params_to_write.discard('identifier') if (self.identifier): if (self.api_version == '1.0'): json_dict['identifier'] = self.identifier # local id else: json_dict['@id'] = self.id # URI params_to_write.discard('profile') if (self.compliance): if (self.api_version < '2.0'): json_dict['profile'] = self.compliance else: # FIXME - need to support extra profile features json_dict['profile'] = [self.compliance] d = {} if (self.formats is not None): d['formats'] = self.formats if (self.qualities is not None): d['qualities'] = self.qualities if (self.supports is not None): d['supports'] = self.supports if (len(d) > 0): json_dict['profile'].append(d) params_to_write.discard('formats') params_to_write.discard('qualities') params_to_write.discard('supports') for param in params_to_write: if (hasattr(self, param) and getattr(self, param) is not None): json_dict[param] = getattr(self, param) return(json.dumps(json_dict, sort_keys=True, indent=2))
python
def as_json(self, validate=True): if (validate): self.validate() json_dict = {} if (self.api_version > '1.0'): json_dict['@context'] = self.context params_to_write = set(self.params) params_to_write.discard('identifier') if (self.identifier): if (self.api_version == '1.0'): json_dict['identifier'] = self.identifier # local id else: json_dict['@id'] = self.id # URI params_to_write.discard('profile') if (self.compliance): if (self.api_version < '2.0'): json_dict['profile'] = self.compliance else: # FIXME - need to support extra profile features json_dict['profile'] = [self.compliance] d = {} if (self.formats is not None): d['formats'] = self.formats if (self.qualities is not None): d['qualities'] = self.qualities if (self.supports is not None): d['supports'] = self.supports if (len(d) > 0): json_dict['profile'].append(d) params_to_write.discard('formats') params_to_write.discard('qualities') params_to_write.discard('supports') for param in params_to_write: if (hasattr(self, param) and getattr(self, param) is not None): json_dict[param] = getattr(self, param) return(json.dumps(json_dict, sort_keys=True, indent=2))
[ "def", "as_json", "(", "self", ",", "validate", "=", "True", ")", ":", "if", "(", "validate", ")", ":", "self", ".", "validate", "(", ")", "json_dict", "=", "{", "}", "if", "(", "self", ".", "api_version", ">", "'1.0'", ")", ":", "json_dict", "[", ...
Return JSON serialization. Will raise IIIFInfoError if insufficient parameters are present to have a valid info.json response (unless validate is False).
[ "Return", "JSON", "serialization", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L430-L471
7,269
zimeon/iiif
iiif/info.py
IIIFInfo.read
def read(self, fh, api_version=None): """Read info.json from file like object. Parameters: fh -- file like object supporting fh.read() api_version -- IIIF Image API version expected If api_version is set then the parsing will assume this API version, else the version will be determined from the incoming data. NOTE that the value of self.api_version is NOT used in this routine. If an api_version is specified and there is a @context specified then an IIIFInfoError will be raised unless these match. If no known @context is present and no api_version set then an IIIFInfoError will be raised. """ j = json.load(fh) # # @context and API version self.context = None if (api_version == '1.0'): # v1.0 did not have a @context so we simply take the version # passed in self.api_version = api_version elif ('@context' in j): # determine API version from context self.context = j['@context'] api_version_read = None for v in CONF: if (v > '1.0' and self.context == CONF[v]['context']): api_version_read = v break if (api_version_read is None): raise IIIFInfoError( "Unknown @context, cannot determine API version (%s)" % (self.context)) else: if (api_version is not None and api_version != api_version_read): raise IIIFInfoError( "Expected API version '%s' but got @context for API version '%s'" % (api_version, api_version_read)) else: self.api_version = api_version_read else: # no @context and not 1.0 if (api_version is None): raise IIIFInfoError("No @context (and no default given)") self.api_version = api_version self.set_version_info() # # @id or identifier if (self.api_version == '1.0'): if ('identifier' in j): self.id = j['identifier'] else: raise IIIFInfoError("Missing identifier in info.json") else: if ('@id' in j): self.id = j['@id'] else: raise IIIFInfoError("Missing @id in info.json") # # other params for param in self.params: if (param == 'identifier'): continue # dealt with above if (param in j): if (param in self.complex_params): # use function ref in complex_params to parse, 
optional # dst to map to a different property name self._setattr(param, self.complex_params[ param](self, j[param])) else: self._setattr(param, j[param]) return True
python
def read(self, fh, api_version=None): j = json.load(fh) # # @context and API version self.context = None if (api_version == '1.0'): # v1.0 did not have a @context so we simply take the version # passed in self.api_version = api_version elif ('@context' in j): # determine API version from context self.context = j['@context'] api_version_read = None for v in CONF: if (v > '1.0' and self.context == CONF[v]['context']): api_version_read = v break if (api_version_read is None): raise IIIFInfoError( "Unknown @context, cannot determine API version (%s)" % (self.context)) else: if (api_version is not None and api_version != api_version_read): raise IIIFInfoError( "Expected API version '%s' but got @context for API version '%s'" % (api_version, api_version_read)) else: self.api_version = api_version_read else: # no @context and not 1.0 if (api_version is None): raise IIIFInfoError("No @context (and no default given)") self.api_version = api_version self.set_version_info() # # @id or identifier if (self.api_version == '1.0'): if ('identifier' in j): self.id = j['identifier'] else: raise IIIFInfoError("Missing identifier in info.json") else: if ('@id' in j): self.id = j['@id'] else: raise IIIFInfoError("Missing @id in info.json") # # other params for param in self.params: if (param == 'identifier'): continue # dealt with above if (param in j): if (param in self.complex_params): # use function ref in complex_params to parse, optional # dst to map to a different property name self._setattr(param, self.complex_params[ param](self, j[param])) else: self._setattr(param, j[param]) return True
[ "def", "read", "(", "self", ",", "fh", ",", "api_version", "=", "None", ")", ":", "j", "=", "json", ".", "load", "(", "fh", ")", "#", "# @context and API version", "self", ".", "context", "=", "None", "if", "(", "api_version", "==", "'1.0'", ")", ":"...
Read info.json from file like object. Parameters: fh -- file like object supporting fh.read() api_version -- IIIF Image API version expected If api_version is set then the parsing will assume this API version, else the version will be determined from the incoming data. NOTE that the value of self.api_version is NOT used in this routine. If an api_version is specified and there is a @context specified then an IIIFInfoError will be raised unless these match. If no known @context is present and no api_version set then an IIIFInfoError will be raised.
[ "Read", "info", ".", "json", "from", "file", "like", "object", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L473-L546
7,270
zimeon/iiif
iiif/manipulator_pil.py
IIIFManipulatorPIL.set_max_image_pixels
def set_max_image_pixels(self, pixels): """Set PIL limit on pixel size of images to load if non-zero. WARNING: This is a global setting in PIL, it is not local to this manipulator instance! Setting a value here will not only set the given limit but also convert the PIL "DecompressionBombWarning" into an error. Thus setting a moderate limit sets a hard limit on image size loaded, setting a very large limit will have the effect of disabling the warning. """ if (pixels): Image.MAX_IMAGE_PIXELS = pixels Image.warnings.simplefilter( 'error', Image.DecompressionBombWarning)
python
def set_max_image_pixels(self, pixels): if (pixels): Image.MAX_IMAGE_PIXELS = pixels Image.warnings.simplefilter( 'error', Image.DecompressionBombWarning)
[ "def", "set_max_image_pixels", "(", "self", ",", "pixels", ")", ":", "if", "(", "pixels", ")", ":", "Image", ".", "MAX_IMAGE_PIXELS", "=", "pixels", "Image", ".", "warnings", ".", "simplefilter", "(", "'error'", ",", "Image", ".", "DecompressionBombWarning", ...
Set PIL limit on pixel size of images to load if non-zero. WARNING: This is a global setting in PIL, it is not local to this manipulator instance! Setting a value here will not only set the given limit but also convert the PIL "DecompressionBombWarning" into an error. Thus setting a moderate limit sets a hard limit on image size loaded, setting a very large limit will have the effect of disabling the warning.
[ "Set", "PIL", "limit", "on", "pixel", "size", "of", "images", "to", "load", "if", "non", "-", "zero", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_pil.py#L42-L57
7,271
zimeon/iiif
iiif/manipulator_pil.py
IIIFManipulatorPIL.do_first
def do_first(self): """Create PIL object from input image file. Image location must be in self.srcfile. Will result in self.width and self.height being set to the image dimensions. Will raise an IIIFError on failure to load the image """ self.logger.debug("do_first: src=%s" % (self.srcfile)) try: self.image = Image.open(self.srcfile) except Image.DecompressionBombWarning as e: # This exception will be raised only if PIL has been # configured to raise an error in the case of images # that exceeed Image.MAX_IMAGE_PIXELS, with # Image.warnings.simplefilter('error', Image.DecompressionBombWarning) raise IIIFError(text=("Image size limit exceeded (PIL: %s)" % (str(e)))) except Exception as e: raise IIIFError(text=("Failed to read image (PIL: %s)" % (str(e)))) (self.width, self.height) = self.image.size
python
def do_first(self): self.logger.debug("do_first: src=%s" % (self.srcfile)) try: self.image = Image.open(self.srcfile) except Image.DecompressionBombWarning as e: # This exception will be raised only if PIL has been # configured to raise an error in the case of images # that exceeed Image.MAX_IMAGE_PIXELS, with # Image.warnings.simplefilter('error', Image.DecompressionBombWarning) raise IIIFError(text=("Image size limit exceeded (PIL: %s)" % (str(e)))) except Exception as e: raise IIIFError(text=("Failed to read image (PIL: %s)" % (str(e)))) (self.width, self.height) = self.image.size
[ "def", "do_first", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"do_first: src=%s\"", "%", "(", "self", ".", "srcfile", ")", ")", "try", ":", "self", ".", "image", "=", "Image", ".", "open", "(", "self", ".", "srcfile", ")", "...
Create PIL object from input image file. Image location must be in self.srcfile. Will result in self.width and self.height being set to the image dimensions. Will raise an IIIFError on failure to load the image
[ "Create", "PIL", "object", "from", "input", "image", "file", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_pil.py#L59-L78
7,272
zimeon/iiif
iiif/flask_utils.py
html_page
def html_page(title="Page Title", body=""): """Create HTML page as string.""" html = "<html>\n<head><title>%s</title></head>\n<body>\n" % (title) html += "<h1>%s</h1>\n" % (title) html += body html += "</body>\n</html>\n" return html
python
def html_page(title="Page Title", body=""): html = "<html>\n<head><title>%s</title></head>\n<body>\n" % (title) html += "<h1>%s</h1>\n" % (title) html += body html += "</body>\n</html>\n" return html
[ "def", "html_page", "(", "title", "=", "\"Page Title\"", ",", "body", "=", "\"\"", ")", ":", "html", "=", "\"<html>\\n<head><title>%s</title></head>\\n<body>\\n\"", "%", "(", "title", ")", "html", "+=", "\"<h1>%s</h1>\\n\"", "%", "(", "title", ")", "html", "+=",...
Create HTML page as string.
[ "Create", "HTML", "page", "as", "string", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L49-L55
7,273
zimeon/iiif
iiif/flask_utils.py
top_level_index_page
def top_level_index_page(config): """HTML top-level index page which provides a link to each handler.""" title = "IIIF Test Server on %s" % (config.host) body = "<ul>\n" for prefix in sorted(config.prefixes.keys()): body += '<li><a href="/%s">%s</a></li>\n' % (prefix, prefix) body += "</ul>\n" return html_page(title, body)
python
def top_level_index_page(config): title = "IIIF Test Server on %s" % (config.host) body = "<ul>\n" for prefix in sorted(config.prefixes.keys()): body += '<li><a href="/%s">%s</a></li>\n' % (prefix, prefix) body += "</ul>\n" return html_page(title, body)
[ "def", "top_level_index_page", "(", "config", ")", ":", "title", "=", "\"IIIF Test Server on %s\"", "%", "(", "config", ".", "host", ")", "body", "=", "\"<ul>\\n\"", "for", "prefix", "in", "sorted", "(", "config", ".", "prefixes", ".", "keys", "(", ")", ")...
HTML top-level index page which provides a link to each handler.
[ "HTML", "top", "-", "level", "index", "page", "which", "provides", "a", "link", "to", "each", "handler", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L58-L65
7,274
zimeon/iiif
iiif/flask_utils.py
identifiers
def identifiers(config): """Show list of identifiers for this prefix. Handles both the case of local file based identifiers and also image generators. Arguments: config - configuration object in which: config.klass_name - 'gen' if a generator function config.generator_dir - directory for generator code config.image_dir - directory for images Returns: ids - a list of ids """ ids = [] if (config.klass_name == 'gen'): for generator in os.listdir(config.generator_dir): if (generator == '__init__.py'): continue (gid, ext) = os.path.splitext(generator) if (ext == '.py' and os.path.isfile(os.path.join(config.generator_dir, generator))): ids.append(gid) else: for image_file in os.listdir(config.image_dir): (iid, ext) = os.path.splitext(image_file) if (ext in ['.jpg', '.png', '.tif'] and os.path.isfile(os.path.join(config.image_dir, image_file))): ids.append(iid) return ids
python
def identifiers(config):
    """Return the list of identifiers available for this prefix.

    For a 'gen' manipulator the identifiers are generator module names
    (*.py, excluding __init__.py) under config.generator_dir; otherwise
    they are image file basenames (.jpg/.png/.tif) under config.image_dir.
    """
    ids = []
    if config.klass_name == 'gen':
        for entry in os.listdir(config.generator_dir):
            if entry == '__init__.py':
                continue
            (gid, ext) = os.path.splitext(entry)
            if ext == '.py' and os.path.isfile(os.path.join(config.generator_dir, entry)):
                ids.append(gid)
    else:
        for entry in os.listdir(config.image_dir):
            (iid, ext) = os.path.splitext(entry)
            if ext in ('.jpg', '.png', '.tif') and os.path.isfile(os.path.join(config.image_dir, entry)):
                ids.append(iid)
    return ids
[ "def", "identifiers", "(", "config", ")", ":", "ids", "=", "[", "]", "if", "(", "config", ".", "klass_name", "==", "'gen'", ")", ":", "for", "generator", "in", "os", ".", "listdir", "(", "config", ".", "generator_dir", ")", ":", "if", "(", "generator...
Show list of identifiers for this prefix. Handles both the case of local file based identifiers and also image generators. Arguments: config - configuration object in which: config.klass_name - 'gen' if a generator function config.generator_dir - directory for generator code config.image_dir - directory for images Returns: ids - a list of ids
[ "Show", "list", "of", "identifiers", "for", "this", "prefix", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L68-L98
7,275
zimeon/iiif
iiif/flask_utils.py
prefix_index_page
def prefix_index_page(config):
    """HTML index page for a specific prefix.

    The prefix seen by the client is obtained from config.client_prefix
    as opposed to the local server prefix in config.prefix. Also uses the
    identifiers(config) function to get identifiers available.

    Arguments:
        config - configuration object in which:
            config.client_prefix - URI path prefix seen by client
            config.host - URI host seen by client
            config.api_version - string for api_version
            config.klass_name - string manipulator type
            config.auth_type - string for auth type
            config.include_osd - whether OSD is included
    """
    title = "IIIF Image API services under %s" % (config.client_prefix)
    # details of this prefix handler
    body = '<p>\n'
    body += 'host = %s<br/>\n' % (config.host)
    body += 'api_version = %s<br/>\n' % (config.api_version)
    body += 'manipulator = %s<br/>\n' % (config.klass_name)
    body += 'auth_type = %s\n</p>\n' % (config.auth_type)
    # table of identifiers and example requests
    ids = identifiers(config)
    api_version = config.api_version
    # pre-2.0 API versions used 'native' for the default quality
    default = 'native' if api_version < '2.0' else 'default'
    body += '<table border="1">\n<tr><th align="left">Image identifier</th>'
    body += '<th> </th><th>full</th>'
    if (config.klass_name != 'dummy'):
        body += '<th>256,256</th>'
        body += '<th>30deg</th>'
    if (config.include_osd):
        body += '<th> </th>'
    body += "</tr>\n"
    for identifier in sorted(ids):
        base = urljoin('/', config.client_prefix + '/' + identifier)
        body += '<tr><th align="left">%s</th>' % (identifier)
        info = base + "/info.json"
        body += '<td><a href="%s">%s</a></td>' % (info, 'info')
        suffix = "full/full/0/%s" % (default)
        body += '<td><a href="%s">%s</a></td>' % (base + '/' + suffix, suffix)
        if (config.klass_name != 'dummy'):
            suffix = "full/256,256/0/%s" % (default)
            body += '<td><a href="%s">%s</a></td>' % (base + '/' + suffix, suffix)
            suffix = "full/100,/30/%s" % (default)
            body += '<td><a href="%s">%s</a></td>' % (base + '/' + suffix, suffix)
        if (config.include_osd):
            body += '<td><a href="%s/osd.html">OSD</a></td>' % (base)
        body += "</tr>\n"
    # BUG FIX: previously emitted the malformed closing tag "</table<"
    body += "</table>\n"
    return html_page(title, body)
python
def prefix_index_page(config):
    """HTML index page for a specific prefix.

    The prefix seen by the client is obtained from config.client_prefix
    as opposed to the local server prefix in config.prefix. Also uses the
    identifiers(config) function to get identifiers available.
    """
    title = "IIIF Image API services under %s" % (config.client_prefix)
    # details of this prefix handler
    body = '<p>\n'
    body += 'host = %s<br/>\n' % (config.host)
    body += 'api_version = %s<br/>\n' % (config.api_version)
    body += 'manipulator = %s<br/>\n' % (config.klass_name)
    body += 'auth_type = %s\n</p>\n' % (config.auth_type)
    # table of identifiers and example requests
    ids = identifiers(config)
    api_version = config.api_version
    # pre-2.0 API versions used 'native' for the default quality
    default = 'native' if api_version < '2.0' else 'default'
    body += '<table border="1">\n<tr><th align="left">Image identifier</th>'
    body += '<th> </th><th>full</th>'
    if (config.klass_name != 'dummy'):
        body += '<th>256,256</th>'
        body += '<th>30deg</th>'
    if (config.include_osd):
        body += '<th> </th>'
    body += "</tr>\n"
    for identifier in sorted(ids):
        base = urljoin('/', config.client_prefix + '/' + identifier)
        body += '<tr><th align="left">%s</th>' % (identifier)
        info = base + "/info.json"
        body += '<td><a href="%s">%s</a></td>' % (info, 'info')
        suffix = "full/full/0/%s" % (default)
        body += '<td><a href="%s">%s</a></td>' % (base + '/' + suffix, suffix)
        if (config.klass_name != 'dummy'):
            suffix = "full/256,256/0/%s" % (default)
            body += '<td><a href="%s">%s</a></td>' % (base + '/' + suffix, suffix)
            suffix = "full/100,/30/%s" % (default)
            body += '<td><a href="%s">%s</a></td>' % (base + '/' + suffix, suffix)
        if (config.include_osd):
            body += '<td><a href="%s/osd.html">OSD</a></td>' % (base)
        body += "</tr>\n"
    # BUG FIX: previously emitted the malformed closing tag "</table<"
    body += "</table>\n"
    return html_page(title, body)
[ "def", "prefix_index_page", "(", "config", ")", ":", "title", "=", "\"IIIF Image API services under %s\"", "%", "(", "config", ".", "client_prefix", ")", "# details of this prefix handler", "body", "=", "'<p>\\n'", "body", "+=", "'host = %s<br/>\\n'", "%", "(", "confi...
HTML index page for a specific prefix. The prefix seen by the client is obtained from config.client_prefix as opposed to the local server prefix in config.prefix. Also uses the identifiers(config) function to get identifiers available. Arguments: config - configuration object in which: config.client_prefix - URI path prefix seen by client config.host - URI host seen by client config.api_version - string for api_version config.manipulator - string manipulator type config.auth_type - string for auth type config.include_osd - whether OSD is included
[ "HTML", "index", "page", "for", "a", "specific", "prefix", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L101-L152
7,276
zimeon/iiif
iiif/flask_utils.py
osd_page_handler
def osd_page_handler(config=None, identifier=None, prefix=None, **args):
    """Produce an HTML page with an OpenSeadragon viewer for identifier.

    Arguments:
        config - Config object for this IIIF handler
        identifier - identifier of image/generator
        prefix - path prefix
        **args - other arguments ignored
    """
    template_path = os.path.join(os.path.dirname(__file__),
                                 'templates', 'testserver_osd.html')
    with open(template_path, 'r') as fh:
        template = fh.read()
    settings = dict(prefix=prefix,
                    identifier=identifier,
                    api_version=config.api_version,
                    osd_version='2.0.0',
                    osd_uri='/openseadragon200/openseadragon.min.js',
                    osd_images_prefix='/openseadragon200/images',
                    osd_height=500,
                    osd_width=500,
                    info_json_uri='info.json')
    return make_response(Template(template).safe_substitute(settings))
python
def osd_page_handler(config=None, identifier=None, prefix=None, **args):
    """Produce an HTML page with an OpenSeadragon viewer for identifier.

    Arguments:
        config - Config object for this IIIF handler
        identifier - identifier of image/generator
        prefix - path prefix
        **args - other arguments ignored
    """
    template_path = os.path.join(os.path.dirname(__file__),
                                 'templates', 'testserver_osd.html')
    with open(template_path, 'r') as fh:
        template = fh.read()
    settings = dict(prefix=prefix,
                    identifier=identifier,
                    api_version=config.api_version,
                    osd_version='2.0.0',
                    osd_uri='/openseadragon200/openseadragon.min.js',
                    osd_images_prefix='/openseadragon200/images',
                    osd_height=500,
                    osd_width=500,
                    info_json_uri='info.json')
    return make_response(Template(template).safe_substitute(settings))
[ "def", "osd_page_handler", "(", "config", "=", "None", ",", "identifier", "=", "None", ",", "prefix", "=", "None", ",", "*", "*", "args", ")", ":", "template_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "_...
Flask handler to produce HTML response for OpenSeadragon view of identifier. Arguments: config - Config object for this IIIF handler identifier - identifier of image/generator prefix - path prefix **args - other arguments ignored
[ "Flask", "handler", "to", "produce", "HTML", "response", "for", "OpenSeadragon", "view", "of", "identifier", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L168-L189
7,277
zimeon/iiif
iiif/flask_utils.py
iiif_info_handler
def iiif_info_handler(prefix=None, identifier=None,
                      config=None, klass=None, auth=None, **args):
    """Handle an IIIF Image Information (info.json) request.

    Serves the information response when no auth is configured, when the
    request is already for a degraded image, or when the client is
    authorized. Responds 401 for authenticated-but-unauthorized clients,
    and otherwise redirects to the degraded copy's info.json.
    """
    allowed = (not auth) or degraded_request(identifier) or auth.info_authz()
    if allowed:
        if auth:
            logging.debug("Authorized for image %s" % identifier)
        handler = IIIFHandler(prefix, identifier, config, klass, auth)
        try:
            return handler.image_information_response()
        except IIIFError as e:
            return handler.error_response(e)
    if auth.info_authn():
        # Authenticated but not authorized
        abort(401)
    # Neither authenticated nor authorized: point client at degraded copy
    degraded_uri = (host_port_prefix(config.host, config.port, prefix) +
                    '/' + identifier + '-deg/info.json')
    response = redirect(degraded_uri)
    response.headers['Access-control-allow-origin'] = '*'
    return response
python
def iiif_info_handler(prefix=None, identifier=None,
                      config=None, klass=None, auth=None, **args):
    """Handle an IIIF Image Information (info.json) request.

    Serves the information response when no auth is configured, when the
    request is already for a degraded image, or when the client is
    authorized. Responds 401 for authenticated-but-unauthorized clients,
    and otherwise redirects to the degraded copy's info.json.
    """
    allowed = (not auth) or degraded_request(identifier) or auth.info_authz()
    if allowed:
        if auth:
            logging.debug("Authorized for image %s" % identifier)
        handler = IIIFHandler(prefix, identifier, config, klass, auth)
        try:
            return handler.image_information_response()
        except IIIFError as e:
            return handler.error_response(e)
    if auth.info_authn():
        # Authenticated but not authorized
        abort(401)
    # Neither authenticated nor authorized: point client at degraded copy
    degraded_uri = (host_port_prefix(config.host, config.port, prefix) +
                    '/' + identifier + '-deg/info.json')
    response = redirect(degraded_uri)
    response.headers['Access-control-allow-origin'] = '*'
    return response
[ "def", "iiif_info_handler", "(", "prefix", "=", "None", ",", "identifier", "=", "None", ",", "config", "=", "None", ",", "klass", "=", "None", ",", "auth", "=", "None", ",", "*", "*", "args", ")", ":", "if", "(", "not", "auth", "or", "degraded_reques...
Handler for IIIF Image Information requests.
[ "Handler", "for", "IIIF", "Image", "Information", "requests", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L384-L404
7,278
zimeon/iiif
iiif/flask_utils.py
iiif_image_handler
def iiif_image_handler(prefix=None, identifier=None,
                       path=None, config=None, klass=None, auth=None, **args):
    """Handle an IIIF Image request.

    Serves the image when no auth is configured, when the request is for
    a degraded image, or when the client is authorized; otherwise
    redirects to the degraded version of the resource.
    """
    if (not auth) or degraded_request(identifier) or auth.image_authz():
        if auth:
            logging.debug("Authorized for image %s" % identifier)
        handler = IIIFHandler(prefix, identifier, config, klass, auth)
        try:
            return handler.image_request_response(path)
        except IIIFError as e:
            return handler.error_response(e)
    # Not authorized (covers authn-but-not-authz too): redirect to degraded
    degraded_uri = (host_port_prefix(config.host, config.port, prefix) +
                    '/' + identifier + '-deg/' + path)
    logging.info("Redirection to degraded: %s" % degraded_uri)
    response = redirect(degraded_uri)
    response.headers['Access-control-allow-origin'] = '*'
    return response
python
def iiif_image_handler(prefix=None, identifier=None,
                       path=None, config=None, klass=None, auth=None, **args):
    """Handle an IIIF Image request.

    Serves the image when no auth is configured, when the request is for
    a degraded image, or when the client is authorized; otherwise
    redirects to the degraded version of the resource.
    """
    if (not auth) or degraded_request(identifier) or auth.image_authz():
        if auth:
            logging.debug("Authorized for image %s" % identifier)
        handler = IIIFHandler(prefix, identifier, config, klass, auth)
        try:
            return handler.image_request_response(path)
        except IIIFError as e:
            return handler.error_response(e)
    # Not authorized (covers authn-but-not-authz too): redirect to degraded
    degraded_uri = (host_port_prefix(config.host, config.port, prefix) +
                    '/' + identifier + '-deg/' + path)
    logging.info("Redirection to degraded: %s" % degraded_uri)
    response = redirect(degraded_uri)
    response.headers['Access-control-allow-origin'] = '*'
    return response
[ "def", "iiif_image_handler", "(", "prefix", "=", "None", ",", "identifier", "=", "None", ",", "path", "=", "None", ",", "config", "=", "None", ",", "klass", "=", "None", ",", "auth", "=", "None", ",", "*", "*", "args", ")", ":", "if", "(", "not", ...
Handler for IIIF Image Requests. Behaviour in the non-authn or non-authz case is to return 403.
[ "Handler", "for", "IIIF", "Image", "Requests", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L408-L431
7,279
zimeon/iiif
iiif/flask_utils.py
parse_accept_header
def parse_accept_header(accept):
    """Parse an HTTP Accept header value.

    Returns a list of (media_type, params_tuple, q) triples ordered by
    descending q value. Adapted from
    <https://djangosnippets.org/snippets/1042/>
    """
    parsed = []
    for media_range in accept.split(","):
        pieces = media_range.split(";")
        media_type = pieces.pop(0).strip()
        params = []
        q = 1.0
        for piece in pieces:
            (key, value) = piece.lstrip().split("=", 1)
            if key == "q":
                q = float(value)
            else:
                params.append((key, value))
        parsed.append((media_type, tuple(params), q))
    return sorted(parsed, key=lambda entry: -entry[2])
python
def parse_accept_header(accept):
    """Parse an HTTP Accept header value.

    Returns a list of (media_type, params_tuple, q) triples ordered by
    descending q value. Adapted from
    <https://djangosnippets.org/snippets/1042/>
    """
    parsed = []
    for media_range in accept.split(","):
        pieces = media_range.split(";")
        media_type = pieces.pop(0).strip()
        params = []
        q = 1.0
        for piece in pieces:
            (key, value) = piece.lstrip().split("=", 1)
            if key == "q":
                q = float(value)
            else:
                params.append((key, value))
        parsed.append((media_type, tuple(params), q))
    return sorted(parsed, key=lambda entry: -entry[2])
[ "def", "parse_accept_header", "(", "accept", ")", ":", "result", "=", "[", "]", "for", "media_range", "in", "accept", ".", "split", "(", "\",\"", ")", ":", "parts", "=", "media_range", ".", "split", "(", "\";\"", ")", "media_type", "=", "parts", ".", "...
Parse an HTTP Accept header. Parses *accept*, returning a list with pairs of (media_type, q_value), ordered by q values. Adapted from <https://djangosnippets.org/snippets/1042/>
[ "Parse", "an", "HTTP", "Accept", "header", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L450-L472
7,280
zimeon/iiif
iiif/flask_utils.py
parse_authorization_header
def parse_authorization_header(value):
    """Parse an HTTP Authorization header value.

    Returns None on failure; on success returns a dict with type='basic'
    (plus username and password) or type='digest' (plus the digest
    parameters). See:
    <http://stackoverflow.com/questions/1349367/parse-an-http-request-authorization-header-with-python>
    """
    try:
        (auth_type, auth_info) = value.split(' ', 1)
    except ValueError:
        return None
    auth_type = auth_type.lower()
    if auth_type == 'basic':
        try:
            # b64decode gives bytes in python3
            decoded = base64.b64decode(auth_info).decode('utf-8')
            (username, password) = decoded.split(':', 1)
        except (ValueError, TypeError):  # py3, py2
            return None
        return {'type': 'basic', 'username': username, 'password': password}
    if auth_type == 'digest':
        try:
            auth_map = parse_keqv_list(parse_http_list(auth_info))
        except ValueError:
            return None
        logging.debug(auth_map)
        for required in ('username', 'realm', 'nonce', 'uri', 'response'):
            if required not in auth_map:
                return None
        if 'qop' in auth_map and ('nc' not in auth_map or 'cnonce' not in auth_map):
            return None
        auth_map['type'] = 'digest'
        return auth_map
    # unknown auth type
    return None
python
def parse_authorization_header(value):
    """Parse an HTTP Authorization header value.

    Returns None on failure; on success returns a dict with type='basic'
    (plus username and password) or type='digest' (plus the digest
    parameters).
    """
    try:
        (auth_type, auth_info) = value.split(' ', 1)
    except ValueError:
        return None
    auth_type = auth_type.lower()
    if auth_type == 'basic':
        try:
            # b64decode gives bytes in python3
            decoded = base64.b64decode(auth_info).decode('utf-8')
            (username, password) = decoded.split(':', 1)
        except (ValueError, TypeError):  # py3, py2
            return None
        return {'type': 'basic', 'username': username, 'password': password}
    if auth_type == 'digest':
        try:
            auth_map = parse_keqv_list(parse_http_list(auth_info))
        except ValueError:
            return None
        logging.debug(auth_map)
        for required in ('username', 'realm', 'nonce', 'uri', 'response'):
            if required not in auth_map:
                return None
        if 'qop' in auth_map and ('nc' not in auth_map or 'cnonce' not in auth_map):
            return None
        auth_map['type'] = 'digest'
        return auth_map
    # unknown auth type
    return None
[ "def", "parse_authorization_header", "(", "value", ")", ":", "try", ":", "(", "auth_type", ",", "auth_info", ")", "=", "value", ".", "split", "(", "' '", ",", "1", ")", "auth_type", "=", "auth_type", ".", "lower", "(", ")", "except", "ValueError", ":", ...
Parse the Authenticate header. Returns nothing on failure, opts hash on success with type='basic' or 'digest' and other params. <http://nullege.com/codes/search/werkzeug.http.parse_authorization_header> <http://stackoverflow.com/questions/1349367/parse-an-http-request-authorization-header-with-python> <http://bugs.python.org/file34041/0001-Add-an-authorization-header-to-the-initial-request.patch>
[ "Parse", "the", "Authenticate", "header", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L475-L513
7,281
zimeon/iiif
iiif/flask_utils.py
do_conneg
def do_conneg(accept, supported):
    """Negotiate a MIME type between an Accept header and a supported list.

    Arguments:
        accept - HTTP Accept header value
        supported - list of MIME types supported by the server

    Returns the supported MIME type with the highest q value in the
    request, else None.

    FIXME - Should replace this with negotiator2
    """
    for entry in parse_accept_header(accept):
        if entry[0] in supported:
            return entry[0]
    return None
python
def do_conneg(accept, supported):
    """Negotiate a MIME type between an Accept header and a supported list.

    Returns the supported MIME type with the highest q value in the
    request, else None.

    FIXME - Should replace this with negotiator2
    """
    for entry in parse_accept_header(accept):
        if entry[0] in supported:
            return entry[0]
    return None
[ "def", "do_conneg", "(", "accept", ",", "supported", ")", ":", "for", "result", "in", "parse_accept_header", "(", "accept", ")", ":", "mime_type", "=", "result", "[", "0", "]", "if", "(", "mime_type", "in", "supported", ")", ":", "return", "mime_type", "...
Parse accept header and look for preferred type in supported list. Arguments: accept - HTTP Accept header supported - list of MIME type supported by the server Returns: supported MIME type with highest q value in request, else None. FIXME - Should replace this with negotiator2
[ "Parse", "accept", "header", "and", "look", "for", "preferred", "type", "in", "supported", "list", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L516-L532
7,282
zimeon/iiif
iiif/flask_utils.py
setup_auth_paths
def setup_auth_paths(app, auth, prefix, params):
    """Register Flask URL rules for the auth handlers under prefix."""
    base = urljoin('/', prefix + '/')  # Must end in slash

    def add(tail, name, handler):
        # Endpoint name is the prefix plus the handler's conventional name
        app.add_url_rule(base + tail, prefix + name, handler, defaults=params)

    add('login', 'login_handler', auth.login_handler)
    add('logout', 'logout_handler', auth.logout_handler)
    if auth.client_id_handler:
        add('client', 'client_id_handler', auth.client_id_handler)
    add('token', 'access_token_handler', auth.access_token_handler)
    if auth.home_handler:
        add('home', 'home_handler', auth.home_handler)
python
def setup_auth_paths(app, auth, prefix, params):
    """Register Flask URL rules for the auth handlers under prefix."""
    base = urljoin('/', prefix + '/')  # Must end in slash

    def add(tail, name, handler):
        # Endpoint name is the prefix plus the handler's conventional name
        app.add_url_rule(base + tail, prefix + name, handler, defaults=params)

    add('login', 'login_handler', auth.login_handler)
    add('logout', 'logout_handler', auth.logout_handler)
    if auth.client_id_handler:
        add('client', 'client_id_handler', auth.client_id_handler)
    add('token', 'access_token_handler', auth.access_token_handler)
    if auth.home_handler:
        add('home', 'home_handler', auth.home_handler)
[ "def", "setup_auth_paths", "(", "app", ",", "auth", ",", "prefix", ",", "params", ")", ":", "base", "=", "urljoin", "(", "'/'", ",", "prefix", "+", "'/'", ")", "# Must end in slash", "app", ".", "add_url_rule", "(", "base", "+", "'login'", ",", "prefix",...
Add URL rules for auth paths.
[ "Add", "URL", "rules", "for", "auth", "paths", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L537-L551
7,283
zimeon/iiif
iiif/flask_utils.py
make_prefix
def make_prefix(api_version, manipulator, auth_type):
    """Build the URL prefix string from configuration parameters.

    The prefix is "<api_version>_<manipulator>", with "_<auth_type>"
    appended when an auth type other than 'none' is set.
    """
    prefix = "%s_%s" % (api_version, manipulator)
    if not auth_type or auth_type == 'none':
        return prefix
    return prefix + '_' + auth_type
python
def make_prefix(api_version, manipulator, auth_type):
    """Build the URL prefix string from configuration parameters.

    The prefix is "<api_version>_<manipulator>", with "_<auth_type>"
    appended when an auth type other than 'none' is set.
    """
    prefix = "%s_%s" % (api_version, manipulator)
    if not auth_type or auth_type == 'none':
        return prefix
    return prefix + '_' + auth_type
[ "def", "make_prefix", "(", "api_version", ",", "manipulator", ",", "auth_type", ")", ":", "prefix", "=", "\"%s_%s\"", "%", "(", "api_version", ",", "manipulator", ")", "if", "(", "auth_type", "and", "auth_type", "!=", "'none'", ")", ":", "prefix", "+=", "'...
Make prefix string based on configuration parameters.
[ "Make", "prefix", "string", "based", "on", "configuration", "parameters", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L554-L559
7,284
zimeon/iiif
iiif/flask_utils.py
split_comma_argument
def split_comma_argument(comma_sep_str):
    """Split a comma separated option into a list, dropping empty terms."""
    return [term for term in comma_sep_str.split(',') if term]
python
def split_comma_argument(comma_sep_str):
    """Split a comma separated option into a list, dropping empty terms."""
    return [term for term in comma_sep_str.split(',') if term]
[ "def", "split_comma_argument", "(", "comma_sep_str", ")", ":", "terms", "=", "[", "]", "for", "term", "in", "comma_sep_str", ".", "split", "(", "','", ")", ":", "if", "term", ":", "terms", ".", "append", "(", "term", ")", "return", "terms" ]
Split a comma separated option into a list.
[ "Split", "a", "comma", "separated", "option", "into", "a", "list", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L562-L568
7,285
zimeon/iiif
iiif/flask_utils.py
write_pid_file
def write_pid_file():
    """Write a file with the PID of this server instance.

    Call when setting up a command line testserver. The file is named
    after the script (.py replaced by .pid) in the current directory.
    """
    pidfile = os.path.basename(sys.argv[0])[:-3] + '.pid'  # strip .py, add .pid
    with open(pidfile, 'w') as fh:
        fh.write("%d\n" % os.getpid())
        # FIX: removed redundant fh.close(); the with-block closes the file
python
def write_pid_file():
    """Write a file with the PID of this server instance.

    Call when setting up a command line testserver. The file is named
    after the script (.py replaced by .pid) in the current directory.
    """
    pidfile = os.path.basename(sys.argv[0])[:-3] + '.pid'  # strip .py, add .pid
    with open(pidfile, 'w') as fh:
        fh.write("%d\n" % os.getpid())
        # FIX: removed redundant fh.close(); the with-block closes the file
[ "def", "write_pid_file", "(", ")", ":", "pidfile", "=", "os", ".", "path", ".", "basename", "(", "sys", ".", "argv", "[", "0", "]", ")", "[", ":", "-", "3", "]", "+", "'.pid'", "# strip .py, add .pid", "with", "open", "(", "pidfile", ",", "'w'", ")...
Write a file with the PID of this server instance. Call when setting up a command line testserver.
[ "Write", "a", "file", "with", "the", "PID", "of", "this", "server", "instance", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L739-L747
7,286
zimeon/iiif
iiif/flask_utils.py
setup_app
def setup_app(app, cfg):
    """Set up the Flask app, handling reverse proxy setup if configured.

    Arguments:
        app - Flask application
        cfg - configuration data

    When both cfg.app_host and cfg.app_port are set, the app is wrapped
    for reverse-proxy operation; setting only one of them is an error.
    Otherwise app_host/app_port default to cfg.host/cfg.port.
    """
    if cfg.app_host and cfg.app_port:
        # Running behind a reverse proxy fronting cfg.host:cfg.port
        logging.warning("Reverse proxy for service at http://%s:%d/ ..." % (cfg.host, cfg.port))
        app.wsgi_app = ReverseProxied(app.wsgi_app, cfg.host)
    elif cfg.app_host or cfg.app_port:
        logging.critical("Must specify both app-host and app-port for reverse proxy configuration, aborting")
        sys.exit(1)
    else:
        # No reverse proxy: serve directly on the configured host/port
        cfg.app_host, cfg.app_port = cfg.host, cfg.port
    logging.warning("Setup server on http://%s:%d/ ..." % (cfg.app_host, cfg.app_port))
    return app
python
def setup_app(app, cfg):
    """Set up the Flask app, handling reverse proxy setup if configured.

    When both cfg.app_host and cfg.app_port are set, the app is wrapped
    for reverse-proxy operation; setting only one of them is an error.
    Otherwise app_host/app_port default to cfg.host/cfg.port.
    """
    if cfg.app_host and cfg.app_port:
        # Running behind a reverse proxy fronting cfg.host:cfg.port
        logging.warning("Reverse proxy for service at http://%s:%d/ ..." % (cfg.host, cfg.port))
        app.wsgi_app = ReverseProxied(app.wsgi_app, cfg.host)
    elif cfg.app_host or cfg.app_port:
        logging.critical("Must specify both app-host and app-port for reverse proxy configuration, aborting")
        sys.exit(1)
    else:
        # No reverse proxy: serve directly on the configured host/port
        cfg.app_host, cfg.app_port = cfg.host, cfg.port
    logging.warning("Setup server on http://%s:%d/ ..." % (cfg.app_host, cfg.app_port))
    return app
[ "def", "setup_app", "(", "app", ",", "cfg", ")", ":", "# Set up app_host and app_port in case that we are running", "# under reverse proxy setup, otherwise they default to", "# config.host and config.port.", "if", "(", "cfg", ".", "app_host", "and", "cfg", ".", "app_port", ")...
Setup Flask app and handle reverse proxy setup if configured. Arguments: app - Flask application cfg - configuration data
[ "Setup", "Flask", "app", "and", "handle", "reverse", "proxy", "setup", "if", "configured", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L750-L770
7,287
zimeon/iiif
iiif/flask_utils.py
IIIFHandler.server_and_prefix
def server_and_prefix(self):
    """Server and prefix URI built from the config host, port and prefix."""
    cfg = self.config
    return host_port_prefix(cfg.host, cfg.port, self.prefix)
python
def server_and_prefix(self):
    """Server and prefix URI built from the config host, port and prefix."""
    cfg = self.config
    return host_port_prefix(cfg.host, cfg.port, self.prefix)
[ "def", "server_and_prefix", "(", "self", ")", ":", "return", "(", "host_port_prefix", "(", "self", ".", "config", ".", "host", ",", "self", ".", "config", ".", "port", ",", "self", ".", "prefix", ")", ")" ]
Server and prefix from config.
[ "Server", "and", "prefix", "from", "config", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L231-L233
7,288
zimeon/iiif
iiif/flask_utils.py
IIIFHandler.json_mime_type
def json_mime_type(self):
    """Return the MIME type for a JSON response.

    For version 2.0+ the server must return the json-ld MIME type if that
    format is requested; implemented for 1.1 also.
    http://iiif.io/api/image/2.1/#information-request
    """
    preferred = "application/json"
    if self.api_version >= '1.1' and 'Accept' in request.headers:
        negotiated = do_conneg(request.headers['Accept'],
                               ['application/ld+json'])
        if negotiated:
            preferred = negotiated
    return preferred
python
def json_mime_type(self):
    """Return the MIME type for a JSON response.

    For version 2.0+ the server must return the json-ld MIME type if that
    format is requested; implemented for 1.1 also.
    http://iiif.io/api/image/2.1/#information-request
    """
    preferred = "application/json"
    if self.api_version >= '1.1' and 'Accept' in request.headers:
        negotiated = do_conneg(request.headers['Accept'],
                               ['application/ld+json'])
        if negotiated:
            preferred = negotiated
    return preferred
[ "def", "json_mime_type", "(", "self", ")", ":", "mime_type", "=", "\"application/json\"", "if", "(", "self", ".", "api_version", ">=", "'1.1'", "and", "'Accept'", "in", "request", ".", "headers", ")", ":", "mime_type", "=", "do_conneg", "(", "request", ".", ...
Return the MIME type for a JSON response. For version 2.0+ the server must return json-ld MIME type if that format is requested. Implement for 1.1 also. http://iiif.io/api/image/2.1/#information-request
[ "Return", "the", "MIME", "type", "for", "a", "JSON", "response", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L236-L247
7,289
zimeon/iiif
iiif/flask_utils.py
IIIFHandler.file
def file(self):
    """Filename of the source for the current identifier.

    Looks for a generator module when the manipulator is 'gen', otherwise
    an image file; raises IIIFError (404) listing available identifiers
    when nothing matches.
    """
    if self.config.klass_name == 'gen':
        (base_dir, extensions) = (self.config.generator_dir, ['.py'])
    else:
        (base_dir, extensions) = (self.config.image_dir,
                                  ['.jpg', '.png', '.tif'])
    for ext in extensions:
        candidate = os.path.join(base_dir, self.identifier + ext)
        if os.path.isfile(candidate):
            return candidate
    # failed, show list of available identifiers as error
    available = "\n ".join(identifiers(self.config))
    raise IIIFError(code=404, parameter="identifier",
                    text="Image resource '" + self.identifier +
                    "' not found. Local resources available:" + available + "\n")
python
def file(self):
    """Filename of the source for the current identifier.

    Looks for a generator module when the manipulator is 'gen', otherwise
    an image file; raises IIIFError (404) listing available identifiers
    when nothing matches.
    """
    if self.config.klass_name == 'gen':
        (base_dir, extensions) = (self.config.generator_dir, ['.py'])
    else:
        (base_dir, extensions) = (self.config.image_dir,
                                  ['.jpg', '.png', '.tif'])
    for ext in extensions:
        candidate = os.path.join(base_dir, self.identifier + ext)
        if os.path.isfile(candidate):
            return candidate
    # failed, show list of available identifiers as error
    available = "\n ".join(identifiers(self.config))
    raise IIIFError(code=404, parameter="identifier",
                    text="Image resource '" + self.identifier +
                    "' not found. Local resources available:" + available + "\n")
[ "def", "file", "(", "self", ")", ":", "file", "=", "None", "if", "(", "self", ".", "config", ".", "klass_name", "==", "'gen'", ")", ":", "for", "ext", "in", "[", "'.py'", "]", ":", "file", "=", "os", ".", "path", ".", "join", "(", "self", ".", ...
Filename property for the source image for the current identifier.
[ "Filename", "property", "for", "the", "source", "image", "for", "the", "current", "identifier", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L250-L268
7,290
zimeon/iiif
iiif/flask_utils.py
IIIFHandler.add_compliance_header
def add_compliance_header(self):
    """Add the IIIF compliance level Link header to the response headers."""
    uri = self.manipulator.compliance_uri
    if uri is not None:
        self.headers['Link'] = '<' + uri + '>;rel="profile"'
python
def add_compliance_header(self):
    """Add the IIIF compliance level Link header to the response headers."""
    uri = self.manipulator.compliance_uri
    if uri is not None:
        self.headers['Link'] = '<' + uri + '>;rel="profile"'
[ "def", "add_compliance_header", "(", "self", ")", ":", "if", "(", "self", ".", "manipulator", ".", "compliance_uri", "is", "not", "None", ")", ":", "self", ".", "headers", "[", "'Link'", "]", "=", "'<'", "+", "self", ".", "manipulator", ".", "compliance_...
Add IIIF Compliance level header to response.
[ "Add", "IIIF", "Compliance", "level", "header", "to", "response", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L270-L274
7,291
zimeon/iiif
iiif/flask_utils.py
IIIFHandler.make_response
def make_response(self, content, code=200, headers=None):
    """Wrapper around Flask's make_response that merges in self.headers.

    Any entries in *headers* are copied into self.headers before the
    response is built.
    """
    if headers:
        for (name, value) in headers.items():
            self.headers[name] = value
    return make_response(content, code, self.headers)
python
def make_response(self, content, code=200, headers=None):
    """Wrapper around Flask's make_response that merges in self.headers.

    Any entries in *headers* are copied into self.headers before the
    response is built.
    """
    if headers:
        for (name, value) in headers.items():
            self.headers[name] = value
    return make_response(content, code, self.headers)
[ "def", "make_response", "(", "self", ",", "content", ",", "code", "=", "200", ",", "headers", "=", "None", ")", ":", "if", "headers", ":", "for", "header", "in", "headers", ":", "self", ".", "headers", "[", "header", "]", "=", "headers", "[", "header...
Wrapper around Flask.make_response which also adds any local headers.
[ "Wrapper", "around", "Flask", ".", "make_response", "which", "also", "adds", "any", "local", "headers", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L276-L281
7,292
zimeon/iiif
iiif/flask_utils.py
IIIFHandler.image_information_response
def image_information_response(self): """Parse image information request and create response.""" dr = degraded_request(self.identifier) if (dr): self.logger.info("image_information: degraded %s -> %s" % (self.identifier, dr)) self.degraded = self.identifier self.identifier = dr else: self.logger.info("image_information: %s" % (self.identifier)) # get size self.manipulator.srcfile = self.file self.manipulator.do_first() # most of info.json comes from config, a few things specific to image info = {'tile_height': self.config.tile_height, 'tile_width': self.config.tile_width, 'scale_factors': self.config.scale_factors } # calculate scale factors if not hard-coded if ('auto' in self.config.scale_factors): info['scale_factors'] = self.manipulator.scale_factors( self.config.tile_width, self.config.tile_height) i = IIIFInfo(conf=info, api_version=self.api_version) i.server_and_prefix = self.server_and_prefix i.identifier = self.iiif.identifier i.width = self.manipulator.width i.height = self.manipulator.height if (self.api_version >= '2.0'): # FIXME - should come from manipulator i.qualities = ["default", "color", "gray"] else: # FIXME - should come from manipulator i.qualities = ["native", "color", "gray"] i.formats = ["jpg", "png"] # FIXME - should come from manipulator if (self.auth): self.auth.add_services(i) return self.make_response(i.as_json(), headers={"Content-Type": self.json_mime_type})
python
def image_information_response(self): dr = degraded_request(self.identifier) if (dr): self.logger.info("image_information: degraded %s -> %s" % (self.identifier, dr)) self.degraded = self.identifier self.identifier = dr else: self.logger.info("image_information: %s" % (self.identifier)) # get size self.manipulator.srcfile = self.file self.manipulator.do_first() # most of info.json comes from config, a few things specific to image info = {'tile_height': self.config.tile_height, 'tile_width': self.config.tile_width, 'scale_factors': self.config.scale_factors } # calculate scale factors if not hard-coded if ('auto' in self.config.scale_factors): info['scale_factors'] = self.manipulator.scale_factors( self.config.tile_width, self.config.tile_height) i = IIIFInfo(conf=info, api_version=self.api_version) i.server_and_prefix = self.server_and_prefix i.identifier = self.iiif.identifier i.width = self.manipulator.width i.height = self.manipulator.height if (self.api_version >= '2.0'): # FIXME - should come from manipulator i.qualities = ["default", "color", "gray"] else: # FIXME - should come from manipulator i.qualities = ["native", "color", "gray"] i.formats = ["jpg", "png"] # FIXME - should come from manipulator if (self.auth): self.auth.add_services(i) return self.make_response(i.as_json(), headers={"Content-Type": self.json_mime_type})
[ "def", "image_information_response", "(", "self", ")", ":", "dr", "=", "degraded_request", "(", "self", ".", "identifier", ")", "if", "(", "dr", ")", ":", "self", ".", "logger", ".", "info", "(", "\"image_information: degraded %s -> %s\"", "%", "(", "self", ...
Parse image information request and create response.
[ "Parse", "image", "information", "request", "and", "create", "response", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L283-L320
7,293
zimeon/iiif
iiif/flask_utils.py
IIIFHandler.image_request_response
def image_request_response(self, path): """Parse image request and create response.""" # Parse the request in path if (len(path) > 1024): raise IIIFError(code=414, text="URI Too Long: Max 1024 chars, got %d\n" % len(path)) try: self.iiif.identifier = self.identifier self.iiif.parse_url(path) except IIIFRequestPathError as e: # Reraise as IIIFError with code=404 because we can't tell # whether there was an encoded slash in the identifier or # whether there was a bad number of path segments. raise IIIFError(code=404, text=e.text) except IIIFError as e: # Pass through raise e except Exception as e: # Something completely unexpected => 500 raise IIIFError(code=500, text="Internal Server Error: unexpected exception parsing request (" + str(e) + ")") dr = degraded_request(self.identifier) if (dr): self.logger.info("image_request: degraded %s -> %s" % (self.identifier, dr)) self.degraded = self.identifier self.identifier = dr self.iiif.quality = 'gray' else: # Parsed request OK, attempt to fulfill self.logger.info("image_request: %s" % (self.identifier)) file = self.file self.manipulator.srcfile = file self.manipulator.do_first() if (self.api_version < '2.0' and self.iiif.format is None and 'Accept' in request.headers): # In 1.0 and 1.1 conneg was specified as an alternative to format, see: # http://iiif.io/api/image/1.0/#format # http://iiif.io/api/image/1.1/#parameters-format formats = {'image/jpeg': 'jpg', 'image/tiff': 'tif', 'image/png': 'png', 'image/gif': 'gif', 'image/jp2': 'jps', 'application/pdf': 'pdf'} accept = do_conneg(request.headers['Accept'], list(formats.keys())) # Ignore Accept header if not recognized, should this be an error # instead? if (accept in formats): self.iiif.format = formats[accept] (outfile, mime_type) = self.manipulator.derive(file, self.iiif) # FIXME - find efficient way to serve file with headers self.add_compliance_header() return send_file(outfile, mimetype=mime_type)
python
def image_request_response(self, path): # Parse the request in path if (len(path) > 1024): raise IIIFError(code=414, text="URI Too Long: Max 1024 chars, got %d\n" % len(path)) try: self.iiif.identifier = self.identifier self.iiif.parse_url(path) except IIIFRequestPathError as e: # Reraise as IIIFError with code=404 because we can't tell # whether there was an encoded slash in the identifier or # whether there was a bad number of path segments. raise IIIFError(code=404, text=e.text) except IIIFError as e: # Pass through raise e except Exception as e: # Something completely unexpected => 500 raise IIIFError(code=500, text="Internal Server Error: unexpected exception parsing request (" + str(e) + ")") dr = degraded_request(self.identifier) if (dr): self.logger.info("image_request: degraded %s -> %s" % (self.identifier, dr)) self.degraded = self.identifier self.identifier = dr self.iiif.quality = 'gray' else: # Parsed request OK, attempt to fulfill self.logger.info("image_request: %s" % (self.identifier)) file = self.file self.manipulator.srcfile = file self.manipulator.do_first() if (self.api_version < '2.0' and self.iiif.format is None and 'Accept' in request.headers): # In 1.0 and 1.1 conneg was specified as an alternative to format, see: # http://iiif.io/api/image/1.0/#format # http://iiif.io/api/image/1.1/#parameters-format formats = {'image/jpeg': 'jpg', 'image/tiff': 'tif', 'image/png': 'png', 'image/gif': 'gif', 'image/jp2': 'jps', 'application/pdf': 'pdf'} accept = do_conneg(request.headers['Accept'], list(formats.keys())) # Ignore Accept header if not recognized, should this be an error # instead? if (accept in formats): self.iiif.format = formats[accept] (outfile, mime_type) = self.manipulator.derive(file, self.iiif) # FIXME - find efficient way to serve file with headers self.add_compliance_header() return send_file(outfile, mimetype=mime_type)
[ "def", "image_request_response", "(", "self", ",", "path", ")", ":", "# Parse the request in path", "if", "(", "len", "(", "path", ")", ">", "1024", ")", ":", "raise", "IIIFError", "(", "code", "=", "414", ",", "text", "=", "\"URI Too Long: Max 1024 chars, got...
Parse image request and create response.
[ "Parse", "image", "request", "and", "create", "response", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L322-L373
7,294
zimeon/iiif
iiif/flask_utils.py
IIIFHandler.error_response
def error_response(self, e): """Make response for an IIIFError e. Also add compliance header. """ self.add_compliance_header() return self.make_response(*e.image_server_response(self.api_version))
python
def error_response(self, e): self.add_compliance_header() return self.make_response(*e.image_server_response(self.api_version))
[ "def", "error_response", "(", "self", ",", "e", ")", ":", "self", ".", "add_compliance_header", "(", ")", "return", "self", ".", "make_response", "(", "*", "e", ".", "image_server_response", "(", "self", ".", "api_version", ")", ")" ]
Make response for an IIIFError e. Also add compliance header.
[ "Make", "response", "for", "an", "IIIFError", "e", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L375-L381
7,295
zimeon/iiif
iiif_cgi.py
IIIFRequestHandler.error_response
def error_response(self, code, content=''): """Construct and send error response.""" self.send_response(code) self.send_header('Content-Type', 'text/xml') self.add_compliance_header() self.end_headers() self.wfile.write(content)
python
def error_response(self, code, content=''): self.send_response(code) self.send_header('Content-Type', 'text/xml') self.add_compliance_header() self.end_headers() self.wfile.write(content)
[ "def", "error_response", "(", "self", ",", "code", ",", "content", "=", "''", ")", ":", "self", ".", "send_response", "(", "code", ")", "self", ".", "send_header", "(", "'Content-Type'", ",", "'text/xml'", ")", "self", ".", "add_compliance_header", "(", ")...
Construct and send error response.
[ "Construct", "and", "send", "error", "response", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif_cgi.py#L64-L70
7,296
zimeon/iiif
iiif_cgi.py
IIIFRequestHandler.do_GET
def do_GET(self): """Implement the HTTP GET method. The bulk of this code is wrapped in a big try block and anywhere within the code may raise an IIIFError which then results in an IIIF error response (section 5 of spec). """ self.compliance_uri = None self.iiif = IIIFRequest(baseurl='/') try: (of, mime_type) = self.do_GET_body() if (not of): raise IIIFError("Unexpected failure to open result image") self.send_response(200, 'OK') if (mime_type is not None): self.send_header('Content-Type', mime_type) self.add_compliance_header() self.end_headers() while (1): buffer = of.read(8192) if (not buffer): break self.wfile.write(buffer) # Now cleanup self.manipulator.cleanup() except IIIFError as e: if (self.debug): e.text += "\nRequest parameters:\n" + str(self.iiif) self.error_response(e.code, str(e)) except Exception as ue: # Anything else becomes a 500 Internal Server Error e = IIIFError(code=500, text="Something went wrong... %s ---- %s.\n" % (str(ue), traceback.format_exc())) if (self.debug): e.text += "\nRequest parameters:\n" + str(self.iiif) self.error_response(e.code, str(e))
python
def do_GET(self): self.compliance_uri = None self.iiif = IIIFRequest(baseurl='/') try: (of, mime_type) = self.do_GET_body() if (not of): raise IIIFError("Unexpected failure to open result image") self.send_response(200, 'OK') if (mime_type is not None): self.send_header('Content-Type', mime_type) self.add_compliance_header() self.end_headers() while (1): buffer = of.read(8192) if (not buffer): break self.wfile.write(buffer) # Now cleanup self.manipulator.cleanup() except IIIFError as e: if (self.debug): e.text += "\nRequest parameters:\n" + str(self.iiif) self.error_response(e.code, str(e)) except Exception as ue: # Anything else becomes a 500 Internal Server Error e = IIIFError(code=500, text="Something went wrong... %s ---- %s.\n" % (str(ue), traceback.format_exc())) if (self.debug): e.text += "\nRequest parameters:\n" + str(self.iiif) self.error_response(e.code, str(e))
[ "def", "do_GET", "(", "self", ")", ":", "self", ".", "compliance_uri", "=", "None", "self", ".", "iiif", "=", "IIIFRequest", "(", "baseurl", "=", "'/'", ")", "try", ":", "(", "of", ",", "mime_type", ")", "=", "self", ".", "do_GET_body", "(", ")", "...
Implement the HTTP GET method. The bulk of this code is wrapped in a big try block and anywhere within the code may raise an IIIFError which then results in an IIIF error response (section 5 of spec).
[ "Implement", "the", "HTTP", "GET", "method", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif_cgi.py#L78-L113
7,297
zimeon/iiif
iiif_cgi.py
IIIFRequestHandler.do_GET_body
def do_GET_body(self): """Create body of GET.""" iiif = self.iiif if (len(self.path) > 1024): raise IIIFError(code=414, text="URI Too Long: Max 1024 chars, got %d\n" % len(self.path)) try: # self.path has leading / then identifier/params... self.path = self.path.lstrip('/') sys.stderr.write("path = %s" % (self.path)) iiif.parse_url(self.path) except Exception as e: # Something completely unexpected => 500 raise IIIFError(code=500, text="Internal Server Error: unexpected exception parsing request (" + str(e) + ")") # Now we have a full iiif request if (re.match('[\w\.\-]+$', iiif.identifier)): file = os.path.join(TESTIMAGE_DIR, iiif.identifier) if (not os.path.isfile(file)): images_available = "" for image_file in os.listdir(TESTIMAGE_DIR): if (os.path.isfile(os.path.join(TESTIMAGE_DIR, image_file))): images_available += " " + image_file + "\n" raise IIIFError(code=404, parameter="identifier", text="Image resource '" + iiif.identifier + "' not found. Local image files available:\n" + images_available) else: raise IIIFError(code=404, parameter="identifier", text="Image resource '" + iiif.identifier + "' not found. Only local test images and http: URIs for images are supported.\n") # Now know image is OK manipulator = IIIFRequestHandler.manipulator_class() # Stash manipulator object so we can cleanup after reading file self.manipulator = manipulator self.compliance_uri = manipulator.compliance_uri if (iiif.info): # get size manipulator.srcfile = file manipulator.do_first() # most of info.json comes from config, a few things # specific to image i = IIIFInfo() i.identifier = self.iiif.identifier i.width = manipulator.width i.height = manipulator.height import io return(io.StringIO(i.as_json()), "application/json") else: (outfile, mime_type) = manipulator.derive(file, iiif) return(open(outfile, 'r'), mime_type)
python
def do_GET_body(self): iiif = self.iiif if (len(self.path) > 1024): raise IIIFError(code=414, text="URI Too Long: Max 1024 chars, got %d\n" % len(self.path)) try: # self.path has leading / then identifier/params... self.path = self.path.lstrip('/') sys.stderr.write("path = %s" % (self.path)) iiif.parse_url(self.path) except Exception as e: # Something completely unexpected => 500 raise IIIFError(code=500, text="Internal Server Error: unexpected exception parsing request (" + str(e) + ")") # Now we have a full iiif request if (re.match('[\w\.\-]+$', iiif.identifier)): file = os.path.join(TESTIMAGE_DIR, iiif.identifier) if (not os.path.isfile(file)): images_available = "" for image_file in os.listdir(TESTIMAGE_DIR): if (os.path.isfile(os.path.join(TESTIMAGE_DIR, image_file))): images_available += " " + image_file + "\n" raise IIIFError(code=404, parameter="identifier", text="Image resource '" + iiif.identifier + "' not found. Local image files available:\n" + images_available) else: raise IIIFError(code=404, parameter="identifier", text="Image resource '" + iiif.identifier + "' not found. Only local test images and http: URIs for images are supported.\n") # Now know image is OK manipulator = IIIFRequestHandler.manipulator_class() # Stash manipulator object so we can cleanup after reading file self.manipulator = manipulator self.compliance_uri = manipulator.compliance_uri if (iiif.info): # get size manipulator.srcfile = file manipulator.do_first() # most of info.json comes from config, a few things # specific to image i = IIIFInfo() i.identifier = self.iiif.identifier i.width = manipulator.width i.height = manipulator.height import io return(io.StringIO(i.as_json()), "application/json") else: (outfile, mime_type) = manipulator.derive(file, iiif) return(open(outfile, 'r'), mime_type)
[ "def", "do_GET_body", "(", "self", ")", ":", "iiif", "=", "self", ".", "iiif", "if", "(", "len", "(", "self", ".", "path", ")", ">", "1024", ")", ":", "raise", "IIIFError", "(", "code", "=", "414", ",", "text", "=", "\"URI Too Long: Max 1024 chars, got...
Create body of GET.
[ "Create", "body", "of", "GET", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif_cgi.py#L115-L162
7,298
zimeon/iiif
iiif/auth.py
IIIFAuth.set_cookie_prefix
def set_cookie_prefix(self, cookie_prefix=None): """Set a random cookie prefix unless one is specified. In order to run multiple demonstration auth services on the same server we need to have different cookie names for each auth domain. Unless cookie_prefix is set, generate a random one. """ if (cookie_prefix is None): self.cookie_prefix = "%06d_" % int(random.random() * 1000000) else: self.cookie_prefix = cookie_prefix
python
def set_cookie_prefix(self, cookie_prefix=None): if (cookie_prefix is None): self.cookie_prefix = "%06d_" % int(random.random() * 1000000) else: self.cookie_prefix = cookie_prefix
[ "def", "set_cookie_prefix", "(", "self", ",", "cookie_prefix", "=", "None", ")", ":", "if", "(", "cookie_prefix", "is", "None", ")", ":", "self", ".", "cookie_prefix", "=", "\"%06d_\"", "%", "int", "(", "random", ".", "random", "(", ")", "*", "1000000", ...
Set a random cookie prefix unless one is specified. In order to run multiple demonstration auth services on the same server we need to have different cookie names for each auth domain. Unless cookie_prefix is set, generate a random one.
[ "Set", "a", "random", "cookie", "prefix", "unless", "one", "is", "specified", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth.py#L41-L52
7,299
zimeon/iiif
iiif/auth.py
IIIFAuth.add_services
def add_services(self, info): """Add auth service descriptions to an IIIFInfo object. Login service description is the wrapper for all other auth service descriptions so we have nothing unless self.login_uri is specified. If we do then add any other auth services at children. Exactly the same structure is used in the authorized and unauthorized cases (although in the data could be different). """ if (self.login_uri): svc = self.login_service_description() svcs = [] if (self.logout_uri): svcs.append(self.logout_service_description()) if (self.client_id_uri): svcs.append(self.client_id_service_description()) if (self.access_token_uri): svcs.append(self.access_token_service_description()) # Add one as direct child of service property, else array for >1 if (len(svcs) == 1): svc['service'] = svcs[0] elif (len(svcs) > 1): svc['service'] = svcs info.add_service(svc)
python
def add_services(self, info): if (self.login_uri): svc = self.login_service_description() svcs = [] if (self.logout_uri): svcs.append(self.logout_service_description()) if (self.client_id_uri): svcs.append(self.client_id_service_description()) if (self.access_token_uri): svcs.append(self.access_token_service_description()) # Add one as direct child of service property, else array for >1 if (len(svcs) == 1): svc['service'] = svcs[0] elif (len(svcs) > 1): svc['service'] = svcs info.add_service(svc)
[ "def", "add_services", "(", "self", ",", "info", ")", ":", "if", "(", "self", ".", "login_uri", ")", ":", "svc", "=", "self", ".", "login_service_description", "(", ")", "svcs", "=", "[", "]", "if", "(", "self", ".", "logout_uri", ")", ":", "svcs", ...
Add auth service descriptions to an IIIFInfo object. Login service description is the wrapper for all other auth service descriptions so we have nothing unless self.login_uri is specified. If we do then add any other auth services at children. Exactly the same structure is used in the authorized and unauthorized cases (although in the data could be different).
[ "Add", "auth", "service", "descriptions", "to", "an", "IIIFInfo", "object", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth.py#L54-L80