id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
7,300
zimeon/iiif
iiif/auth.py
IIIFAuth.login_service_description
def login_service_description(self): """Login service description. The login service description _MUST_ include the token service description. The authentication pattern is indicated via the profile URI which is built using self.auth_pattern. """ label = 'Login to ' + self.name if (self.auth_type): label = label + ' (' + self.auth_type + ')' desc = {"@id": self.login_uri, "profile": self.profile_base + self.auth_pattern, "label": label} if (self.header): desc['header'] = self.header if (self.description): desc['description'] = self.description return desc
python
def login_service_description(self): label = 'Login to ' + self.name if (self.auth_type): label = label + ' (' + self.auth_type + ')' desc = {"@id": self.login_uri, "profile": self.profile_base + self.auth_pattern, "label": label} if (self.header): desc['header'] = self.header if (self.description): desc['description'] = self.description return desc
[ "def", "login_service_description", "(", "self", ")", ":", "label", "=", "'Login to '", "+", "self", ".", "name", "if", "(", "self", ".", "auth_type", ")", ":", "label", "=", "label", "+", "' ('", "+", "self", ".", "auth_type", "+", "')'", "desc", "=",...
Login service description. The login service description _MUST_ include the token service description. The authentication pattern is indicated via the profile URI which is built using self.auth_pattern.
[ "Login", "service", "description", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth.py#L82-L99
7,301
zimeon/iiif
iiif/auth.py
IIIFAuth.logout_service_description
def logout_service_description(self): """Logout service description.""" label = 'Logout from ' + self.name if (self.auth_type): label = label + ' (' + self.auth_type + ')' return({"@id": self.logout_uri, "profile": self.profile_base + 'logout', "label": label})
python
def logout_service_description(self): label = 'Logout from ' + self.name if (self.auth_type): label = label + ' (' + self.auth_type + ')' return({"@id": self.logout_uri, "profile": self.profile_base + 'logout', "label": label})
[ "def", "logout_service_description", "(", "self", ")", ":", "label", "=", "'Logout from '", "+", "self", ".", "name", "if", "(", "self", ".", "auth_type", ")", ":", "label", "=", "label", "+", "' ('", "+", "self", ".", "auth_type", "+", "')'", "return", ...
Logout service description.
[ "Logout", "service", "description", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth.py#L101-L108
7,302
zimeon/iiif
iiif/auth.py
IIIFAuth.access_token_response
def access_token_response(self, token, message_id=None): """Access token response structure. Success if token is set, otherwise (None, empty string) give error response. If message_id is set then an extra messageId attribute is set in the response to handle postMessage() responses. """ if (token): data = {"accessToken": token, "expiresIn": self.access_token_lifetime} if (message_id): data['messageId'] = message_id else: data = {"error": "client_unauthorized", "description": "No authorization details received"} return data
python
def access_token_response(self, token, message_id=None): if (token): data = {"accessToken": token, "expiresIn": self.access_token_lifetime} if (message_id): data['messageId'] = message_id else: data = {"error": "client_unauthorized", "description": "No authorization details received"} return data
[ "def", "access_token_response", "(", "self", ",", "token", ",", "message_id", "=", "None", ")", ":", "if", "(", "token", ")", ":", "data", "=", "{", "\"accessToken\"", ":", "token", ",", "\"expiresIn\"", ":", "self", ".", "access_token_lifetime", "}", "if"...
Access token response structure. Success if token is set, otherwise (None, empty string) give error response. If message_id is set then an extra messageId attribute is set in the response to handle postMessage() responses.
[ "Access", "token", "response", "structure", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth.py#L123-L138
7,303
zimeon/iiif
iiif/auth.py
IIIFAuth._generate_random_string
def _generate_random_string(self, container, length=20): """Generate a random cookie or token string not in container. The cookie or token should be secure in the sense that it should not be likely to be able guess a value. Because it is not derived from anything else, there is no vulnerability of the token from computation, or possible leakage of information from the token. """ while True: s = ''.join([random.SystemRandom().choice(string.digits + string.ascii_letters) for n in range(length)]) if (s not in container): break return s
python
def _generate_random_string(self, container, length=20): while True: s = ''.join([random.SystemRandom().choice(string.digits + string.ascii_letters) for n in range(length)]) if (s not in container): break return s
[ "def", "_generate_random_string", "(", "self", ",", "container", ",", "length", "=", "20", ")", ":", "while", "True", ":", "s", "=", "''", ".", "join", "(", "[", "random", ".", "SystemRandom", "(", ")", ".", "choice", "(", "string", ".", "digits", "+...
Generate a random cookie or token string not in container. The cookie or token should be secure in the sense that it should not be likely to be able to guess a value. Because it is not derived from anything else, there is no vulnerability of the token from computation, or possible leakage of information from the token.
[ "Generate", "a", "random", "cookie", "or", "token", "string", "not", "in", "container", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth.py#L189-L202
7,304
zimeon/iiif
iiif/auth.py
IIIFAuth.access_cookie
def access_cookie(self, account): """Make and store access cookie for a given account. If account is allowed then make a cookie and add it to the dict of accepted access cookies with current timestamp as the value. Return the access cookie. Otherwise return None. """ if (self.account_allowed(account)): cookie = self._generate_random_string(self.access_cookies) self.access_cookies[cookie] = int(time.time()) return cookie else: return None
python
def access_cookie(self, account): if (self.account_allowed(account)): cookie = self._generate_random_string(self.access_cookies) self.access_cookies[cookie] = int(time.time()) return cookie else: return None
[ "def", "access_cookie", "(", "self", ",", "account", ")", ":", "if", "(", "self", ".", "account_allowed", "(", "account", ")", ")", ":", "cookie", "=", "self", ".", "_generate_random_string", "(", "self", ".", "access_cookies", ")", "self", ".", "access_co...
Make and store access cookie for a given account. If account is allowed then make a cookie and add it to the dict of accepted access cookies with current timestamp as the value. Return the access cookie. Otherwise return None.
[ "Make", "and", "store", "access", "cookie", "for", "a", "given", "account", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth.py#L213-L227
7,305
zimeon/iiif
iiif/auth.py
IIIFAuth.access_cookie_valid
def access_cookie_valid(self, cookie, log_msg): """Check access cookie validity. Returns true if the access cookie is valid. The set of allowed access cookies is stored in self.access_cookies. Uses log_msg as prefix to info level log message of accetance or rejection. """ if (cookie in self.access_cookies): age = int(time.time()) - self.access_cookies[cookie] if (age <= (self.access_cookie_lifetime + 1)): self.logger.info(log_msg + " " + cookie + " ACCEPTED COOKIE (%ds old)" % age) return True # Expired... self.logger.info(log_msg + " " + cookie + " EXPIRED COOKIE (%ds old > %ds)" % (age, self.access_cookie_lifetime)) # Keep cookie for 2x lifetim in order to generate # helpful expired message if (age > (self.access_cookie_lifetime * 2)): del self.access_cookies[cookie] return False else: self.logger.info(log_msg + " " + cookie + " REJECTED COOKIE") return False
python
def access_cookie_valid(self, cookie, log_msg): if (cookie in self.access_cookies): age = int(time.time()) - self.access_cookies[cookie] if (age <= (self.access_cookie_lifetime + 1)): self.logger.info(log_msg + " " + cookie + " ACCEPTED COOKIE (%ds old)" % age) return True # Expired... self.logger.info(log_msg + " " + cookie + " EXPIRED COOKIE (%ds old > %ds)" % (age, self.access_cookie_lifetime)) # Keep cookie for 2x lifetim in order to generate # helpful expired message if (age > (self.access_cookie_lifetime * 2)): del self.access_cookies[cookie] return False else: self.logger.info(log_msg + " " + cookie + " REJECTED COOKIE") return False
[ "def", "access_cookie_valid", "(", "self", ",", "cookie", ",", "log_msg", ")", ":", "if", "(", "cookie", "in", "self", ".", "access_cookies", ")", ":", "age", "=", "int", "(", "time", ".", "time", "(", ")", ")", "-", "self", ".", "access_cookies", "[...
Check access cookie validity. Returns true if the access cookie is valid. The set of allowed access cookies is stored in self.access_cookies. Uses log_msg as prefix to info level log message of acceptance or rejection.
[ "Check", "access", "cookie", "validity", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth.py#L229-L255
7,306
zimeon/iiif
iiif/auth.py
IIIFAuth.access_token
def access_token(self, cookie): """Make and store access token as proxy for the access cookie. Create an access token to act as a proxy for access cookie, add it to the dict of accepted access tokens with (cookie, current timestamp) as the value. Return the access token. Return None if cookie is not set. """ if (cookie): token = self._generate_random_string(self.access_tokens) self.access_tokens[token] = (cookie, int(time.time())) return token else: return None
python
def access_token(self, cookie): if (cookie): token = self._generate_random_string(self.access_tokens) self.access_tokens[token] = (cookie, int(time.time())) return token else: return None
[ "def", "access_token", "(", "self", ",", "cookie", ")", ":", "if", "(", "cookie", ")", ":", "token", "=", "self", ".", "_generate_random_string", "(", "self", ".", "access_tokens", ")", "self", ".", "access_tokens", "[", "token", "]", "=", "(", "cookie",...
Make and store access token as proxy for the access cookie. Create an access token to act as a proxy for access cookie, add it to the dict of accepted access tokens with (cookie, current timestamp) as the value. Return the access token. Return None if cookie is not set.
[ "Make", "and", "store", "access", "token", "as", "proxy", "for", "the", "access", "cookie", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth.py#L257-L269
7,307
zimeon/iiif
iiif/auth.py
IIIFAuth.access_token_valid
def access_token_valid(self, token, log_msg): """Check token validity. Returns true if the token is valid. The set of allowed access tokens is stored in self.access_tokens. Uses log_msg as prefix to info level log message of acceptance or rejection. """ if (token in self.access_tokens): (cookie, issue_time) = self.access_tokens[token] age = int(time.time()) - issue_time if (age <= (self.access_token_lifetime + 1)): self.logger.info(log_msg + " " + token + " ACCEPTED TOKEN (%ds old)" % age) return True # Expired... self.logger.info(log_msg + " " + token + " EXPIRED TOKEN (%ds old > %ds)" % (age, self.access_token_lifetime)) # Keep token for 2x lifetim in order to generate # helpful expired message if (age > (self.access_token_lifetime * 2)): del self.access_tokens[token] return False else: self.logger.info(log_msg + " " + token + " REJECTED TOKEN") return False
python
def access_token_valid(self, token, log_msg): if (token in self.access_tokens): (cookie, issue_time) = self.access_tokens[token] age = int(time.time()) - issue_time if (age <= (self.access_token_lifetime + 1)): self.logger.info(log_msg + " " + token + " ACCEPTED TOKEN (%ds old)" % age) return True # Expired... self.logger.info(log_msg + " " + token + " EXPIRED TOKEN (%ds old > %ds)" % (age, self.access_token_lifetime)) # Keep token for 2x lifetim in order to generate # helpful expired message if (age > (self.access_token_lifetime * 2)): del self.access_tokens[token] return False else: self.logger.info(log_msg + " " + token + " REJECTED TOKEN") return False
[ "def", "access_token_valid", "(", "self", ",", "token", ",", "log_msg", ")", ":", "if", "(", "token", "in", "self", ".", "access_tokens", ")", ":", "(", "cookie", ",", "issue_time", ")", "=", "self", ".", "access_tokens", "[", "token", "]", "age", "=",...
Check token validity. Returns true if the token is valid. The set of allowed access tokens is stored in self.access_tokens. Uses log_msg as prefix to info level log message of acceptance or rejection.
[ "Check", "token", "validity", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth.py#L271-L298
7,308
zimeon/iiif
iiif/auth_flask.py
IIIFAuthFlask.info_authn
def info_authn(self): """Check to see if user if authenticated for info.json. Must have Authorization header with value that has the form "Bearer TOKEN", where TOKEN is an appropriate and valid access token. """ authz_header = request.headers.get('Authorization', '[none]') if (not authz_header.startswith('Bearer ')): return False token = authz_header[7:] return self.access_token_valid( token, "info_authn: Authorization header")
python
def info_authn(self): authz_header = request.headers.get('Authorization', '[none]') if (not authz_header.startswith('Bearer ')): return False token = authz_header[7:] return self.access_token_valid( token, "info_authn: Authorization header")
[ "def", "info_authn", "(", "self", ")", ":", "authz_header", "=", "request", ".", "headers", ".", "get", "(", "'Authorization'", ",", "'[none]'", ")", "if", "(", "not", "authz_header", ".", "startswith", "(", "'Bearer '", ")", ")", ":", "return", "False", ...
Check to see if user is authenticated for info.json. Must have Authorization header with value that has the form "Bearer TOKEN", where TOKEN is an appropriate and valid access token.
[ "Check", "to", "see", "if", "user", "if", "authenticated", "for", "info", ".", "json", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_flask.py#L23-L35
7,309
zimeon/iiif
iiif/auth_flask.py
IIIFAuthFlask.image_authn
def image_authn(self): """Check to see if user if authenticated for image requests. Must have access cookie with an appropriate value. """ authn_cookie = request.cookies.get( self.access_cookie_name, default='[none]') return self.access_cookie_valid(authn_cookie, "image_authn: auth cookie")
python
def image_authn(self): authn_cookie = request.cookies.get( self.access_cookie_name, default='[none]') return self.access_cookie_valid(authn_cookie, "image_authn: auth cookie")
[ "def", "image_authn", "(", "self", ")", ":", "authn_cookie", "=", "request", ".", "cookies", ".", "get", "(", "self", ".", "access_cookie_name", ",", "default", "=", "'[none]'", ")", "return", "self", ".", "access_cookie_valid", "(", "authn_cookie", ",", "\"...
Check to see if user is authenticated for image requests. Must have access cookie with an appropriate value.
[ "Check", "to", "see", "if", "user", "if", "authenticated", "for", "image", "requests", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_flask.py#L37-L44
7,310
zimeon/iiif
iiif/auth_flask.py
IIIFAuthFlask.logout_handler
def logout_handler(self, **args): """Handler for logout button. Delete cookies and return HTML that immediately closes window """ response = make_response( "<html><script>window.close();</script></html>", 200, {'Content-Type': "text/html"}) response.set_cookie(self.account_cookie_name, expires=0) response.set_cookie(self.access_cookie_name, expires=0) response.headers['Access-Control-Allow-Origin'] = '*' return response
python
def logout_handler(self, **args): response = make_response( "<html><script>window.close();</script></html>", 200, {'Content-Type': "text/html"}) response.set_cookie(self.account_cookie_name, expires=0) response.set_cookie(self.access_cookie_name, expires=0) response.headers['Access-Control-Allow-Origin'] = '*' return response
[ "def", "logout_handler", "(", "self", ",", "*", "*", "args", ")", ":", "response", "=", "make_response", "(", "\"<html><script>window.close();</script></html>\"", ",", "200", ",", "{", "'Content-Type'", ":", "\"text/html\"", "}", ")", "response", ".", "set_cookie"...
Handler for logout button. Delete cookies and return HTML that immediately closes window
[ "Handler", "for", "logout", "button", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_flask.py#L57-L68
7,311
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.find_binaries
def find_binaries(cls, tmpdir=None, shellsetup=None, pnmdir=None): """Set instance variables for directory and binary locations. FIXME - should accept params to set things other than defaults. """ cls.tmpdir = ('/tmp' if (tmpdir is None) else tmpdir) # Shell setup command (e.g set library path) cls.shellsetup = ('' if (shellsetup is None) else shellsetup) if (pnmdir is None): cls.pnmdir = '/usr/bin' for dir in ('/usr/local/bin', '/sw/bin'): if (os.path.isfile(os.path.join(dir, 'pngtopnm'))): cls.pnmdir = dir else: cls.pnmdir = pnmdir # Recklessly assume everything else under cls.pnmdir cls.pngtopnm = os.path.join(cls.pnmdir, 'pngtopnm') cls.jpegtopnm = os.path.join(cls.pnmdir, 'jpegtopnm') cls.pnmfile = os.path.join(cls.pnmdir, 'pnmfile') cls.pnmcut = os.path.join(cls.pnmdir, 'pnmcut') cls.pnmscale = os.path.join(cls.pnmdir, 'pnmscale') cls.pnmrotate = os.path.join(cls.pnmdir, 'pnmrotate') cls.pnmflip = os.path.join(cls.pnmdir, 'pnmflip') cls.pnmtopng = os.path.join(cls.pnmdir, 'pnmtopng') cls.ppmtopgm = os.path.join(cls.pnmdir, 'ppmtopgm') cls.pnmtotiff = os.path.join(cls.pnmdir, 'pnmtotiff') cls.pnmtojpeg = os.path.join(cls.pnmdir, 'pnmtojpeg') cls.pamditherbw = os.path.join(cls.pnmdir, 'pamditherbw') # Need djatoka to get jp2 output cls.djatoka_comp = '/Users/simeon/packages/adore-djatoka-1.1/bin/compress.sh'
python
def find_binaries(cls, tmpdir=None, shellsetup=None, pnmdir=None): cls.tmpdir = ('/tmp' if (tmpdir is None) else tmpdir) # Shell setup command (e.g set library path) cls.shellsetup = ('' if (shellsetup is None) else shellsetup) if (pnmdir is None): cls.pnmdir = '/usr/bin' for dir in ('/usr/local/bin', '/sw/bin'): if (os.path.isfile(os.path.join(dir, 'pngtopnm'))): cls.pnmdir = dir else: cls.pnmdir = pnmdir # Recklessly assume everything else under cls.pnmdir cls.pngtopnm = os.path.join(cls.pnmdir, 'pngtopnm') cls.jpegtopnm = os.path.join(cls.pnmdir, 'jpegtopnm') cls.pnmfile = os.path.join(cls.pnmdir, 'pnmfile') cls.pnmcut = os.path.join(cls.pnmdir, 'pnmcut') cls.pnmscale = os.path.join(cls.pnmdir, 'pnmscale') cls.pnmrotate = os.path.join(cls.pnmdir, 'pnmrotate') cls.pnmflip = os.path.join(cls.pnmdir, 'pnmflip') cls.pnmtopng = os.path.join(cls.pnmdir, 'pnmtopng') cls.ppmtopgm = os.path.join(cls.pnmdir, 'ppmtopgm') cls.pnmtotiff = os.path.join(cls.pnmdir, 'pnmtotiff') cls.pnmtojpeg = os.path.join(cls.pnmdir, 'pnmtojpeg') cls.pamditherbw = os.path.join(cls.pnmdir, 'pamditherbw') # Need djatoka to get jp2 output cls.djatoka_comp = '/Users/simeon/packages/adore-djatoka-1.1/bin/compress.sh'
[ "def", "find_binaries", "(", "cls", ",", "tmpdir", "=", "None", ",", "shellsetup", "=", "None", ",", "pnmdir", "=", "None", ")", ":", "cls", ".", "tmpdir", "=", "(", "'/tmp'", "if", "(", "tmpdir", "is", "None", ")", "else", "tmpdir", ")", "# Shell se...
Set instance variables for directory and binary locations. FIXME - should accept params to set things other than defaults.
[ "Set", "instance", "variables", "for", "directory", "and", "binary", "locations", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L41-L70
7,312
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.do_first
def do_first(self): """Create PNM file from input image file.""" pid = os.getpid() self.basename = os.path.join(self.tmpdir, 'iiif_netpbm_' + str(pid)) outfile = self.basename + '.pnm' # Convert source file to pnm filetype = self.file_type(self.srcfile) if (filetype == 'png'): if (self.shell_call(self.pngtopnm + ' ' + self.srcfile + ' > ' + outfile)): raise IIIFError(text="Oops... got error from pngtopnm.") elif (filetype == 'jpg'): if (self.shell_call(self.jpegtopnm + ' ' + self.srcfile + ' > ' + outfile)): raise IIIFError(text="Oops... got error from jpegtopnm.") else: raise IIIFError(code='501', text='bad input file format (only know how to read png/jpeg)') self.tmpfile = outfile # Get size (self.width, self.height) = self.image_size(self.tmpfile)
python
def do_first(self): pid = os.getpid() self.basename = os.path.join(self.tmpdir, 'iiif_netpbm_' + str(pid)) outfile = self.basename + '.pnm' # Convert source file to pnm filetype = self.file_type(self.srcfile) if (filetype == 'png'): if (self.shell_call(self.pngtopnm + ' ' + self.srcfile + ' > ' + outfile)): raise IIIFError(text="Oops... got error from pngtopnm.") elif (filetype == 'jpg'): if (self.shell_call(self.jpegtopnm + ' ' + self.srcfile + ' > ' + outfile)): raise IIIFError(text="Oops... got error from jpegtopnm.") else: raise IIIFError(code='501', text='bad input file format (only know how to read png/jpeg)') self.tmpfile = outfile # Get size (self.width, self.height) = self.image_size(self.tmpfile)
[ "def", "do_first", "(", "self", ")", ":", "pid", "=", "os", ".", "getpid", "(", ")", "self", ".", "basename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "tmpdir", ",", "'iiif_netpbm_'", "+", "str", "(", "pid", ")", ")", "outfile", "="...
Create PNM file from input image file.
[ "Create", "PNM", "file", "from", "input", "image", "file", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L72-L90
7,313
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.file_type
def file_type(self, file): """Use python-magic to determine file type. Returns 'png' or 'jpg' on success, nothing on failure. """ try: magic_text = magic.from_file(file) if (isinstance(magic_text, bytes)): # In python2 and travis python3 (?!) decode to get unicode string magic_text = magic_text.decode('utf-8') except (TypeError, IOError): return if (re.search('PNG image data', magic_text)): return('png') elif (re.search('JPEG image data', magic_text)): return('jpg') # failed return
python
def file_type(self, file): try: magic_text = magic.from_file(file) if (isinstance(magic_text, bytes)): # In python2 and travis python3 (?!) decode to get unicode string magic_text = magic_text.decode('utf-8') except (TypeError, IOError): return if (re.search('PNG image data', magic_text)): return('png') elif (re.search('JPEG image data', magic_text)): return('jpg') # failed return
[ "def", "file_type", "(", "self", ",", "file", ")", ":", "try", ":", "magic_text", "=", "magic", ".", "from_file", "(", "file", ")", "if", "(", "isinstance", "(", "magic_text", ",", "bytes", ")", ")", ":", "# In python2 and travis python3 (?!) decode to get uni...
Use python-magic to determine file type. Returns 'png' or 'jpg' on success, nothing on failure.
[ "Use", "python", "-", "magic", "to", "determine", "file", "type", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L244-L261
7,314
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.image_size
def image_size(self, pnmfile): """Get width and height of pnm file. simeon@homebox src>pnmfile /tmp/214-2.png /tmp/214-2.png:PPM raw, 100 by 100 maxval 255 """ pout = os.popen(self.shellsetup + self.pnmfile + ' ' + pnmfile, 'r') pnmfileout = pout.read(200) pout.close() m = re.search(', (\d+) by (\d+) ', pnmfileout) if (m is None): raise IIIFError( text="Bad output from pnmfile when trying to get size.") w = int(m.group(1)) h = int(m.group(2)) # print "pnmfile output = %s" % (pnmfileout) # print "image size = %d,%d" % (w,h) return(w, h)
python
def image_size(self, pnmfile): pout = os.popen(self.shellsetup + self.pnmfile + ' ' + pnmfile, 'r') pnmfileout = pout.read(200) pout.close() m = re.search(', (\d+) by (\d+) ', pnmfileout) if (m is None): raise IIIFError( text="Bad output from pnmfile when trying to get size.") w = int(m.group(1)) h = int(m.group(2)) # print "pnmfile output = %s" % (pnmfileout) # print "image size = %d,%d" % (w,h) return(w, h)
[ "def", "image_size", "(", "self", ",", "pnmfile", ")", ":", "pout", "=", "os", ".", "popen", "(", "self", ".", "shellsetup", "+", "self", ".", "pnmfile", "+", "' '", "+", "pnmfile", ",", "'r'", ")", "pnmfileout", "=", "pout", ".", "read", "(", "200...
Get width and height of pnm file. simeon@homebox src>pnmfile /tmp/214-2.png /tmp/214-2.png:PPM raw, 100 by 100 maxval 255
[ "Get", "width", "and", "height", "of", "pnm", "file", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L263-L280
7,315
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.shell_call
def shell_call(self, shellcmd): """Shell call with necessary setup first.""" return(subprocess.call(self.shellsetup + shellcmd, shell=True))
python
def shell_call(self, shellcmd): return(subprocess.call(self.shellsetup + shellcmd, shell=True))
[ "def", "shell_call", "(", "self", ",", "shellcmd", ")", ":", "return", "(", "subprocess", ".", "call", "(", "self", ".", "shellsetup", "+", "shellcmd", ",", "shell", "=", "True", ")", ")" ]
Shell call with necessary setup first.
[ "Shell", "call", "with", "necessary", "setup", "first", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L282-L284
7,316
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.cleanup
def cleanup(self): """Clean up any temporary files.""" for file in glob.glob(self.basename + '*'): os.unlink(file)
python
def cleanup(self): for file in glob.glob(self.basename + '*'): os.unlink(file)
[ "def", "cleanup", "(", "self", ")", ":", "for", "file", "in", "glob", ".", "glob", "(", "self", ".", "basename", "+", "'*'", ")", ":", "os", ".", "unlink", "(", "file", ")" ]
Clean up any temporary files.
[ "Clean", "up", "any", "temporary", "files", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L286-L289
7,317
zimeon/iiif
iiif/error.py
IIIFError.image_server_response
def image_server_response(self, api_version=None): """Response, code and headers for image server error response. api_version selects the format (XML of 1.0). The return value is a tuple of response - body of HTTP response status - the HTTP status code headers - a dict of HTTP headers which will include the Content-Type As a side effect the routine sets self.content_type to the correct media type for the response. """ headers = dict(self.headers) if (api_version < '1.1'): headers['Content-Type'] = 'text/xml' response = self.as_xml() else: headers['Content-Type'] = 'text/plain' response = self.as_txt() return(response, self.code, headers)
python
def image_server_response(self, api_version=None): headers = dict(self.headers) if (api_version < '1.1'): headers['Content-Type'] = 'text/xml' response = self.as_xml() else: headers['Content-Type'] = 'text/plain' response = self.as_txt() return(response, self.code, headers)
[ "def", "image_server_response", "(", "self", ",", "api_version", "=", "None", ")", ":", "headers", "=", "dict", "(", "self", ".", "headers", ")", "if", "(", "api_version", "<", "'1.1'", ")", ":", "headers", "[", "'Content-Type'", "]", "=", "'text/xml'", ...
Response, code and headers for image server error response. api_version selects the format (XML of 1.0). The return value is a tuple of response - body of HTTP response status - the HTTP status code headers - a dict of HTTP headers which will include the Content-Type As a side effect the routine sets self.content_type to the correct media type for the response.
[ "Response", "code", "and", "headers", "for", "image", "server", "error", "response", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/error.py#L50-L69
7,318
zimeon/iiif
iiif/error.py
IIIFError.as_xml
def as_xml(self): """XML representation of the error to be used in HTTP response. This XML format follows the IIIF Image API v1.0 specification, see <http://iiif.io/api/image/1.0/#error> """ # Build tree spacing = ("\n" if (self.pretty_xml) else "") root = Element('error', {'xmlns': I3F_NS}) root.text = spacing e_parameter = Element('parameter', {}) e_parameter.text = self.parameter e_parameter.tail = spacing root.append(e_parameter) if (self.text): e_text = Element('text', {}) e_text.text = self.text e_text.tail = spacing root.append(e_text) # Write out as XML document to return tree = ElementTree(root) xml_buf = io.BytesIO() if (sys.version_info < (2, 7)): tree.write(xml_buf, encoding='UTF-8') else: tree.write(xml_buf, encoding='UTF-8', xml_declaration=True, method='xml') return(xml_buf.getvalue().decode('utf-8'))
python
def as_xml(self): # Build tree spacing = ("\n" if (self.pretty_xml) else "") root = Element('error', {'xmlns': I3F_NS}) root.text = spacing e_parameter = Element('parameter', {}) e_parameter.text = self.parameter e_parameter.tail = spacing root.append(e_parameter) if (self.text): e_text = Element('text', {}) e_text.text = self.text e_text.tail = spacing root.append(e_text) # Write out as XML document to return tree = ElementTree(root) xml_buf = io.BytesIO() if (sys.version_info < (2, 7)): tree.write(xml_buf, encoding='UTF-8') else: tree.write(xml_buf, encoding='UTF-8', xml_declaration=True, method='xml') return(xml_buf.getvalue().decode('utf-8'))
[ "def", "as_xml", "(", "self", ")", ":", "# Build tree", "spacing", "=", "(", "\"\\n\"", "if", "(", "self", ".", "pretty_xml", ")", "else", "\"\"", ")", "root", "=", "Element", "(", "'error'", ",", "{", "'xmlns'", ":", "I3F_NS", "}", ")", "root", ".",...
XML representation of the error to be used in HTTP response. This XML format follows the IIIF Image API v1.0 specification, see <http://iiif.io/api/image/1.0/#error>
[ "XML", "representation", "of", "the", "error", "to", "be", "used", "in", "HTTP", "response", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/error.py#L71-L98
7,319
zimeon/iiif
iiif/error.py
IIIFError.as_txt
def as_txt(self): """Text rendering of error response. Designed for use with Image API version 1.1 and above where the error response is suggested to be text or html but not otherwise specified. Intended to provide useful information for debugging. """ s = "IIIF Image Server Error\n\n" s += self.text if (self.text) else 'UNKNOWN_ERROR' s += "\n\n" if (self.parameter): s += "parameter=%s\n" % self.parameter if (self.code): s += "code=%d\n\n" % self.code for header in sorted(self.headers): s += "header %s=%s\n" % (header, self.headers[header]) return s
python
def as_txt(self): s = "IIIF Image Server Error\n\n" s += self.text if (self.text) else 'UNKNOWN_ERROR' s += "\n\n" if (self.parameter): s += "parameter=%s\n" % self.parameter if (self.code): s += "code=%d\n\n" % self.code for header in sorted(self.headers): s += "header %s=%s\n" % (header, self.headers[header]) return s
[ "def", "as_txt", "(", "self", ")", ":", "s", "=", "\"IIIF Image Server Error\\n\\n\"", "s", "+=", "self", ".", "text", "if", "(", "self", ".", "text", ")", "else", "'UNKNOWN_ERROR'", "s", "+=", "\"\\n\\n\"", "if", "(", "self", ".", "parameter", ")", ":",...
Text rendering of error response. Designed for use with Image API version 1.1 and above where the error response is suggested to be text or html but not otherwise specified. Intended to provide useful information for debugging.
[ "Text", "rendering", "of", "error", "response", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/error.py#L100-L116
7,320
zimeon/iiif
iiif/auth_google.py
IIIFAuthGoogle.login_handler
def login_handler(self, config=None, prefix=None, **args): """OAuth starts here, redirect user to Google.""" params = { 'response_type': 'code', 'client_id': self.google_api_client_id, 'redirect_uri': self.scheme_host_port_prefix( 'http', config.host, config.port, prefix) + '/home', 'scope': self.google_api_scope, 'state': self.request_args_get('next', default=''), } url = self.google_oauth2_url + 'auth?' + urlencode(params) return self.login_handler_redirect(url)
python
def login_handler(self, config=None, prefix=None, **args): params = { 'response_type': 'code', 'client_id': self.google_api_client_id, 'redirect_uri': self.scheme_host_port_prefix( 'http', config.host, config.port, prefix) + '/home', 'scope': self.google_api_scope, 'state': self.request_args_get('next', default=''), } url = self.google_oauth2_url + 'auth?' + urlencode(params) return self.login_handler_redirect(url)
[ "def", "login_handler", "(", "self", ",", "config", "=", "None", ",", "prefix", "=", "None", ",", "*", "*", "args", ")", ":", "params", "=", "{", "'response_type'", ":", "'code'", ",", "'client_id'", ":", "self", ".", "google_api_client_id", ",", "'redir...
OAuth starts here, redirect user to Google.
[ "OAuth", "starts", "here", "redirect", "user", "to", "Google", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_google.py#L53-L64
7,321
zimeon/iiif
iiif/auth_google.py
IIIFAuthGoogle.google_get_token
def google_get_token(self, config, prefix): """Make request to Google API to get token.""" params = { 'code': self.request_args_get( 'code', default=''), 'client_id': self.google_api_client_id, 'client_secret': self.google_api_client_secret, 'redirect_uri': self.scheme_host_port_prefix( 'http', config.host, config.port, prefix) + '/home', 'grant_type': 'authorization_code', } payload = urlencode(params).encode('utf-8') url = self.google_oauth2_url + 'token' req = Request(url, payload) json_str = urlopen(req).read() return json.loads(json_str.decode('utf-8'))
python
def google_get_token(self, config, prefix): params = { 'code': self.request_args_get( 'code', default=''), 'client_id': self.google_api_client_id, 'client_secret': self.google_api_client_secret, 'redirect_uri': self.scheme_host_port_prefix( 'http', config.host, config.port, prefix) + '/home', 'grant_type': 'authorization_code', } payload = urlencode(params).encode('utf-8') url = self.google_oauth2_url + 'token' req = Request(url, payload) json_str = urlopen(req).read() return json.loads(json_str.decode('utf-8'))
[ "def", "google_get_token", "(", "self", ",", "config", ",", "prefix", ")", ":", "params", "=", "{", "'code'", ":", "self", ".", "request_args_get", "(", "'code'", ",", "default", "=", "''", ")", ",", "'client_id'", ":", "self", ".", "google_api_client_id",...
Make request to Google API to get token.
[ "Make", "request", "to", "Google", "API", "to", "get", "token", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_google.py#L84-L100
7,322
zimeon/iiif
iiif/auth_google.py
IIIFAuthGoogle.google_get_data
def google_get_data(self, config, response): """Make request to Google API to get profile data for the user.""" params = { 'access_token': response['access_token'], } payload = urlencode(params) url = self.google_api_url + 'userinfo?' + payload req = Request(url) json_str = urlopen(req).read() return json.loads(json_str.decode('utf-8'))
python
def google_get_data(self, config, response): params = { 'access_token': response['access_token'], } payload = urlencode(params) url = self.google_api_url + 'userinfo?' + payload req = Request(url) json_str = urlopen(req).read() return json.loads(json_str.decode('utf-8'))
[ "def", "google_get_data", "(", "self", ",", "config", ",", "response", ")", ":", "params", "=", "{", "'access_token'", ":", "response", "[", "'access_token'", "]", ",", "}", "payload", "=", "urlencode", "(", "params", ")", "url", "=", "self", ".", "googl...
Make request to Google API to get profile data for the user.
[ "Make", "request", "to", "Google", "API", "to", "get", "profile", "data", "for", "the", "user", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_google.py#L102-L111
7,323
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.compliance_uri
def compliance_uri(self): """Compliance URI based on api_version. Value is based on api_version and complicance_level, will be None if either are unset/unrecognized. The assumption here is that the api_version and level are orthogonal, override this method if that isn't true. """ if (self.api_version == '1.0'): uri_pattern = r'http://library.stanford.edu/iiif/image-api/compliance.html#level%d' elif (self.api_version == '1.1'): uri_pattern = r'http://library.stanford.edu/iiif/image-api/1.1/compliance.html#level%d' elif (self.api_version == '2.0' or self.api_version == '2.1'): uri_pattern = r'http://iiif.io/api/image/2/level%d.json' else: return if (self.compliance_level is None): return return(uri_pattern % self.compliance_level)
python
def compliance_uri(self): if (self.api_version == '1.0'): uri_pattern = r'http://library.stanford.edu/iiif/image-api/compliance.html#level%d' elif (self.api_version == '1.1'): uri_pattern = r'http://library.stanford.edu/iiif/image-api/1.1/compliance.html#level%d' elif (self.api_version == '2.0' or self.api_version == '2.1'): uri_pattern = r'http://iiif.io/api/image/2/level%d.json' else: return if (self.compliance_level is None): return return(uri_pattern % self.compliance_level)
[ "def", "compliance_uri", "(", "self", ")", ":", "if", "(", "self", ".", "api_version", "==", "'1.0'", ")", ":", "uri_pattern", "=", "r'http://library.stanford.edu/iiif/image-api/compliance.html#level%d'", "elif", "(", "self", ".", "api_version", "==", "'1.1'", ")", ...
Compliance URI based on api_version. Value is based on api_version and complicance_level, will be None if either are unset/unrecognized. The assumption here is that the api_version and level are orthogonal, override this method if that isn't true.
[ "Compliance", "URI", "based", "on", "api_version", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L49-L68
7,324
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.derive
def derive(self, srcfile=None, request=None, outfile=None): """Do sequence of manipulations for IIIF to derive output image. Named argments: srcfile -- source image file request -- IIIFRequest object with parsed parameters outfile -- output image file. If set the the output file will be written to that file, otherwise a new temporary file will be created and outfile set to its location. See order in spec: http://www-sul.stanford.edu/iiif/image-api/#order Region THEN Size THEN Rotation THEN Quality THEN Format Typical use: r = IIIFRequest(region=...) m = IIIFManipulator() try: m.derive(srcfile='a.jpg',request=r) # .. serve m.outfile except IIIFError as e: # .. finally: m.cleanup() #removes temp m.outfile """ # set if specified if (srcfile is not None): self.srcfile = srcfile if (request is not None): self.request = request if (outfile is not None): self.outfile = outfile if (self.outfile is not None): # create path to output dir if necessary dir = os.path.dirname(self.outfile) if (not os.path.exists(dir)): os.makedirs(dir) # self.do_first() (x, y, w, h) = self.region_to_apply() self.do_region(x, y, w, h) (w, h) = self.size_to_apply() self.do_size(w, h) (mirror, rot) = self.rotation_to_apply(no_mirror=True) self.do_rotation(mirror, rot) (quality) = self.quality_to_apply() self.do_quality(quality) self.do_format(self.request.format) self.do_last() return(self.outfile, self.mime_type)
python
def derive(self, srcfile=None, request=None, outfile=None): # set if specified if (srcfile is not None): self.srcfile = srcfile if (request is not None): self.request = request if (outfile is not None): self.outfile = outfile if (self.outfile is not None): # create path to output dir if necessary dir = os.path.dirname(self.outfile) if (not os.path.exists(dir)): os.makedirs(dir) # self.do_first() (x, y, w, h) = self.region_to_apply() self.do_region(x, y, w, h) (w, h) = self.size_to_apply() self.do_size(w, h) (mirror, rot) = self.rotation_to_apply(no_mirror=True) self.do_rotation(mirror, rot) (quality) = self.quality_to_apply() self.do_quality(quality) self.do_format(self.request.format) self.do_last() return(self.outfile, self.mime_type)
[ "def", "derive", "(", "self", ",", "srcfile", "=", "None", ",", "request", "=", "None", ",", "outfile", "=", "None", ")", ":", "# set if specified", "if", "(", "srcfile", "is", "not", "None", ")", ":", "self", ".", "srcfile", "=", "srcfile", "if", "(...
Do sequence of manipulations for IIIF to derive output image. Named argments: srcfile -- source image file request -- IIIFRequest object with parsed parameters outfile -- output image file. If set the the output file will be written to that file, otherwise a new temporary file will be created and outfile set to its location. See order in spec: http://www-sul.stanford.edu/iiif/image-api/#order Region THEN Size THEN Rotation THEN Quality THEN Format Typical use: r = IIIFRequest(region=...) m = IIIFManipulator() try: m.derive(srcfile='a.jpg',request=r) # .. serve m.outfile except IIIFError as e: # .. finally: m.cleanup() #removes temp m.outfile
[ "Do", "sequence", "of", "manipulations", "for", "IIIF", "to", "derive", "output", "image", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L70-L120
7,325
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.do_region
def do_region(self, x, y, w, h): """Null implementation of region selection.""" if (x is not None): raise IIIFError(code=501, parameter="region", text="Null manipulator supports only region=/full/.")
python
def do_region(self, x, y, w, h): if (x is not None): raise IIIFError(code=501, parameter="region", text="Null manipulator supports only region=/full/.")
[ "def", "do_region", "(", "self", ",", "x", ",", "y", ",", "w", ",", "h", ")", ":", "if", "(", "x", "is", "not", "None", ")", ":", "raise", "IIIFError", "(", "code", "=", "501", ",", "parameter", "=", "\"region\"", ",", "text", "=", "\"Null manipu...
Null implementation of region selection.
[ "Null", "implementation", "of", "region", "selection", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L130-L134
7,326
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.do_quality
def do_quality(self, quality): """Null implementation of quality.""" if (self.api_version >= '2.0'): if (quality != "default"): raise IIIFError(code=501, parameter="default", text="Null manipulator supports only quality=default.") else: # versions 1.0 and 1.1 if (quality != "native"): raise IIIFError(code=501, parameter="native", text="Null manipulator supports only quality=native.")
python
def do_quality(self, quality): if (self.api_version >= '2.0'): if (quality != "default"): raise IIIFError(code=501, parameter="default", text="Null manipulator supports only quality=default.") else: # versions 1.0 and 1.1 if (quality != "native"): raise IIIFError(code=501, parameter="native", text="Null manipulator supports only quality=native.")
[ "def", "do_quality", "(", "self", ",", "quality", ")", ":", "if", "(", "self", ".", "api_version", ">=", "'2.0'", ")", ":", "if", "(", "quality", "!=", "\"default\"", ")", ":", "raise", "IIIFError", "(", "code", "=", "501", ",", "parameter", "=", "\"...
Null implementation of quality.
[ "Null", "implementation", "of", "quality", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L151-L160
7,327
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.do_format
def do_format(self, format): """Null implementation of format selection. This is the last step, this null implementation does not accept any specification of a format because we don't even know what the input format is. """ if (format is not None): raise IIIFError(code=415, parameter="format", text="Null manipulator does not support specification of output format.") # if (self.outfile is None): self.outfile = self.srcfile else: try: shutil.copyfile(self.srcfile, self.outfile) except IOError as e: raise IIIFError(code=500, text="Failed to copy file (%s)." % (str(e))) self.mime_type = None
python
def do_format(self, format): if (format is not None): raise IIIFError(code=415, parameter="format", text="Null manipulator does not support specification of output format.") # if (self.outfile is None): self.outfile = self.srcfile else: try: shutil.copyfile(self.srcfile, self.outfile) except IOError as e: raise IIIFError(code=500, text="Failed to copy file (%s)." % (str(e))) self.mime_type = None
[ "def", "do_format", "(", "self", ",", "format", ")", ":", "if", "(", "format", "is", "not", "None", ")", ":", "raise", "IIIFError", "(", "code", "=", "415", ",", "parameter", "=", "\"format\"", ",", "text", "=", "\"Null manipulator does not support specifica...
Null implementation of format selection. This is the last step, this null implementation does not accept any specification of a format because we don't even know what the input format is.
[ "Null", "implementation", "of", "format", "selection", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L162-L181
7,328
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.region_to_apply
def region_to_apply(self): """Return the x,y,w,h parameters to extract given image width and height. Assume image width and height are available in self.width and self.height, and self.request is IIIFRequest object Expected use: (x,y,w,h) = self.region_to_apply() if (x is None): # full image else: # extract Returns (None,None,None,None) if no extraction is required. """ if (self.request.region_full or (self.request.region_pct and self.request.region_xywh == (0, 0, 100, 100))): return(None, None, None, None) # Cannot do anything else unless we know size (in self.width and # self.height) if (self.width <= 0 or self.height <= 0): raise IIIFError(code=501, parameter='region', text="Region parameters require knowledge of image size which is not implemented.") if (self.request.region_square): if (self.width <= self.height): y_offset = (self.height - self.width) / 2 return(0, y_offset, self.width, self.width) else: # self.width>self.height x_offset = (self.width - self.height) / 2 return(x_offset, 0, self.height, self.height) # pct or explicit pixel sizes pct = self.request.region_pct (x, y, w, h) = self.request.region_xywh # Convert pct to pixels based on actual size if (pct): x = int((x / 100.0) * self.width + 0.5) y = int((y / 100.0) * self.height + 0.5) w = int((w / 100.0) * self.width + 0.5) h = int((h / 100.0) * self.height + 0.5) # Check if boundary extends beyond image and truncate if ((x + w) > self.width): w = self.width - x if ((y + h) > self.height): h = self.height - y # Final check to see if we have the whole image if (w == 0 or h == 0): raise IIIFZeroSizeError(code=400, parameter='region', text="Region parameters would result in zero size result image.") if (x == 0 and y == 0 and w == self.width and h == self.height): return(None, None, None, None) return(x, y, w, h)
python
def region_to_apply(self): if (self.request.region_full or (self.request.region_pct and self.request.region_xywh == (0, 0, 100, 100))): return(None, None, None, None) # Cannot do anything else unless we know size (in self.width and # self.height) if (self.width <= 0 or self.height <= 0): raise IIIFError(code=501, parameter='region', text="Region parameters require knowledge of image size which is not implemented.") if (self.request.region_square): if (self.width <= self.height): y_offset = (self.height - self.width) / 2 return(0, y_offset, self.width, self.width) else: # self.width>self.height x_offset = (self.width - self.height) / 2 return(x_offset, 0, self.height, self.height) # pct or explicit pixel sizes pct = self.request.region_pct (x, y, w, h) = self.request.region_xywh # Convert pct to pixels based on actual size if (pct): x = int((x / 100.0) * self.width + 0.5) y = int((y / 100.0) * self.height + 0.5) w = int((w / 100.0) * self.width + 0.5) h = int((h / 100.0) * self.height + 0.5) # Check if boundary extends beyond image and truncate if ((x + w) > self.width): w = self.width - x if ((y + h) > self.height): h = self.height - y # Final check to see if we have the whole image if (w == 0 or h == 0): raise IIIFZeroSizeError(code=400, parameter='region', text="Region parameters would result in zero size result image.") if (x == 0 and y == 0 and w == self.width and h == self.height): return(None, None, None, None) return(x, y, w, h)
[ "def", "region_to_apply", "(", "self", ")", ":", "if", "(", "self", ".", "request", ".", "region_full", "or", "(", "self", ".", "request", ".", "region_pct", "and", "self", ".", "request", ".", "region_xywh", "==", "(", "0", ",", "0", ",", "100", ","...
Return the x,y,w,h parameters to extract given image width and height. Assume image width and height are available in self.width and self.height, and self.request is IIIFRequest object Expected use: (x,y,w,h) = self.region_to_apply() if (x is None): # full image else: # extract Returns (None,None,None,None) if no extraction is required.
[ "Return", "the", "x", "y", "w", "h", "parameters", "to", "extract", "given", "image", "width", "and", "height", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L192-L243
7,329
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.size_to_apply
def size_to_apply(self): """Calculate size of image scaled using size parameters. Assumes current image width and height are available in self.width and self.height, and self.request is IIIFRequest object. Formats are: w, ,h w,h pct:p !w,h full max Returns (None,None) if no scaling is required. If max is requested and neither max_area or max_width are specified then this is the same as full. Otherwise the limits are used to determine the size. """ if (self.request.size_full or self.request.size_pct == 100.0): # full size return(None, None) # Not trivially full size, look at possibilities in turn w = self.width h = self.height if (self.request.size_max): # use size limits if present, else full if (self.max_area and self.max_area < (w * h)): scale = (float(self.max_area) / float(w * h)) ** 0.5 w = int(w * scale + 0.5) h = int(h * scale + 0.5) if (self.max_width): max_height = self.max_height if self.max_height is not None else self.max_width if (self.max_width < w): # calculate wrt original width, height rather than # w, h to avoid compounding rounding issues scale = float(self.max_width) / float(self.width) w = int(self.width * scale + 0.5) h = int(self.height * scale + 0.5) if (max_height < h): scale = float(max_height) / float(self.height) w = int(self.width * scale + 0.5) h = int(self.height * scale + 0.5) elif (self.request.size_pct is not None): w = int(self.width * self.request.size_pct / 100.0 + 0.5) h = int(self.height * self.request.size_pct / 100.0 + 0.5) elif (self.request.size_bang): # Have "!w,h" form (mw, mh) = self.request.size_wh # Pick smaller fraction and then work from that... frac = min((float(mw) / float(self.width)), (float(mh) / float(self.height))) w = int(self.width * frac + 0.5) h = int(self.height * frac + 0.5) else: # Must now be "w,h", "w," or ",h". 
If both are specified then this will the size, # otherwise find other to keep aspect ratio (w, h) = self.request.size_wh if (w is None): w = int(self.width * h / self.height + 0.5) elif (h is None): h = int(self.height * w / self.width + 0.5) # Now have w,h, sanity check and return if (w == 0 or h == 0): raise IIIFZeroSizeError( code=400, parameter='size', text="Size parameter would result in zero size result image (%d,%d)." % (w, h)) # Below would be test for scaling up image size, this is allowed by spec # if ( w>self.width or h>self.height ): # raise IIIFError(code=400,parameter='size', # text="Size requests scaling up image to larger than orginal.") if (w == self.width and h == self.height): return(None, None) return(w, h)
python
def size_to_apply(self): if (self.request.size_full or self.request.size_pct == 100.0): # full size return(None, None) # Not trivially full size, look at possibilities in turn w = self.width h = self.height if (self.request.size_max): # use size limits if present, else full if (self.max_area and self.max_area < (w * h)): scale = (float(self.max_area) / float(w * h)) ** 0.5 w = int(w * scale + 0.5) h = int(h * scale + 0.5) if (self.max_width): max_height = self.max_height if self.max_height is not None else self.max_width if (self.max_width < w): # calculate wrt original width, height rather than # w, h to avoid compounding rounding issues scale = float(self.max_width) / float(self.width) w = int(self.width * scale + 0.5) h = int(self.height * scale + 0.5) if (max_height < h): scale = float(max_height) / float(self.height) w = int(self.width * scale + 0.5) h = int(self.height * scale + 0.5) elif (self.request.size_pct is not None): w = int(self.width * self.request.size_pct / 100.0 + 0.5) h = int(self.height * self.request.size_pct / 100.0 + 0.5) elif (self.request.size_bang): # Have "!w,h" form (mw, mh) = self.request.size_wh # Pick smaller fraction and then work from that... frac = min((float(mw) / float(self.width)), (float(mh) / float(self.height))) w = int(self.width * frac + 0.5) h = int(self.height * frac + 0.5) else: # Must now be "w,h", "w," or ",h". If both are specified then this will the size, # otherwise find other to keep aspect ratio (w, h) = self.request.size_wh if (w is None): w = int(self.width * h / self.height + 0.5) elif (h is None): h = int(self.height * w / self.width + 0.5) # Now have w,h, sanity check and return if (w == 0 or h == 0): raise IIIFZeroSizeError( code=400, parameter='size', text="Size parameter would result in zero size result image (%d,%d)." 
% (w, h)) # Below would be test for scaling up image size, this is allowed by spec # if ( w>self.width or h>self.height ): # raise IIIFError(code=400,parameter='size', # text="Size requests scaling up image to larger than orginal.") if (w == self.width and h == self.height): return(None, None) return(w, h)
[ "def", "size_to_apply", "(", "self", ")", ":", "if", "(", "self", ".", "request", ".", "size_full", "or", "self", ".", "request", ".", "size_pct", "==", "100.0", ")", ":", "# full size", "return", "(", "None", ",", "None", ")", "# Not trivially full size, ...
Calculate size of image scaled using size parameters. Assumes current image width and height are available in self.width and self.height, and self.request is IIIFRequest object. Formats are: w, ,h w,h pct:p !w,h full max Returns (None,None) if no scaling is required. If max is requested and neither max_area or max_width are specified then this is the same as full. Otherwise the limits are used to determine the size.
[ "Calculate", "size", "of", "image", "scaled", "using", "size", "parameters", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L245-L313
7,330
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.quality_to_apply
def quality_to_apply(self): """Value of quality parameter to use in processing request. Simple substitution of 'native' or 'default' if no quality parameter is specified. """ if (self.request.quality is None): if (self.api_version <= '1.1'): return('native') else: return('default') return(self.request.quality)
python
def quality_to_apply(self): if (self.request.quality is None): if (self.api_version <= '1.1'): return('native') else: return('default') return(self.request.quality)
[ "def", "quality_to_apply", "(", "self", ")", ":", "if", "(", "self", ".", "request", ".", "quality", "is", "None", ")", ":", "if", "(", "self", ".", "api_version", "<=", "'1.1'", ")", ":", "return", "(", "'native'", ")", "else", ":", "return", "(", ...
Value of quality parameter to use in processing request. Simple substitution of 'native' or 'default' if no quality parameter is specified.
[ "Value", "of", "quality", "parameter", "to", "use", "in", "processing", "request", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L331-L342
7,331
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.scale_factors
def scale_factors(self, tile_width, tile_height=None): """Return a set of scale factors for given tile and window size. Gives a set of scale factors, starting at 1, and in multiples of 2. Largest scale_factor is so that one tile will cover the entire image (self.width,self.height). If tile_height is not specified then tiles are assumed to be squares of tile_width pixels. """ if (not tile_height): tile_height = tile_width sf = 1 scale_factors = [sf] for j in range(30): # limit of 2^30, should be enough! sf = 2 * sf if (tile_width * sf > self.width and tile_height * sf > self.height): break scale_factors.append(sf) return scale_factors
python
def scale_factors(self, tile_width, tile_height=None): if (not tile_height): tile_height = tile_width sf = 1 scale_factors = [sf] for j in range(30): # limit of 2^30, should be enough! sf = 2 * sf if (tile_width * sf > self.width and tile_height * sf > self.height): break scale_factors.append(sf) return scale_factors
[ "def", "scale_factors", "(", "self", ",", "tile_width", ",", "tile_height", "=", "None", ")", ":", "if", "(", "not", "tile_height", ")", ":", "tile_height", "=", "tile_width", "sf", "=", "1", "scale_factors", "=", "[", "sf", "]", "for", "j", "in", "ran...
Return a set of scale factors for given tile and window size. Gives a set of scale factors, starting at 1, and in multiples of 2. Largest scale_factor is so that one tile will cover the entire image (self.width,self.height). If tile_height is not specified then tiles are assumed to be squares of tile_width pixels.
[ "Return", "a", "set", "of", "scale", "factors", "for", "given", "tile", "and", "window", "size", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L353-L373
7,332
zimeon/iiif
iiif/generators/mandlebrot_100k.py
PixelGen.color
def color(self, n): """Color of pixel that reached limit after n iterations. Returns a color tuple for use with PIL, tending toward red as we tend toward self.max_iter iterations. """ red = int(n * self.shade_factor) if (red > 255): red = 255 return (red, 50, 100)
python
def color(self, n): red = int(n * self.shade_factor) if (red > 255): red = 255 return (red, 50, 100)
[ "def", "color", "(", "self", ",", "n", ")", ":", "red", "=", "int", "(", "n", "*", "self", ".", "shade_factor", ")", "if", "(", "red", ">", "255", ")", ":", "red", "=", "255", "return", "(", "red", ",", "50", ",", "100", ")" ]
Color of pixel that reached limit after n iterations. Returns a color tuple for use with PIL, tending toward red as we tend toward self.max_iter iterations.
[ "Color", "of", "pixel", "that", "reached", "limit", "after", "n", "iterations", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/generators/mandlebrot_100k.py#L44-L53
7,333
zimeon/iiif
iiif/generators/mandlebrot_100k.py
PixelGen.mpixel
def mpixel(self, z, n=0): """Iteration in Mandlebrot coordinate z.""" z = z * z + self.c if (abs(z) > 2.0): return self.color(n) n += 1 if (n > self.max_iter): return None return self.mpixel(z, n)
python
def mpixel(self, z, n=0): z = z * z + self.c if (abs(z) > 2.0): return self.color(n) n += 1 if (n > self.max_iter): return None return self.mpixel(z, n)
[ "def", "mpixel", "(", "self", ",", "z", ",", "n", "=", "0", ")", ":", "z", "=", "z", "*", "z", "+", "self", ".", "c", "if", "(", "abs", "(", "z", ")", ">", "2.0", ")", ":", "return", "self", ".", "color", "(", "n", ")", "n", "+=", "1", ...
Iteration in Mandlebrot coordinate z.
[ "Iteration", "in", "Mandlebrot", "coordinate", "z", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/generators/mandlebrot_100k.py#L55-L63
7,334
zimeon/iiif
iiif/static.py
static_partial_tile_sizes
def static_partial_tile_sizes(width, height, tilesize, scale_factors): """Generator for partial tile sizes for zoomed in views. Positional arguments: width -- width of full size image height -- height of full size image tilesize -- width and height of tiles scale_factors -- iterable of scale factors, typically [1,2,4..] Yields ([rx,ry,rw,rh],[sw,sh]), the region and size for each tile """ for sf in scale_factors: if (sf * tilesize >= width and sf * tilesize >= height): continue # avoid any full-region tiles rts = tilesize * sf # tile size in original region xt = (width - 1) // rts + 1 yt = (height - 1) // rts + 1 for nx in range(xt): rx = nx * rts rxe = rx + rts if (rxe > width): rxe = width rw = rxe - rx # same as sw = int(math.ceil(rw/float(sf))) sw = (rw + sf - 1) // sf for ny in range(yt): ry = ny * rts rye = ry + rts if (rye > height): rye = height rh = rye - ry # same as sh = int(math.ceil(rh/float(sf))) sh = (rh + sf - 1) // sf yield([rx, ry, rw, rh], [sw, sh])
python
def static_partial_tile_sizes(width, height, tilesize, scale_factors): for sf in scale_factors: if (sf * tilesize >= width and sf * tilesize >= height): continue # avoid any full-region tiles rts = tilesize * sf # tile size in original region xt = (width - 1) // rts + 1 yt = (height - 1) // rts + 1 for nx in range(xt): rx = nx * rts rxe = rx + rts if (rxe > width): rxe = width rw = rxe - rx # same as sw = int(math.ceil(rw/float(sf))) sw = (rw + sf - 1) // sf for ny in range(yt): ry = ny * rts rye = ry + rts if (rye > height): rye = height rh = rye - ry # same as sh = int(math.ceil(rh/float(sf))) sh = (rh + sf - 1) // sf yield([rx, ry, rw, rh], [sw, sh])
[ "def", "static_partial_tile_sizes", "(", "width", ",", "height", ",", "tilesize", ",", "scale_factors", ")", ":", "for", "sf", "in", "scale_factors", ":", "if", "(", "sf", "*", "tilesize", ">=", "width", "and", "sf", "*", "tilesize", ">=", "height", ")", ...
Generator for partial tile sizes for zoomed in views. Positional arguments: width -- width of full size image height -- height of full size image tilesize -- width and height of tiles scale_factors -- iterable of scale factors, typically [1,2,4..] Yields ([rx,ry,rw,rh],[sw,sh]), the region and size for each tile
[ "Generator", "for", "partial", "tile", "sizes", "for", "zoomed", "in", "views", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L21-L54
7,335
zimeon/iiif
iiif/static.py
static_full_sizes
def static_full_sizes(width, height, tilesize): """Generator for scaled-down full image sizes. Positional arguments: width -- width of full size image height -- height of full size image tilesize -- width and height of tiles Yields [sw,sh], the size for each full-region tile that is less than the tilesize. This includes tiles up to the full image size if that is smaller than the tilesize. """ # FIXME - Not sure what correct algorithm is for this, from # observation of Openseadragon it seems that one keeps halving # the pixel size of the full image until until both width and # height are less than the tile size. After that all subsequent # halving of the image size are used, all the way down to 1,1. # It seems that without these reduced size full-region images, # OpenSeadragon will not display any unzoomed image in small windows. # # I do not understand the algorithm that OpenSeadragon uses (or # know where it is in the code) to decide how small a version of # the complete image to request. It seems that there is a bug in # OpenSeadragon here because in some cases it requests images # of size 1,1 multiple times, which is anyway a useless image. for level in range(0, 20): factor = 2.0**level sw = int(width / factor + 0.5) sh = int(height / factor + 0.5) if (sw < tilesize and sh < tilesize): if (sw < 1 or sh < 1): break yield([sw, sh])
python
def static_full_sizes(width, height, tilesize): # FIXME - Not sure what correct algorithm is for this, from # observation of Openseadragon it seems that one keeps halving # the pixel size of the full image until until both width and # height are less than the tile size. After that all subsequent # halving of the image size are used, all the way down to 1,1. # It seems that without these reduced size full-region images, # OpenSeadragon will not display any unzoomed image in small windows. # # I do not understand the algorithm that OpenSeadragon uses (or # know where it is in the code) to decide how small a version of # the complete image to request. It seems that there is a bug in # OpenSeadragon here because in some cases it requests images # of size 1,1 multiple times, which is anyway a useless image. for level in range(0, 20): factor = 2.0**level sw = int(width / factor + 0.5) sh = int(height / factor + 0.5) if (sw < tilesize and sh < tilesize): if (sw < 1 or sh < 1): break yield([sw, sh])
[ "def", "static_full_sizes", "(", "width", ",", "height", ",", "tilesize", ")", ":", "# FIXME - Not sure what correct algorithm is for this, from", "# observation of Openseadragon it seems that one keeps halving", "# the pixel size of the full image until until both width and", "# height ar...
Generator for scaled-down full image sizes. Positional arguments: width -- width of full size image height -- height of full size image tilesize -- width and height of tiles Yields [sw,sh], the size for each full-region tile that is less than the tilesize. This includes tiles up to the full image size if that is smaller than the tilesize.
[ "Generator", "for", "scaled", "-", "down", "full", "image", "sizes", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L57-L89
7,336
zimeon/iiif
iiif/static.py
IIIFStatic.parse_extra
def parse_extra(self, extra): """Parse extra request parameters to IIIFRequest object.""" if extra.startswith('/'): extra = extra[1:] r = IIIFRequest(identifier='dummy', api_version=self.api_version) r.parse_url(extra) if (r.info): raise IIIFStaticError("Attempt to specify Image Information in extras.") return(r)
python
def parse_extra(self, extra): if extra.startswith('/'): extra = extra[1:] r = IIIFRequest(identifier='dummy', api_version=self.api_version) r.parse_url(extra) if (r.info): raise IIIFStaticError("Attempt to specify Image Information in extras.") return(r)
[ "def", "parse_extra", "(", "self", ",", "extra", ")", ":", "if", "extra", ".", "startswith", "(", "'/'", ")", ":", "extra", "=", "extra", "[", "1", ":", "]", "r", "=", "IIIFRequest", "(", "identifier", "=", "'dummy'", ",", "api_version", "=", "self",...
Parse extra request parameters to IIIFRequest object.
[ "Parse", "extra", "request", "parameters", "to", "IIIFRequest", "object", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L191-L200
7,337
zimeon/iiif
iiif/static.py
IIIFStatic.get_osd_config
def get_osd_config(self, osd_version): """Select appropriate portion of config. If the version requested is not supported the raise an exception with a helpful error message listing the versions supported. """ if (osd_version in self.osd_config): return(self.osd_config[osd_version]) else: raise IIIFStaticError("OpenSeadragon version %s not supported, available versions are %s" % (osd_version, ', '.join(sorted(self.osd_config.keys()))))
python
def get_osd_config(self, osd_version): if (osd_version in self.osd_config): return(self.osd_config[osd_version]) else: raise IIIFStaticError("OpenSeadragon version %s not supported, available versions are %s" % (osd_version, ', '.join(sorted(self.osd_config.keys()))))
[ "def", "get_osd_config", "(", "self", ",", "osd_version", ")", ":", "if", "(", "osd_version", "in", "self", ".", "osd_config", ")", ":", "return", "(", "self", ".", "osd_config", "[", "osd_version", "]", ")", "else", ":", "raise", "IIIFStaticError", "(", ...
Select appropriate portion of config. If the version requested is not supported the raise an exception with a helpful error message listing the versions supported.
[ "Select", "appropriate", "portion", "of", "config", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L202-L212
7,338
zimeon/iiif
iiif/static.py
IIIFStatic.generate
def generate(self, src=None, identifier=None): """Generate static files for one source image.""" self.src = src self.identifier = identifier # Get image details and calculate tiles im = self.manipulator_klass() im.srcfile = self.src im.set_max_image_pixels(self.max_image_pixels) im.do_first() width = im.width height = im.height scale_factors = im.scale_factors(self.tilesize) # Setup destination and IIIF identifier self.setup_destination() # Write out images for (region, size) in static_partial_tile_sizes(width, height, self.tilesize, scale_factors): self.generate_tile(region, size) sizes = [] for size in static_full_sizes(width, height, self.tilesize): # See https://github.com/zimeon/iiif/issues/9 sizes.append({'width': size[0], 'height': size[1]}) self.generate_tile('full', size) for request in self.extras: request.identifier = self.identifier if (request.is_scaled_full_image()): sizes.append({'width': request.size_wh[0], 'height': request.size_wh[1]}) self.generate_file(request) # Write info.json qualities = ['default'] if (self.api_version > '1.1') else ['native'] info = IIIFInfo(level=0, server_and_prefix=self.prefix, identifier=self.identifier, width=width, height=height, scale_factors=scale_factors, tile_width=self.tilesize, tile_height=self.tilesize, formats=['jpg'], qualities=qualities, sizes=sizes, api_version=self.api_version) json_file = os.path.join(self.dst, self.identifier, 'info.json') if (self.dryrun): self.logger.warning( "dryrun mode, would write the following files:") self.logger.warning("%s / %s/%s" % (self.dst, self.identifier, 'info.json')) else: with open(json_file, 'w') as f: f.write(info.as_json()) f.close() self.logger.info("%s / %s/%s" % (self.dst, self.identifier, 'info.json')) self.logger.debug("Written %s" % (json_file))
python
def generate(self, src=None, identifier=None): self.src = src self.identifier = identifier # Get image details and calculate tiles im = self.manipulator_klass() im.srcfile = self.src im.set_max_image_pixels(self.max_image_pixels) im.do_first() width = im.width height = im.height scale_factors = im.scale_factors(self.tilesize) # Setup destination and IIIF identifier self.setup_destination() # Write out images for (region, size) in static_partial_tile_sizes(width, height, self.tilesize, scale_factors): self.generate_tile(region, size) sizes = [] for size in static_full_sizes(width, height, self.tilesize): # See https://github.com/zimeon/iiif/issues/9 sizes.append({'width': size[0], 'height': size[1]}) self.generate_tile('full', size) for request in self.extras: request.identifier = self.identifier if (request.is_scaled_full_image()): sizes.append({'width': request.size_wh[0], 'height': request.size_wh[1]}) self.generate_file(request) # Write info.json qualities = ['default'] if (self.api_version > '1.1') else ['native'] info = IIIFInfo(level=0, server_and_prefix=self.prefix, identifier=self.identifier, width=width, height=height, scale_factors=scale_factors, tile_width=self.tilesize, tile_height=self.tilesize, formats=['jpg'], qualities=qualities, sizes=sizes, api_version=self.api_version) json_file = os.path.join(self.dst, self.identifier, 'info.json') if (self.dryrun): self.logger.warning( "dryrun mode, would write the following files:") self.logger.warning("%s / %s/%s" % (self.dst, self.identifier, 'info.json')) else: with open(json_file, 'w') as f: f.write(info.as_json()) f.close() self.logger.info("%s / %s/%s" % (self.dst, self.identifier, 'info.json')) self.logger.debug("Written %s" % (json_file))
[ "def", "generate", "(", "self", ",", "src", "=", "None", ",", "identifier", "=", "None", ")", ":", "self", ".", "src", "=", "src", "self", ".", "identifier", "=", "identifier", "# Get image details and calculate tiles", "im", "=", "self", ".", "manipulator_k...
Generate static files for one source image.
[ "Generate", "static", "files", "for", "one", "source", "image", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L214-L261
7,339
zimeon/iiif
iiif/static.py
IIIFStatic.generate_tile
def generate_tile(self, region, size): """Generate one tile for this given region, size of this image.""" r = IIIFRequest(identifier=self.identifier, api_version=self.api_version) if (region == 'full'): r.region_full = True else: r.region_xywh = region # [rx,ry,rw,rh] r.size_wh = size # [sw,sh] r.format = 'jpg' self.generate_file(r, True)
python
def generate_tile(self, region, size): r = IIIFRequest(identifier=self.identifier, api_version=self.api_version) if (region == 'full'): r.region_full = True else: r.region_xywh = region # [rx,ry,rw,rh] r.size_wh = size # [sw,sh] r.format = 'jpg' self.generate_file(r, True)
[ "def", "generate_tile", "(", "self", ",", "region", ",", "size", ")", ":", "r", "=", "IIIFRequest", "(", "identifier", "=", "self", ".", "identifier", ",", "api_version", "=", "self", ".", "api_version", ")", "if", "(", "region", "==", "'full'", ")", "...
Generate one tile for this given region, size of this image.
[ "Generate", "one", "tile", "for", "this", "given", "region", "size", "of", "this", "image", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L263-L273
7,340
zimeon/iiif
iiif/static.py
IIIFStatic.generate_file
def generate_file(self, r, undistorted=False): """Generate file for IIIFRequest object r from this image. FIXME - Would be nicer to have the test for an undistorted image request based on the IIIFRequest object, and then know whether to apply canonicalization or not. Logically we might use `w,h` instead of the Image API v2.0 canonical form `w,` if the api_version is 1.x. However, OSD 1.2.1 and 2.x assume the new canonical form even in the case where the API version is declared earlier. Thus, determine whether to use the canonical or `w,h` form based solely on the setting of osd_version. """ use_canonical = self.get_osd_config(self.osd_version)['use_canonical'] height = None if (undistorted and use_canonical): height = r.size_wh[1] r.size_wh = [r.size_wh[0], None] # [sw,sh] -> [sw,] path = r.url() # Generate... if (self.dryrun): self.logger.info("%s / %s" % (self.dst, path)) else: m = self.manipulator_klass(api_version=self.api_version) try: m.derive(srcfile=self.src, request=r, outfile=os.path.join(self.dst, path)) self.logger.info("%s / %s" % (self.dst, path)) except IIIFZeroSizeError: self.logger.info("%s / %s - zero size, skipped" % (self.dst, path)) return # done if zero size if (r.region_full and use_canonical and height is not None): # In v2.0 of the spec, the canonical URI form `w,` for scaled # images of the full region was introduced. This is somewhat at # odds with the requirement for `w,h` specified in `sizes` to # be available, and has problems of precision with tall narrow # images. Hopefully will be fixed in 3.0 but for now symlink # the `w,h` form to the `w,` dirs so that might use the specified # `w,h` also work. 
See # <https://github.com/IIIF/iiif.io/issues/544> # # FIXME - This is ugly because we duplicate code in # iiif.request.url to construct the partial URL region_dir = os.path.join(r.quote(r.identifier), "full") wh_dir = "%d,%d" % (r.size_wh[0], height) wh_path = os.path.join(region_dir, wh_dir) wc_dir = "%d," % (r.size_wh[0]) wc_path = os.path.join(region_dir, wc_dir) if (not self.dryrun): ln = os.path.join(self.dst, wh_path) if (os.path.exists(ln)): os.remove(ln) os.symlink(wc_dir, ln) self.logger.info("%s / %s -> %s" % (self.dst, wh_path, wc_path))
python
def generate_file(self, r, undistorted=False): use_canonical = self.get_osd_config(self.osd_version)['use_canonical'] height = None if (undistorted and use_canonical): height = r.size_wh[1] r.size_wh = [r.size_wh[0], None] # [sw,sh] -> [sw,] path = r.url() # Generate... if (self.dryrun): self.logger.info("%s / %s" % (self.dst, path)) else: m = self.manipulator_klass(api_version=self.api_version) try: m.derive(srcfile=self.src, request=r, outfile=os.path.join(self.dst, path)) self.logger.info("%s / %s" % (self.dst, path)) except IIIFZeroSizeError: self.logger.info("%s / %s - zero size, skipped" % (self.dst, path)) return # done if zero size if (r.region_full and use_canonical and height is not None): # In v2.0 of the spec, the canonical URI form `w,` for scaled # images of the full region was introduced. This is somewhat at # odds with the requirement for `w,h` specified in `sizes` to # be available, and has problems of precision with tall narrow # images. Hopefully will be fixed in 3.0 but for now symlink # the `w,h` form to the `w,` dirs so that might use the specified # `w,h` also work. See # <https://github.com/IIIF/iiif.io/issues/544> # # FIXME - This is ugly because we duplicate code in # iiif.request.url to construct the partial URL region_dir = os.path.join(r.quote(r.identifier), "full") wh_dir = "%d,%d" % (r.size_wh[0], height) wh_path = os.path.join(region_dir, wh_dir) wc_dir = "%d," % (r.size_wh[0]) wc_path = os.path.join(region_dir, wc_dir) if (not self.dryrun): ln = os.path.join(self.dst, wh_path) if (os.path.exists(ln)): os.remove(ln) os.symlink(wc_dir, ln) self.logger.info("%s / %s -> %s" % (self.dst, wh_path, wc_path))
[ "def", "generate_file", "(", "self", ",", "r", ",", "undistorted", "=", "False", ")", ":", "use_canonical", "=", "self", ".", "get_osd_config", "(", "self", ".", "osd_version", ")", "[", "'use_canonical'", "]", "height", "=", "None", "if", "(", "undistorte...
Generate file for IIIFRequest object r from this image. FIXME - Would be nicer to have the test for an undistorted image request based on the IIIFRequest object, and then know whether to apply canonicalization or not. Logically we might use `w,h` instead of the Image API v2.0 canonical form `w,` if the api_version is 1.x. However, OSD 1.2.1 and 2.x assume the new canonical form even in the case where the API version is declared earlier. Thus, determine whether to use the canonical or `w,h` form based solely on the setting of osd_version.
[ "Generate", "file", "for", "IIIFRequest", "object", "r", "from", "this", "image", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L275-L329
7,341
zimeon/iiif
iiif/static.py
IIIFStatic.setup_destination
def setup_destination(self): """Setup output directory based on self.dst and self.identifier. Returns the output directory name on success, raises and exception on failure. """ # Do we have a separate identifier? if (not self.identifier): # No separate identifier specified, split off the last path segment # of the source name, strip the extension to get the identifier self.identifier = os.path.splitext(os.path.split(self.src)[1])[0] # Done if dryrun, else setup self.dst first if (self.dryrun): return if (not self.dst): raise IIIFStaticError("No destination directory specified!") dst = self.dst if (os.path.isdir(dst)): # Exists, OK pass elif (os.path.isfile(dst)): raise IIIFStaticError( "Can't write to directory %s: a file of that name exists" % dst) else: os.makedirs(dst) # Second, create identifier based subdir if necessary outd = os.path.join(dst, self.identifier) if (os.path.isdir(outd)): # Nothing for now, perhaps should delete? self.logger.warning( "Output directory %s already exists, adding/updating files" % outd) pass elif (os.path.isfile(outd)): raise IIIFStaticError( "Can't write to directory %s: a file of that name exists" % outd) else: os.makedirs(outd) self.logger.debug("Output directory %s" % outd)
python
def setup_destination(self): # Do we have a separate identifier? if (not self.identifier): # No separate identifier specified, split off the last path segment # of the source name, strip the extension to get the identifier self.identifier = os.path.splitext(os.path.split(self.src)[1])[0] # Done if dryrun, else setup self.dst first if (self.dryrun): return if (not self.dst): raise IIIFStaticError("No destination directory specified!") dst = self.dst if (os.path.isdir(dst)): # Exists, OK pass elif (os.path.isfile(dst)): raise IIIFStaticError( "Can't write to directory %s: a file of that name exists" % dst) else: os.makedirs(dst) # Second, create identifier based subdir if necessary outd = os.path.join(dst, self.identifier) if (os.path.isdir(outd)): # Nothing for now, perhaps should delete? self.logger.warning( "Output directory %s already exists, adding/updating files" % outd) pass elif (os.path.isfile(outd)): raise IIIFStaticError( "Can't write to directory %s: a file of that name exists" % outd) else: os.makedirs(outd) self.logger.debug("Output directory %s" % outd)
[ "def", "setup_destination", "(", "self", ")", ":", "# Do we have a separate identifier?", "if", "(", "not", "self", ".", "identifier", ")", ":", "# No separate identifier specified, split off the last path segment", "# of the source name, strip the extension to get the identifier", ...
Setup output directory based on self.dst and self.identifier. Returns the output directory name on success, raises and exception on failure.
[ "Setup", "output", "directory", "based", "on", "self", ".", "dst", "and", "self", ".", "identifier", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L331-L368
7,342
zimeon/iiif
iiif/static.py
IIIFStatic.write_html
def write_html(self, html_dir='/tmp', include_osd=False, osd_width=500, osd_height=500): """Write HTML test page using OpenSeadragon for the tiles generated. Assumes that the generate(..) method has already been called to set up identifier etc. Parameters: html_dir - output directory for HTML files, will be created if it does not already exist include_osd - true to include OpenSeadragon code osd_width - width of OpenSeadragon pane in pixels osd_height - height of OpenSeadragon pane in pixels """ osd_config = self.get_osd_config(self.osd_version) osd_base = osd_config['base'] osd_dir = osd_config['dir'] # relative to base osd_js = os.path.join(osd_dir, osd_config['js']) osd_images = os.path.join(osd_dir, osd_config['images']) if (os.path.isdir(html_dir)): # Exists, fine pass elif (os.path.isfile(html_dir)): raise IIIFStaticError( "Can't write to directory %s: a file of that name exists" % html_dir) else: os.makedirs(html_dir) self.logger.info("Writing HTML to %s" % (html_dir)) with open(os.path.join(self.template_dir, 'static_osd.html'), 'r') as f: template = f.read() outfile = self.identifier + '.html' outpath = os.path.join(html_dir, outfile) with open(outpath, 'w') as f: info_json_uri = '/'.join([self.identifier, 'info.json']) if (self.prefix): info_json_uri = '/'.join([self.prefix, info_json_uri]) d = dict(identifier=self.identifier, api_version=self.api_version, osd_version=self.osd_version, osd_uri=osd_js, osd_images_prefix=osd_images, osd_height=osd_width, osd_width=osd_height, info_json_uri=info_json_uri) f.write(Template(template).safe_substitute(d)) self.logger.info("%s / %s" % (html_dir, outfile)) # Do we want to copy OSD in there too? 
If so, do it only if # we haven't already if (include_osd): if (self.copied_osd): self.logger.info("OpenSeadragon already copied") else: # Make directory, copy JavaScript and icons (from osd_images) osd_path = os.path.join(html_dir, osd_dir) if (not os.path.isdir(osd_path)): os.makedirs(osd_path) shutil.copyfile(os.path.join(osd_base, osd_js), os.path.join(html_dir, osd_js)) self.logger.info("%s / %s" % (html_dir, osd_js)) osd_images_path = os.path.join(html_dir, osd_images) if (os.path.isdir(osd_images_path)): self.logger.warning( "OpenSeadragon images directory (%s) already exists, skipping" % osd_images_path) else: shutil.copytree(os.path.join(osd_base, osd_images), osd_images_path) self.logger.info("%s / %s/*" % (html_dir, osd_images)) self.copied_osd = True
python
def write_html(self, html_dir='/tmp', include_osd=False, osd_width=500, osd_height=500): osd_config = self.get_osd_config(self.osd_version) osd_base = osd_config['base'] osd_dir = osd_config['dir'] # relative to base osd_js = os.path.join(osd_dir, osd_config['js']) osd_images = os.path.join(osd_dir, osd_config['images']) if (os.path.isdir(html_dir)): # Exists, fine pass elif (os.path.isfile(html_dir)): raise IIIFStaticError( "Can't write to directory %s: a file of that name exists" % html_dir) else: os.makedirs(html_dir) self.logger.info("Writing HTML to %s" % (html_dir)) with open(os.path.join(self.template_dir, 'static_osd.html'), 'r') as f: template = f.read() outfile = self.identifier + '.html' outpath = os.path.join(html_dir, outfile) with open(outpath, 'w') as f: info_json_uri = '/'.join([self.identifier, 'info.json']) if (self.prefix): info_json_uri = '/'.join([self.prefix, info_json_uri]) d = dict(identifier=self.identifier, api_version=self.api_version, osd_version=self.osd_version, osd_uri=osd_js, osd_images_prefix=osd_images, osd_height=osd_width, osd_width=osd_height, info_json_uri=info_json_uri) f.write(Template(template).safe_substitute(d)) self.logger.info("%s / %s" % (html_dir, outfile)) # Do we want to copy OSD in there too? 
If so, do it only if # we haven't already if (include_osd): if (self.copied_osd): self.logger.info("OpenSeadragon already copied") else: # Make directory, copy JavaScript and icons (from osd_images) osd_path = os.path.join(html_dir, osd_dir) if (not os.path.isdir(osd_path)): os.makedirs(osd_path) shutil.copyfile(os.path.join(osd_base, osd_js), os.path.join(html_dir, osd_js)) self.logger.info("%s / %s" % (html_dir, osd_js)) osd_images_path = os.path.join(html_dir, osd_images) if (os.path.isdir(osd_images_path)): self.logger.warning( "OpenSeadragon images directory (%s) already exists, skipping" % osd_images_path) else: shutil.copytree(os.path.join(osd_base, osd_images), osd_images_path) self.logger.info("%s / %s/*" % (html_dir, osd_images)) self.copied_osd = True
[ "def", "write_html", "(", "self", ",", "html_dir", "=", "'/tmp'", ",", "include_osd", "=", "False", ",", "osd_width", "=", "500", ",", "osd_height", "=", "500", ")", ":", "osd_config", "=", "self", ".", "get_osd_config", "(", "self", ".", "osd_version", ...
Write HTML test page using OpenSeadragon for the tiles generated. Assumes that the generate(..) method has already been called to set up identifier etc. Parameters: html_dir - output directory for HTML files, will be created if it does not already exist include_osd - true to include OpenSeadragon code osd_width - width of OpenSeadragon pane in pixels osd_height - height of OpenSeadragon pane in pixels
[ "Write", "HTML", "test", "page", "using", "OpenSeadragon", "for", "the", "tiles", "generated", "." ]
9d10018d01202fa2a76dfa61598dc6eca07b471f
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L370-L436
7,343
polyaxon/polyaxon-schemas
polyaxon_schemas/utils.py
get_value
def get_value(key, obj, default=missing): """Helper for pulling a keyed value off various types of objects""" if isinstance(key, int): return _get_value_for_key(key, obj, default) return _get_value_for_keys(key.split('.'), obj, default)
python
def get_value(key, obj, default=missing): if isinstance(key, int): return _get_value_for_key(key, obj, default) return _get_value_for_keys(key.split('.'), obj, default)
[ "def", "get_value", "(", "key", ",", "obj", ",", "default", "=", "missing", ")", ":", "if", "isinstance", "(", "key", ",", "int", ")", ":", "return", "_get_value_for_key", "(", "key", ",", "obj", ",", "default", ")", "return", "_get_value_for_keys", "(",...
Helper for pulling a keyed value off various types of objects
[ "Helper", "for", "pulling", "a", "keyed", "value", "off", "various", "types", "of", "objects" ]
a5360240316f4bbccfcdcb97a489cab14458277a
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/utils.py#L371-L375
7,344
polyaxon/polyaxon-schemas
polyaxon_schemas/specs/libs/validator.py
validate_headers
def validate_headers(spec, data): """Validates headers data and creates the config objects""" validated_data = { spec.VERSION: data[spec.VERSION], spec.KIND: data[spec.KIND], } if data.get(spec.LOGGING): validated_data[spec.LOGGING] = LoggingConfig.from_dict( data[spec.LOGGING]) if data.get(spec.TAGS): validated_data[spec.TAGS] = data[spec.TAGS] if data.get(spec.HP_TUNING): validated_data[spec.HP_TUNING] = HPTuningConfig.from_dict( data[spec.HP_TUNING]) return validated_data
python
def validate_headers(spec, data): validated_data = { spec.VERSION: data[spec.VERSION], spec.KIND: data[spec.KIND], } if data.get(spec.LOGGING): validated_data[spec.LOGGING] = LoggingConfig.from_dict( data[spec.LOGGING]) if data.get(spec.TAGS): validated_data[spec.TAGS] = data[spec.TAGS] if data.get(spec.HP_TUNING): validated_data[spec.HP_TUNING] = HPTuningConfig.from_dict( data[spec.HP_TUNING]) return validated_data
[ "def", "validate_headers", "(", "spec", ",", "data", ")", ":", "validated_data", "=", "{", "spec", ".", "VERSION", ":", "data", "[", "spec", ".", "VERSION", "]", ",", "spec", ".", "KIND", ":", "data", "[", "spec", ".", "KIND", "]", ",", "}", "if", ...
Validates headers data and creates the config objects
[ "Validates", "headers", "data", "and", "creates", "the", "config", "objects" ]
a5360240316f4bbccfcdcb97a489cab14458277a
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/specs/libs/validator.py#L16-L34
7,345
polyaxon/polyaxon-schemas
polyaxon_schemas/specs/libs/validator.py
validate
def validate(spec, data): """Validates the data and creates the config objects""" data = copy.deepcopy(data) validated_data = {} def validate_keys(section, config, section_data): if not isinstance(section_data, dict) or section == spec.MODEL: return extra_args = [key for key in section_data.keys() if key not in config.SCHEMA().fields] if extra_args: raise PolyaxonfileError('Extra arguments passed for `{}`: {}'.format( section, extra_args)) def add_validated_section(section, config): if data.get(section): section_data = data[section] validate_keys(section=section, config=config, section_data=section_data) validated_data[section] = config.from_dict(section_data) add_validated_section(spec.ENVIRONMENT, spec.ENVIRONMENT_CONFIG) add_validated_section(spec.BUILD, BuildConfig) add_validated_section(spec.RUN, RunConfig) add_validated_section(spec.MODEL, ModelConfig) add_validated_section(spec.TRAIN, TrainConfig) add_validated_section(spec.EVAL, EvalConfig) return validated_data
python
def validate(spec, data): data = copy.deepcopy(data) validated_data = {} def validate_keys(section, config, section_data): if not isinstance(section_data, dict) or section == spec.MODEL: return extra_args = [key for key in section_data.keys() if key not in config.SCHEMA().fields] if extra_args: raise PolyaxonfileError('Extra arguments passed for `{}`: {}'.format( section, extra_args)) def add_validated_section(section, config): if data.get(section): section_data = data[section] validate_keys(section=section, config=config, section_data=section_data) validated_data[section] = config.from_dict(section_data) add_validated_section(spec.ENVIRONMENT, spec.ENVIRONMENT_CONFIG) add_validated_section(spec.BUILD, BuildConfig) add_validated_section(spec.RUN, RunConfig) add_validated_section(spec.MODEL, ModelConfig) add_validated_section(spec.TRAIN, TrainConfig) add_validated_section(spec.EVAL, EvalConfig) return validated_data
[ "def", "validate", "(", "spec", ",", "data", ")", ":", "data", "=", "copy", ".", "deepcopy", "(", "data", ")", "validated_data", "=", "{", "}", "def", "validate_keys", "(", "section", ",", "config", ",", "section_data", ")", ":", "if", "not", "isinstan...
Validates the data and creates the config objects
[ "Validates", "the", "data", "and", "creates", "the", "config", "objects" ]
a5360240316f4bbccfcdcb97a489cab14458277a
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/specs/libs/validator.py#L37-L64
7,346
polyaxon/polyaxon-schemas
polyaxon_schemas/ops/experiment.py
ExperimentSchema.validate_replicas
def validate_replicas(self, data): """Validate distributed experiment""" environment = data.get('environment') if environment and environment.replicas: validate_replicas(data.get('framework'), environment.replicas)
python
def validate_replicas(self, data): environment = data.get('environment') if environment and environment.replicas: validate_replicas(data.get('framework'), environment.replicas)
[ "def", "validate_replicas", "(", "self", ",", "data", ")", ":", "environment", "=", "data", ".", "get", "(", "'environment'", ")", "if", "environment", "and", "environment", ".", "replicas", ":", "validate_replicas", "(", "data", ".", "get", "(", "'framework...
Validate distributed experiment
[ "Validate", "distributed", "experiment" ]
a5360240316f4bbccfcdcb97a489cab14458277a
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/ops/experiment.py#L89-L93
7,347
polyaxon/polyaxon-schemas
polyaxon_schemas/specs/group.py
GroupSpecification.get_experiment_spec
def get_experiment_spec(self, matrix_declaration): """Returns an experiment spec for this group spec and the given matrix declaration.""" parsed_data = Parser.parse(self, self._data, matrix_declaration) del parsed_data[self.HP_TUNING] validator.validate(spec=self, data=parsed_data) return ExperimentSpecification(values=[parsed_data, {'kind': self._EXPERIMENT}])
python
def get_experiment_spec(self, matrix_declaration): parsed_data = Parser.parse(self, self._data, matrix_declaration) del parsed_data[self.HP_TUNING] validator.validate(spec=self, data=parsed_data) return ExperimentSpecification(values=[parsed_data, {'kind': self._EXPERIMENT}])
[ "def", "get_experiment_spec", "(", "self", ",", "matrix_declaration", ")", ":", "parsed_data", "=", "Parser", ".", "parse", "(", "self", ",", "self", ".", "_data", ",", "matrix_declaration", ")", "del", "parsed_data", "[", "self", ".", "HP_TUNING", "]", "val...
Returns an experiment spec for this group spec and the given matrix declaration.
[ "Returns", "an", "experiment", "spec", "for", "this", "group", "spec", "and", "the", "given", "matrix", "declaration", "." ]
a5360240316f4bbccfcdcb97a489cab14458277a
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/specs/group.py#L75-L80
7,348
polyaxon/polyaxon-schemas
polyaxon_schemas/specs/group.py
GroupSpecification.get_build_spec
def get_build_spec(self): """Returns a build spec for this group spec.""" if BaseSpecification.BUILD not in self._data: return None return BuildConfig.from_dict(self._data[BaseSpecification.BUILD])
python
def get_build_spec(self): if BaseSpecification.BUILD not in self._data: return None return BuildConfig.from_dict(self._data[BaseSpecification.BUILD])
[ "def", "get_build_spec", "(", "self", ")", ":", "if", "BaseSpecification", ".", "BUILD", "not", "in", "self", ".", "_data", ":", "return", "None", "return", "BuildConfig", ".", "from_dict", "(", "self", ".", "_data", "[", "BaseSpecification", ".", "BUILD", ...
Returns a build spec for this group spec.
[ "Returns", "a", "build", "spec", "for", "this", "group", "spec", "." ]
a5360240316f4bbccfcdcb97a489cab14458277a
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/specs/group.py#L82-L86
7,349
polyaxon/polyaxon-schemas
polyaxon_schemas/ops/hptuning.py
HPTuningSchema.validate_matrix
def validate_matrix(self, data): """Validates matrix data and creates the config objects""" is_grid_search = ( data.get('grid_search') is not None or (data.get('grid_search') is None and data.get('random_search') is None and data.get('hyperband') is None and data.get('bo') is None) ) is_bo = data.get('bo') is not None validate_matrix(data.get('matrix'), is_grid_search=is_grid_search, is_bo=is_bo)
python
def validate_matrix(self, data): is_grid_search = ( data.get('grid_search') is not None or (data.get('grid_search') is None and data.get('random_search') is None and data.get('hyperband') is None and data.get('bo') is None) ) is_bo = data.get('bo') is not None validate_matrix(data.get('matrix'), is_grid_search=is_grid_search, is_bo=is_bo)
[ "def", "validate_matrix", "(", "self", ",", "data", ")", ":", "is_grid_search", "=", "(", "data", ".", "get", "(", "'grid_search'", ")", "is", "not", "None", "or", "(", "data", ".", "get", "(", "'grid_search'", ")", "is", "None", "and", "data", ".", ...
Validates matrix data and creates the config objects
[ "Validates", "matrix", "data", "and", "creates", "the", "config", "objects" ]
a5360240316f4bbccfcdcb97a489cab14458277a
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/ops/hptuning.py#L371-L381
7,350
tompollard/tableone
tableone.py
TableOne._generate_remark_str
def _generate_remark_str(self, end_of_line = '\n'): """ Generate a series of remarks that the user should consider when interpreting the summary statistics. """ warnings = {} msg = '{}'.format(end_of_line) # generate warnings for continuous variables if self._continuous: # highlight far outliers outlier_mask = self.cont_describe.far_outliers > 1 outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index) if outlier_vars: warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars # highlight possible multimodal distributions using hartigan's dip test # -1 values indicate NaN modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05) modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index) if modal_vars: warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars # highlight non normal distributions # -1 values indicate NaN modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001) modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index) if modal_vars: warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars # create the warning string for n,k in enumerate(sorted(warnings)): msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line) return msg
python
def _generate_remark_str(self, end_of_line = '\n'): warnings = {} msg = '{}'.format(end_of_line) # generate warnings for continuous variables if self._continuous: # highlight far outliers outlier_mask = self.cont_describe.far_outliers > 1 outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index) if outlier_vars: warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars # highlight possible multimodal distributions using hartigan's dip test # -1 values indicate NaN modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05) modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index) if modal_vars: warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars # highlight non normal distributions # -1 values indicate NaN modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001) modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index) if modal_vars: warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars # create the warning string for n,k in enumerate(sorted(warnings)): msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line) return msg
[ "def", "_generate_remark_str", "(", "self", ",", "end_of_line", "=", "'\\n'", ")", ":", "warnings", "=", "{", "}", "msg", "=", "'{}'", ".", "format", "(", "end_of_line", ")", "# generate warnings for continuous variables", "if", "self", ".", "_continuous", ":", ...
Generate a series of remarks that the user should consider when interpreting the summary statistics.
[ "Generate", "a", "series", "of", "remarks", "that", "the", "user", "should", "consider", "when", "interpreting", "the", "summary", "statistics", "." ]
4a274d3d2f8d16b8eaa0bde030f3da29b876cee8
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L200-L235
7,351
tompollard/tableone
tableone.py
TableOne._detect_categorical_columns
def _detect_categorical_columns(self,data): """ Detect categorical columns if they are not specified. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- likely_cat : list List of variables that appear to be categorical. """ # assume all non-numerical and date columns are categorical numeric_cols = set(data._get_numeric_data().columns.values) date_cols = set(data.select_dtypes(include=[np.datetime64]).columns) likely_cat = set(data.columns) - numeric_cols likely_cat = list(likely_cat - date_cols) # check proportion of unique values if numerical for var in data._get_numeric_data().columns: likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05 if likely_flag: likely_cat.append(var) return likely_cat
python
def _detect_categorical_columns(self,data): # assume all non-numerical and date columns are categorical numeric_cols = set(data._get_numeric_data().columns.values) date_cols = set(data.select_dtypes(include=[np.datetime64]).columns) likely_cat = set(data.columns) - numeric_cols likely_cat = list(likely_cat - date_cols) # check proportion of unique values if numerical for var in data._get_numeric_data().columns: likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05 if likely_flag: likely_cat.append(var) return likely_cat
[ "def", "_detect_categorical_columns", "(", "self", ",", "data", ")", ":", "# assume all non-numerical and date columns are categorical", "numeric_cols", "=", "set", "(", "data", ".", "_get_numeric_data", "(", ")", ".", "columns", ".", "values", ")", "date_cols", "=", ...
Detect categorical columns if they are not specified. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- likely_cat : list List of variables that appear to be categorical.
[ "Detect", "categorical", "columns", "if", "they", "are", "not", "specified", "." ]
4a274d3d2f8d16b8eaa0bde030f3da29b876cee8
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L237-L261
7,352
tompollard/tableone
tableone.py
TableOne._std
def _std(self,x): """ Compute standard deviation with ddof degrees of freedom """ return np.nanstd(x.values,ddof=self._ddof)
python
def _std(self,x): return np.nanstd(x.values,ddof=self._ddof)
[ "def", "_std", "(", "self", ",", "x", ")", ":", "return", "np", ".", "nanstd", "(", "x", ".", "values", ",", "ddof", "=", "self", ".", "_ddof", ")" ]
Compute standard deviation with ddof degrees of freedom
[ "Compute", "standard", "deviation", "with", "ddof", "degrees", "of", "freedom" ]
4a274d3d2f8d16b8eaa0bde030f3da29b876cee8
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L275-L279
7,353
tompollard/tableone
tableone.py
TableOne._tukey
def _tukey(self,x,threshold): """ Count outliers according to Tukey's rule. Where Q1 is the lower quartile and Q3 is the upper quartile, an outlier is an observation outside of the range: [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)] k = 1.5 indicates an outlier k = 3.0 indicates an outlier that is "far out" """ vals = x.values[~np.isnan(x.values)] try: q1, q3 = np.percentile(vals, [25, 75]) iqr = q3 - q1 low_bound = q1 - (iqr * threshold) high_bound = q3 + (iqr * threshold) outliers = np.where((vals > high_bound) | (vals < low_bound)) except: outliers = [] return outliers
python
def _tukey(self,x,threshold): vals = x.values[~np.isnan(x.values)] try: q1, q3 = np.percentile(vals, [25, 75]) iqr = q3 - q1 low_bound = q1 - (iqr * threshold) high_bound = q3 + (iqr * threshold) outliers = np.where((vals > high_bound) | (vals < low_bound)) except: outliers = [] return outliers
[ "def", "_tukey", "(", "self", ",", "x", ",", "threshold", ")", ":", "vals", "=", "x", ".", "values", "[", "~", "np", ".", "isnan", "(", "x", ".", "values", ")", "]", "try", ":", "q1", ",", "q3", "=", "np", ".", "percentile", "(", "vals", ",",...
Count outliers according to Tukey's rule. Where Q1 is the lower quartile and Q3 is the upper quartile, an outlier is an observation outside of the range: [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)] k = 1.5 indicates an outlier k = 3.0 indicates an outlier that is "far out"
[ "Count", "outliers", "according", "to", "Tukey", "s", "rule", "." ]
4a274d3d2f8d16b8eaa0bde030f3da29b876cee8
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L311-L332
7,354
tompollard/tableone
tableone.py
TableOne._outliers
def _outliers(self,x): """ Compute number of outliers """ outliers = self._tukey(x, threshold = 1.5) return np.size(outliers)
python
def _outliers(self,x): outliers = self._tukey(x, threshold = 1.5) return np.size(outliers)
[ "def", "_outliers", "(", "self", ",", "x", ")", ":", "outliers", "=", "self", ".", "_tukey", "(", "x", ",", "threshold", "=", "1.5", ")", "return", "np", ".", "size", "(", "outliers", ")" ]
Compute number of outliers
[ "Compute", "number", "of", "outliers" ]
4a274d3d2f8d16b8eaa0bde030f3da29b876cee8
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L334-L339
7,355
tompollard/tableone
tableone.py
TableOne._far_outliers
def _far_outliers(self,x): """ Compute number of "far out" outliers """ outliers = self._tukey(x, threshold = 3.0) return np.size(outliers)
python
def _far_outliers(self,x): outliers = self._tukey(x, threshold = 3.0) return np.size(outliers)
[ "def", "_far_outliers", "(", "self", ",", "x", ")", ":", "outliers", "=", "self", ".", "_tukey", "(", "x", ",", "threshold", "=", "3.0", ")", "return", "np", ".", "size", "(", "outliers", ")" ]
Compute number of "far out" outliers
[ "Compute", "number", "of", "far", "out", "outliers" ]
4a274d3d2f8d16b8eaa0bde030f3da29b876cee8
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L341-L346
7,356
tompollard/tableone
tableone.py
TableOne._create_cat_describe
def _create_cat_describe(self,data): """ Describe the categorical data. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df_cat : pandas DataFrame Summarise the categorical variables. """ group_dict = {} for g in self._groupbylvls: if self._groupby: d_slice = data.loc[data[self._groupby] == g, self._categorical] else: d_slice = data[self._categorical].copy() # create a dataframe with freq, proportion df = d_slice.copy() # convert type to string to avoid int converted to boolean, avoiding nans for column in df.columns: df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values] df = df.melt().groupby(['variable','value']).size().to_frame(name='freq') df.index.set_names('level', level=1, inplace=True) df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100 # set number of decimal places for percent if isinstance(self._decimals,int): n = self._decimals f = '{{:.{}f}}'.format(n) df['percent'] = df['percent'].astype(float).map(f.format) elif isinstance(self._decimals,dict): df.loc[:,'percent'] = df.apply(self._format_cat, axis=1) else: n = 1 f = '{{:.{}f}}'.format(n) df['percent'] = df['percent'].astype(float).map(f.format) # add n column, listing total non-null values for each variable ct = d_slice.count().to_frame(name='n') ct.index.name = 'variable' df = df.join(ct) # add null count nulls = d_slice.isnull().sum().to_frame(name='isnull') nulls.index.name = 'variable' # only save null count to the first category for each variable # do this by extracting the first category from the df row index levels = df.reset_index()[['variable','level']].groupby('variable').first() # add this category to the nulls table nulls = nulls.join(levels) nulls.set_index('level', append=True, inplace=True) # join nulls to categorical df = df.join(nulls) # add summary column df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')' # add to dictionary group_dict[g] = df df_cat = 
pd.concat(group_dict,axis=1) # ensure the groups are the 2nd level of the column index if df_cat.columns.nlevels>1: df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0) return df_cat
python
def _create_cat_describe(self,data): group_dict = {} for g in self._groupbylvls: if self._groupby: d_slice = data.loc[data[self._groupby] == g, self._categorical] else: d_slice = data[self._categorical].copy() # create a dataframe with freq, proportion df = d_slice.copy() # convert type to string to avoid int converted to boolean, avoiding nans for column in df.columns: df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values] df = df.melt().groupby(['variable','value']).size().to_frame(name='freq') df.index.set_names('level', level=1, inplace=True) df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100 # set number of decimal places for percent if isinstance(self._decimals,int): n = self._decimals f = '{{:.{}f}}'.format(n) df['percent'] = df['percent'].astype(float).map(f.format) elif isinstance(self._decimals,dict): df.loc[:,'percent'] = df.apply(self._format_cat, axis=1) else: n = 1 f = '{{:.{}f}}'.format(n) df['percent'] = df['percent'].astype(float).map(f.format) # add n column, listing total non-null values for each variable ct = d_slice.count().to_frame(name='n') ct.index.name = 'variable' df = df.join(ct) # add null count nulls = d_slice.isnull().sum().to_frame(name='isnull') nulls.index.name = 'variable' # only save null count to the first category for each variable # do this by extracting the first category from the df row index levels = df.reset_index()[['variable','level']].groupby('variable').first() # add this category to the nulls table nulls = nulls.join(levels) nulls.set_index('level', append=True, inplace=True) # join nulls to categorical df = df.join(nulls) # add summary column df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')' # add to dictionary group_dict[g] = df df_cat = pd.concat(group_dict,axis=1) # ensure the groups are the 2nd level of the column index if df_cat.columns.nlevels>1: df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0) return df_cat
[ "def", "_create_cat_describe", "(", "self", ",", "data", ")", ":", "group_dict", "=", "{", "}", "for", "g", "in", "self", ".", "_groupbylvls", ":", "if", "self", ".", "_groupby", ":", "d_slice", "=", "data", ".", "loc", "[", "data", "[", "self", ".",...
Describe the categorical data. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df_cat : pandas DataFrame Summarise the categorical variables.
[ "Describe", "the", "categorical", "data", "." ]
4a274d3d2f8d16b8eaa0bde030f3da29b876cee8
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L443-L516
7,357
tompollard/tableone
tableone.py
TableOne._create_significance_table
def _create_significance_table(self,data): """ Create a table containing p-values for significance tests. Add features of the distributions and the p-values to the dataframe. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df : pandas DataFrame A table containing the p-values, test name, etc. """ # list features of the variable e.g. matched, paired, n_expected df=pd.DataFrame(index=self._continuous+self._categorical, columns=['continuous','nonnormal','min_observed','pval','ptest']) df.index.rename('variable', inplace=True) df['continuous'] = np.where(df.index.isin(self._continuous),True,False) df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False) # list values for each variable, grouped by groupby levels for v in df.index: is_continuous = df.loc[v]['continuous'] is_categorical = ~df.loc[v]['continuous'] is_normal = ~df.loc[v]['nonnormal'] # if continuous, group data into list of lists if is_continuous: catlevels = None grouped_data = [] for s in self._groupbylvls: lvl_data = data.loc[data[self._groupby]==s, v] # coerce to numeric and drop non-numeric data lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna() # append to overall group data grouped_data.append(lvl_data.values) min_observed = len(min(grouped_data,key=len)) # if categorical, create contingency table elif is_categorical: catlevels = sorted(data[v].astype('category').cat.categories) grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v]) min_observed = grouped_data.sum(axis=1).min() # minimum number of observations across all levels df.loc[v,'min_observed'] = min_observed # compute pvalues df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v, grouped_data,is_continuous,is_categorical, is_normal,min_observed,catlevels) return df
python
def _create_significance_table(self,data): # list features of the variable e.g. matched, paired, n_expected df=pd.DataFrame(index=self._continuous+self._categorical, columns=['continuous','nonnormal','min_observed','pval','ptest']) df.index.rename('variable', inplace=True) df['continuous'] = np.where(df.index.isin(self._continuous),True,False) df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False) # list values for each variable, grouped by groupby levels for v in df.index: is_continuous = df.loc[v]['continuous'] is_categorical = ~df.loc[v]['continuous'] is_normal = ~df.loc[v]['nonnormal'] # if continuous, group data into list of lists if is_continuous: catlevels = None grouped_data = [] for s in self._groupbylvls: lvl_data = data.loc[data[self._groupby]==s, v] # coerce to numeric and drop non-numeric data lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna() # append to overall group data grouped_data.append(lvl_data.values) min_observed = len(min(grouped_data,key=len)) # if categorical, create contingency table elif is_categorical: catlevels = sorted(data[v].astype('category').cat.categories) grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v]) min_observed = grouped_data.sum(axis=1).min() # minimum number of observations across all levels df.loc[v,'min_observed'] = min_observed # compute pvalues df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v, grouped_data,is_continuous,is_categorical, is_normal,min_observed,catlevels) return df
[ "def", "_create_significance_table", "(", "self", ",", "data", ")", ":", "# list features of the variable e.g. matched, paired, n_expected", "df", "=", "pd", ".", "DataFrame", "(", "index", "=", "self", ".", "_continuous", "+", "self", ".", "_categorical", ",", "col...
Create a table containing p-values for significance tests. Add features of the distributions and the p-values to the dataframe. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df : pandas DataFrame A table containing the p-values, test name, etc.
[ "Create", "a", "table", "containing", "p", "-", "values", "for", "significance", "tests", ".", "Add", "features", "of", "the", "distributions", "and", "the", "p", "-", "values", "to", "the", "dataframe", "." ]
4a274d3d2f8d16b8eaa0bde030f3da29b876cee8
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L518-L572
7,358
tompollard/tableone
tableone.py
TableOne._create_cont_table
def _create_cont_table(self,data): """ Create tableone for continuous data. Returns ---------- table : pandas DataFrame A table summarising the continuous variables. """ # remove the t1_summary level table = self.cont_describe[['t1_summary']].copy() table.columns = table.columns.droplevel(level=0) # add a column of null counts as 1-count() from previous function nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull') try: table = table.join(nulltable) except TypeError: # if columns form a CategoricalIndex, need to convert to string first table.columns = table.columns.astype(str) table = table.join(nulltable) # add an empty level column, for joining with cat table table['level'] = '' table.set_index([table.index,'level'],inplace=True) # add pval column if self._pval and self._pval_adjust: table = table.join(self._significance_table[['pval (adjusted)','ptest']]) elif self._pval: table = table.join(self._significance_table[['pval','ptest']]) return table
python
def _create_cont_table(self,data): # remove the t1_summary level table = self.cont_describe[['t1_summary']].copy() table.columns = table.columns.droplevel(level=0) # add a column of null counts as 1-count() from previous function nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull') try: table = table.join(nulltable) except TypeError: # if columns form a CategoricalIndex, need to convert to string first table.columns = table.columns.astype(str) table = table.join(nulltable) # add an empty level column, for joining with cat table table['level'] = '' table.set_index([table.index,'level'],inplace=True) # add pval column if self._pval and self._pval_adjust: table = table.join(self._significance_table[['pval (adjusted)','ptest']]) elif self._pval: table = table.join(self._significance_table[['pval','ptest']]) return table
[ "def", "_create_cont_table", "(", "self", ",", "data", ")", ":", "# remove the t1_summary level", "table", "=", "self", ".", "cont_describe", "[", "[", "'t1_summary'", "]", "]", ".", "copy", "(", ")", "table", ".", "columns", "=", "table", ".", "columns", ...
Create tableone for continuous data. Returns ---------- table : pandas DataFrame A table summarising the continuous variables.
[ "Create", "tableone", "for", "continuous", "data", "." ]
4a274d3d2f8d16b8eaa0bde030f3da29b876cee8
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L642-L673
7,359
tompollard/tableone
tableone.py
TableOne._create_cat_table
def _create_cat_table(self,data): """ Create table one for categorical data. Returns ---------- table : pandas DataFrame A table summarising the categorical variables. """ table = self.cat_describe['t1_summary'].copy() # add the total count of null values across all levels isnull = data[self._categorical].isnull().sum().to_frame(name='isnull') isnull.index.rename('variable', inplace=True) try: table = table.join(isnull) except TypeError: # if columns form a CategoricalIndex, need to convert to string first table.columns = table.columns.astype(str) table = table.join(isnull) # add pval column if self._pval and self._pval_adjust: table = table.join(self._significance_table[['pval (adjusted)','ptest']]) elif self._pval: table = table.join(self._significance_table[['pval','ptest']]) return table
python
def _create_cat_table(self,data): table = self.cat_describe['t1_summary'].copy() # add the total count of null values across all levels isnull = data[self._categorical].isnull().sum().to_frame(name='isnull') isnull.index.rename('variable', inplace=True) try: table = table.join(isnull) except TypeError: # if columns form a CategoricalIndex, need to convert to string first table.columns = table.columns.astype(str) table = table.join(isnull) # add pval column if self._pval and self._pval_adjust: table = table.join(self._significance_table[['pval (adjusted)','ptest']]) elif self._pval: table = table.join(self._significance_table[['pval','ptest']]) return table
[ "def", "_create_cat_table", "(", "self", ",", "data", ")", ":", "table", "=", "self", ".", "cat_describe", "[", "'t1_summary'", "]", ".", "copy", "(", ")", "# add the total count of null values across all levels", "isnull", "=", "data", "[", "self", ".", "_categ...
Create table one for categorical data. Returns ---------- table : pandas DataFrame A table summarising the categorical variables.
[ "Create", "table", "one", "for", "categorical", "data", "." ]
4a274d3d2f8d16b8eaa0bde030f3da29b876cee8
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L675-L700
7,360
tompollard/tableone
tableone.py
TableOne._create_row_labels
def _create_row_labels(self): """ Take the original labels for rows. Rename if alternative labels are provided. Append label suffix if label_suffix is True. Returns ---------- labels : dictionary Dictionary, keys are original column name, values are final label. """ # start with the original column names labels = {} for c in self._columns: labels[c] = c # replace column names with alternative names if provided if self._alt_labels: for k in self._alt_labels.keys(): labels[k] = self._alt_labels[k] # append the label suffix if self._label_suffix: for k in labels.keys(): if k in self._nonnormal: labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]") elif k in self._categorical: labels[k] = "{}, {}".format(labels[k],"n (%)") else: labels[k] = "{}, {}".format(labels[k],"mean (SD)") return labels
python
def _create_row_labels(self): # start with the original column names labels = {} for c in self._columns: labels[c] = c # replace column names with alternative names if provided if self._alt_labels: for k in self._alt_labels.keys(): labels[k] = self._alt_labels[k] # append the label suffix if self._label_suffix: for k in labels.keys(): if k in self._nonnormal: labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]") elif k in self._categorical: labels[k] = "{}, {}".format(labels[k],"n (%)") else: labels[k] = "{}, {}".format(labels[k],"mean (SD)") return labels
[ "def", "_create_row_labels", "(", "self", ")", ":", "# start with the original column names", "labels", "=", "{", "}", "for", "c", "in", "self", ".", "_columns", ":", "labels", "[", "c", "]", "=", "c", "# replace column names with alternative names if provided", "if...
Take the original labels for rows. Rename if alternative labels are provided. Append label suffix if label_suffix is True. Returns ---------- labels : dictionary Dictionary, keys are original column name, values are final label.
[ "Take", "the", "original", "labels", "for", "rows", ".", "Rename", "if", "alternative", "labels", "are", "provided", ".", "Append", "label", "suffix", "if", "label_suffix", "is", "True", "." ]
4a274d3d2f8d16b8eaa0bde030f3da29b876cee8
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L836-L867
7,361
tompollard/tableone
modality.py
bandwidth_factor
def bandwidth_factor(nbr_data_pts, deriv_order=0): ''' Scale factor for one-dimensional plug-in bandwidth selection. ''' if deriv_order == 0: return (3.0*nbr_data_pts/4)**(-1.0/5) if deriv_order == 2: return (7.0*nbr_data_pts/4)**(-1.0/9) raise ValueError('Not implemented for derivative of order {}'.format(deriv_order))
python
def bandwidth_factor(nbr_data_pts, deriv_order=0): ''' Scale factor for one-dimensional plug-in bandwidth selection. ''' if deriv_order == 0: return (3.0*nbr_data_pts/4)**(-1.0/5) if deriv_order == 2: return (7.0*nbr_data_pts/4)**(-1.0/9) raise ValueError('Not implemented for derivative of order {}'.format(deriv_order))
[ "def", "bandwidth_factor", "(", "nbr_data_pts", ",", "deriv_order", "=", "0", ")", ":", "if", "deriv_order", "==", "0", ":", "return", "(", "3.0", "*", "nbr_data_pts", "/", "4", ")", "**", "(", "-", "1.0", "/", "5", ")", "if", "deriv_order", "==", "2...
Scale factor for one-dimensional plug-in bandwidth selection.
[ "Scale", "factor", "for", "one", "-", "dimensional", "plug", "-", "in", "bandwidth", "selection", "." ]
4a274d3d2f8d16b8eaa0bde030f3da29b876cee8
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/modality.py#L1011-L1021
7,362
Pylons/paginate
paginate/__init__.py
make_html_tag
def make_html_tag(tag, text=None, **params): """Create an HTML tag string. tag The HTML tag to use (e.g. 'a', 'span' or 'div') text The text to enclose between opening and closing tag. If no text is specified then only the opening tag is returned. Example:: make_html_tag('a', text="Hello", href="/another/page") -> <a href="/another/page">Hello</a> To use reserved Python keywords like "class" as a parameter prepend it with an underscore. Instead of "class='green'" use "_class='green'". Warning: Quotes and apostrophes are not escaped.""" params_string = "" # Parameters are passed. Turn the dict into a string like "a=1 b=2 c=3" string. for key, value in sorted(params.items()): # Strip off a leading underscore from the attribute's key to allow attributes like '_class' # to be used as a CSS class specification instead of the reserved Python keyword 'class'. key = key.lstrip("_") params_string += u' {0}="{1}"'.format(key, value) # Create the tag string tag_string = u"<{0}{1}>".format(tag, params_string) # Add text and closing tag if required. if text: tag_string += u"{0}</{1}>".format(text, tag) return tag_string
python
def make_html_tag(tag, text=None, **params): params_string = "" # Parameters are passed. Turn the dict into a string like "a=1 b=2 c=3" string. for key, value in sorted(params.items()): # Strip off a leading underscore from the attribute's key to allow attributes like '_class' # to be used as a CSS class specification instead of the reserved Python keyword 'class'. key = key.lstrip("_") params_string += u' {0}="{1}"'.format(key, value) # Create the tag string tag_string = u"<{0}{1}>".format(tag, params_string) # Add text and closing tag if required. if text: tag_string += u"{0}</{1}>".format(text, tag) return tag_string
[ "def", "make_html_tag", "(", "tag", ",", "text", "=", "None", ",", "*", "*", "params", ")", ":", "params_string", "=", "\"\"", "# Parameters are passed. Turn the dict into a string like \"a=1 b=2 c=3\" string.", "for", "key", ",", "value", "in", "sorted", "(", "para...
Create an HTML tag string. tag The HTML tag to use (e.g. 'a', 'span' or 'div') text The text to enclose between opening and closing tag. If no text is specified then only the opening tag is returned. Example:: make_html_tag('a', text="Hello", href="/another/page") -> <a href="/another/page">Hello</a> To use reserved Python keywords like "class" as a parameter prepend it with an underscore. Instead of "class='green'" use "_class='green'". Warning: Quotes and apostrophes are not escaped.
[ "Create", "an", "HTML", "tag", "string", "." ]
07e6f62c00a731839ca2da32e6d6a37b31a13d4f
https://github.com/Pylons/paginate/blob/07e6f62c00a731839ca2da32e6d6a37b31a13d4f/paginate/__init__.py#L828-L863
7,363
Pylons/paginate
paginate/__init__.py
Page._range
def _range(self, link_map, radius): """ Return range of linked pages to substiture placeholder in pattern """ leftmost_page = max(self.first_page, (self.page - radius)) rightmost_page = min(self.last_page, (self.page + radius)) nav_items = [] # Create a link to the first page (unless we are on the first page # or there would be no need to insert '..' spacers) if self.page != self.first_page and self.first_page < leftmost_page: page = link_map["first_page"].copy() page["value"] = unicode(page["number"]) nav_items.append(self.link_tag(page)) for item in link_map["range_pages"]: nav_items.append(self.link_tag(item)) # Create a link to the very last page (unless we are on the last # page or there would be no need to insert '..' spacers) if self.page != self.last_page and rightmost_page < self.last_page: page = link_map["last_page"].copy() page["value"] = unicode(page["number"]) nav_items.append(self.link_tag(page)) return self.separator.join(nav_items)
python
def _range(self, link_map, radius): leftmost_page = max(self.first_page, (self.page - radius)) rightmost_page = min(self.last_page, (self.page + radius)) nav_items = [] # Create a link to the first page (unless we are on the first page # or there would be no need to insert '..' spacers) if self.page != self.first_page and self.first_page < leftmost_page: page = link_map["first_page"].copy() page["value"] = unicode(page["number"]) nav_items.append(self.link_tag(page)) for item in link_map["range_pages"]: nav_items.append(self.link_tag(item)) # Create a link to the very last page (unless we are on the last # page or there would be no need to insert '..' spacers) if self.page != self.last_page and rightmost_page < self.last_page: page = link_map["last_page"].copy() page["value"] = unicode(page["number"]) nav_items.append(self.link_tag(page)) return self.separator.join(nav_items)
[ "def", "_range", "(", "self", ",", "link_map", ",", "radius", ")", ":", "leftmost_page", "=", "max", "(", "self", ".", "first_page", ",", "(", "self", ".", "page", "-", "radius", ")", ")", "rightmost_page", "=", "min", "(", "self", ".", "last_page", ...
Return range of linked pages to substiture placeholder in pattern
[ "Return", "range", "of", "linked", "pages", "to", "substiture", "placeholder", "in", "pattern" ]
07e6f62c00a731839ca2da32e6d6a37b31a13d4f
https://github.com/Pylons/paginate/blob/07e6f62c00a731839ca2da32e6d6a37b31a13d4f/paginate/__init__.py#L773-L799
7,364
Pylons/paginate
paginate/__init__.py
Page.default_link_tag
def default_link_tag(item): """ Create an A-HREF tag that points to another page. """ text = item["value"] target_url = item["href"] if not item["href"] or item["type"] in ("span", "current_page"): if item["attrs"]: text = make_html_tag("span", **item["attrs"]) + text + "</span>" return text return make_html_tag("a", text=text, href=target_url, **item["attrs"])
python
def default_link_tag(item): text = item["value"] target_url = item["href"] if not item["href"] or item["type"] in ("span", "current_page"): if item["attrs"]: text = make_html_tag("span", **item["attrs"]) + text + "</span>" return text return make_html_tag("a", text=text, href=target_url, **item["attrs"])
[ "def", "default_link_tag", "(", "item", ")", ":", "text", "=", "item", "[", "\"value\"", "]", "target_url", "=", "item", "[", "\"href\"", "]", "if", "not", "item", "[", "\"href\"", "]", "or", "item", "[", "\"type\"", "]", "in", "(", "\"span\"", ",", ...
Create an A-HREF tag that points to another page.
[ "Create", "an", "A", "-", "HREF", "tag", "that", "points", "to", "another", "page", "." ]
07e6f62c00a731839ca2da32e6d6a37b31a13d4f
https://github.com/Pylons/paginate/blob/07e6f62c00a731839ca2da32e6d6a37b31a13d4f/paginate/__init__.py#L813-L825
7,365
sloria/textblob-aptagger
textblob_aptagger/taggers.py
PerceptronTagger.tag
def tag(self, corpus, tokenize=True): '''Tags a string `corpus`.''' # Assume untokenized corpus has \n between sentences and ' ' between words s_split = SentenceTokenizer().tokenize if tokenize else lambda t: t.split('\n') w_split = WordTokenizer().tokenize if tokenize else lambda s: s.split() def split_sents(corpus): for s in s_split(corpus): yield w_split(s) prev, prev2 = self.START tokens = [] for words in split_sents(corpus): context = self.START + [self._normalize(w) for w in words] + self.END for i, word in enumerate(words): tag = self.tagdict.get(word) if not tag: features = self._get_features(i, word, context, prev, prev2) tag = self.model.predict(features) tokens.append((word, tag)) prev2 = prev prev = tag return tokens
python
def tag(self, corpus, tokenize=True): '''Tags a string `corpus`.''' # Assume untokenized corpus has \n between sentences and ' ' between words s_split = SentenceTokenizer().tokenize if tokenize else lambda t: t.split('\n') w_split = WordTokenizer().tokenize if tokenize else lambda s: s.split() def split_sents(corpus): for s in s_split(corpus): yield w_split(s) prev, prev2 = self.START tokens = [] for words in split_sents(corpus): context = self.START + [self._normalize(w) for w in words] + self.END for i, word in enumerate(words): tag = self.tagdict.get(word) if not tag: features = self._get_features(i, word, context, prev, prev2) tag = self.model.predict(features) tokens.append((word, tag)) prev2 = prev prev = tag return tokens
[ "def", "tag", "(", "self", ",", "corpus", ",", "tokenize", "=", "True", ")", ":", "# Assume untokenized corpus has \\n between sentences and ' ' between words", "s_split", "=", "SentenceTokenizer", "(", ")", ".", "tokenize", "if", "tokenize", "else", "lambda", "t", ...
Tags a string `corpus`.
[ "Tags", "a", "string", "corpus", "." ]
fb98bbd16a83650cab4819c4b89f0973e60fb3fe
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/taggers.py#L38-L59
7,366
sloria/textblob-aptagger
textblob_aptagger/taggers.py
PerceptronTagger.train
def train(self, sentences, save_loc=None, nr_iter=5): '''Train a model from sentences, and save it at ``save_loc``. ``nr_iter`` controls the number of Perceptron training iterations. :param sentences: A list of (words, tags) tuples. :param save_loc: If not ``None``, saves a pickled model in this location. :param nr_iter: Number of training iterations. ''' self._make_tagdict(sentences) self.model.classes = self.classes for iter_ in range(nr_iter): c = 0 n = 0 for words, tags in sentences: prev, prev2 = self.START context = self.START + [self._normalize(w) for w in words] \ + self.END for i, word in enumerate(words): guess = self.tagdict.get(word) if not guess: feats = self._get_features(i, word, context, prev, prev2) guess = self.model.predict(feats) self.model.update(tags[i], guess, feats) prev2 = prev prev = guess c += guess == tags[i] n += 1 random.shuffle(sentences) logging.info("Iter {0}: {1}/{2}={3}".format(iter_, c, n, _pc(c, n))) self.model.average_weights() # Pickle as a binary file if save_loc is not None: pickle.dump((self.model.weights, self.tagdict, self.classes), open(save_loc, 'wb'), -1) return None
python
def train(self, sentences, save_loc=None, nr_iter=5): '''Train a model from sentences, and save it at ``save_loc``. ``nr_iter`` controls the number of Perceptron training iterations. :param sentences: A list of (words, tags) tuples. :param save_loc: If not ``None``, saves a pickled model in this location. :param nr_iter: Number of training iterations. ''' self._make_tagdict(sentences) self.model.classes = self.classes for iter_ in range(nr_iter): c = 0 n = 0 for words, tags in sentences: prev, prev2 = self.START context = self.START + [self._normalize(w) for w in words] \ + self.END for i, word in enumerate(words): guess = self.tagdict.get(word) if not guess: feats = self._get_features(i, word, context, prev, prev2) guess = self.model.predict(feats) self.model.update(tags[i], guess, feats) prev2 = prev prev = guess c += guess == tags[i] n += 1 random.shuffle(sentences) logging.info("Iter {0}: {1}/{2}={3}".format(iter_, c, n, _pc(c, n))) self.model.average_weights() # Pickle as a binary file if save_loc is not None: pickle.dump((self.model.weights, self.tagdict, self.classes), open(save_loc, 'wb'), -1) return None
[ "def", "train", "(", "self", ",", "sentences", ",", "save_loc", "=", "None", ",", "nr_iter", "=", "5", ")", ":", "self", ".", "_make_tagdict", "(", "sentences", ")", "self", ".", "model", ".", "classes", "=", "self", ".", "classes", "for", "iter_", "...
Train a model from sentences, and save it at ``save_loc``. ``nr_iter`` controls the number of Perceptron training iterations. :param sentences: A list of (words, tags) tuples. :param save_loc: If not ``None``, saves a pickled model in this location. :param nr_iter: Number of training iterations.
[ "Train", "a", "model", "from", "sentences", "and", "save", "it", "at", "save_loc", ".", "nr_iter", "controls", "the", "number", "of", "Perceptron", "training", "iterations", "." ]
fb98bbd16a83650cab4819c4b89f0973e60fb3fe
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/taggers.py#L61-L95
7,367
sloria/textblob-aptagger
textblob_aptagger/taggers.py
PerceptronTagger.load
def load(self, loc): '''Load a pickled model.''' try: w_td_c = pickle.load(open(loc, 'rb')) except IOError: msg = ("Missing trontagger.pickle file.") raise MissingCorpusError(msg) self.model.weights, self.tagdict, self.classes = w_td_c self.model.classes = self.classes return None
python
def load(self, loc): '''Load a pickled model.''' try: w_td_c = pickle.load(open(loc, 'rb')) except IOError: msg = ("Missing trontagger.pickle file.") raise MissingCorpusError(msg) self.model.weights, self.tagdict, self.classes = w_td_c self.model.classes = self.classes return None
[ "def", "load", "(", "self", ",", "loc", ")", ":", "try", ":", "w_td_c", "=", "pickle", ".", "load", "(", "open", "(", "loc", ",", "'rb'", ")", ")", "except", "IOError", ":", "msg", "=", "(", "\"Missing trontagger.pickle file.\"", ")", "raise", "Missing...
Load a pickled model.
[ "Load", "a", "pickled", "model", "." ]
fb98bbd16a83650cab4819c4b89f0973e60fb3fe
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/taggers.py#L97-L106
7,368
sloria/textblob-aptagger
textblob_aptagger/taggers.py
PerceptronTagger._normalize
def _normalize(self, word): '''Normalization used in pre-processing. - All words are lower cased - Digits in the range 1800-2100 are represented as !YEAR; - Other digits are represented as !DIGITS :rtype: str ''' if '-' in word and word[0] != '-': return '!HYPHEN' elif word.isdigit() and len(word) == 4: return '!YEAR' elif word[0].isdigit(): return '!DIGITS' else: return word.lower()
python
def _normalize(self, word): '''Normalization used in pre-processing. - All words are lower cased - Digits in the range 1800-2100 are represented as !YEAR; - Other digits are represented as !DIGITS :rtype: str ''' if '-' in word and word[0] != '-': return '!HYPHEN' elif word.isdigit() and len(word) == 4: return '!YEAR' elif word[0].isdigit(): return '!DIGITS' else: return word.lower()
[ "def", "_normalize", "(", "self", ",", "word", ")", ":", "if", "'-'", "in", "word", "and", "word", "[", "0", "]", "!=", "'-'", ":", "return", "'!HYPHEN'", "elif", "word", ".", "isdigit", "(", ")", "and", "len", "(", "word", ")", "==", "4", ":", ...
Normalization used in pre-processing. - All words are lower cased - Digits in the range 1800-2100 are represented as !YEAR; - Other digits are represented as !DIGITS :rtype: str
[ "Normalization", "used", "in", "pre", "-", "processing", "." ]
fb98bbd16a83650cab4819c4b89f0973e60fb3fe
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/taggers.py#L108-L124
7,369
sloria/textblob-aptagger
textblob_aptagger/taggers.py
PerceptronTagger._make_tagdict
def _make_tagdict(self, sentences): '''Make a tag dictionary for single-tag words.''' counts = defaultdict(lambda: defaultdict(int)) for words, tags in sentences: for word, tag in zip(words, tags): counts[word][tag] += 1 self.classes.add(tag) freq_thresh = 20 ambiguity_thresh = 0.97 for word, tag_freqs in counts.items(): tag, mode = max(tag_freqs.items(), key=lambda item: item[1]) n = sum(tag_freqs.values()) # Don't add rare words to the tag dictionary # Only add quite unambiguous words if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh: self.tagdict[word] = tag
python
def _make_tagdict(self, sentences): '''Make a tag dictionary for single-tag words.''' counts = defaultdict(lambda: defaultdict(int)) for words, tags in sentences: for word, tag in zip(words, tags): counts[word][tag] += 1 self.classes.add(tag) freq_thresh = 20 ambiguity_thresh = 0.97 for word, tag_freqs in counts.items(): tag, mode = max(tag_freqs.items(), key=lambda item: item[1]) n = sum(tag_freqs.values()) # Don't add rare words to the tag dictionary # Only add quite unambiguous words if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh: self.tagdict[word] = tag
[ "def", "_make_tagdict", "(", "self", ",", "sentences", ")", ":", "counts", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "int", ")", ")", "for", "words", ",", "tags", "in", "sentences", ":", "for", "word", ",", "tag", "in", "zip", "(", ...
Make a tag dictionary for single-tag words.
[ "Make", "a", "tag", "dictionary", "for", "single", "-", "tag", "words", "." ]
fb98bbd16a83650cab4819c4b89f0973e60fb3fe
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/taggers.py#L153-L168
7,370
sloria/textblob-aptagger
textblob_aptagger/_perceptron.py
train
def train(nr_iter, examples): '''Return an averaged perceptron model trained on ``examples`` for ``nr_iter`` iterations. ''' model = AveragedPerceptron() for i in range(nr_iter): random.shuffle(examples) for features, class_ in examples: scores = model.predict(features) guess, score = max(scores.items(), key=lambda i: i[1]) if guess != class_: model.update(class_, guess, features) model.average_weights() return model
python
def train(nr_iter, examples): '''Return an averaged perceptron model trained on ``examples`` for ``nr_iter`` iterations. ''' model = AveragedPerceptron() for i in range(nr_iter): random.shuffle(examples) for features, class_ in examples: scores = model.predict(features) guess, score = max(scores.items(), key=lambda i: i[1]) if guess != class_: model.update(class_, guess, features) model.average_weights() return model
[ "def", "train", "(", "nr_iter", ",", "examples", ")", ":", "model", "=", "AveragedPerceptron", "(", ")", "for", "i", "in", "range", "(", "nr_iter", ")", ":", "random", ".", "shuffle", "(", "examples", ")", "for", "features", ",", "class_", "in", "examp...
Return an averaged perceptron model trained on ``examples`` for ``nr_iter`` iterations.
[ "Return", "an", "averaged", "perceptron", "model", "trained", "on", "examples", "for", "nr_iter", "iterations", "." ]
fb98bbd16a83650cab4819c4b89f0973e60fb3fe
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/_perceptron.py#L85-L98
7,371
statueofmike/rtsp
scripts/rtp.py
RtpPacket.decode
def decode(self, byteStream): """Decode the RTP packet.""" self.header = bytearray(byteStream[:HEADER_SIZE]) self.payload = byteStream[HEADER_SIZE:]
python
def decode(self, byteStream): self.header = bytearray(byteStream[:HEADER_SIZE]) self.payload = byteStream[HEADER_SIZE:]
[ "def", "decode", "(", "self", ",", "byteStream", ")", ":", "self", ".", "header", "=", "bytearray", "(", "byteStream", "[", ":", "HEADER_SIZE", "]", ")", "self", ".", "payload", "=", "byteStream", "[", "HEADER_SIZE", ":", "]" ]
Decode the RTP packet.
[ "Decode", "the", "RTP", "packet", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/rtp.py#L40-L43
7,372
statueofmike/rtsp
scripts/rtp.py
RtpPacket.timestamp
def timestamp(self): """Return timestamp.""" timestamp = self.header[4] << 24 | self.header[5] << 16 | self.header[6] << 8 | self.header[7] return int(timestamp)
python
def timestamp(self): timestamp = self.header[4] << 24 | self.header[5] << 16 | self.header[6] << 8 | self.header[7] return int(timestamp)
[ "def", "timestamp", "(", "self", ")", ":", "timestamp", "=", "self", ".", "header", "[", "4", "]", "<<", "24", "|", "self", ".", "header", "[", "5", "]", "<<", "16", "|", "self", ".", "header", "[", "6", "]", "<<", "8", "|", "self", ".", "hea...
Return timestamp.
[ "Return", "timestamp", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/rtp.py#L54-L57
7,373
statueofmike/rtsp
scripts/preview.py
preview_stream
def preview_stream(stream): """ Display stream in an OpenCV window until "q" key is pressed """ # together with waitkeys later, helps to close the video window effectively _cv2.startWindowThread() for frame in stream.frame_generator(): if frame is not None: _cv2.imshow('Video', frame) _cv2.moveWindow('Video',5,5) else: break key = _cv2.waitKey(1) & 0xFF if key == ord("q"): break _cv2.waitKey(1) _cv2.destroyAllWindows() _cv2.waitKey(1)
python
def preview_stream(stream): # together with waitkeys later, helps to close the video window effectively _cv2.startWindowThread() for frame in stream.frame_generator(): if frame is not None: _cv2.imshow('Video', frame) _cv2.moveWindow('Video',5,5) else: break key = _cv2.waitKey(1) & 0xFF if key == ord("q"): break _cv2.waitKey(1) _cv2.destroyAllWindows() _cv2.waitKey(1)
[ "def", "preview_stream", "(", "stream", ")", ":", "# together with waitkeys later, helps to close the video window effectively", "_cv2", ".", "startWindowThread", "(", ")", "for", "frame", "in", "stream", ".", "frame_generator", "(", ")", ":", "if", "frame", "is", "no...
Display stream in an OpenCV window until "q" key is pressed
[ "Display", "stream", "in", "an", "OpenCV", "window", "until", "q", "key", "is", "pressed" ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/preview.py#L9-L25
7,374
statueofmike/rtsp
scripts/rtsp.py
printrec
def printrec(recst): """ Pretty-printing rtsp strings """ try: recst = recst.decode('UTF-8') except AttributeError: pass recs=[ x for x in recst.split('\r\n') if x ] for rec in recs: print(rec) print("\n")
python
def printrec(recst): try: recst = recst.decode('UTF-8') except AttributeError: pass recs=[ x for x in recst.split('\r\n') if x ] for rec in recs: print(rec) print("\n")
[ "def", "printrec", "(", "recst", ")", ":", "try", ":", "recst", "=", "recst", ".", "decode", "(", "'UTF-8'", ")", "except", "AttributeError", ":", "pass", "recs", "=", "[", "x", "for", "x", "in", "recst", ".", "split", "(", "'\\r\\n'", ")", "if", "...
Pretty-printing rtsp strings
[ "Pretty", "-", "printing", "rtsp", "strings" ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/rtsp.py#L40-L50
7,375
statueofmike/rtsp
scripts/rtsp.py
get_resources
def get_resources(connection): """ Do an RTSP-DESCRIBE request, then parse out available resources from the response """ resp = connection.describe(verbose=False).split('\r\n') resources = [x.replace('a=control:','') for x in resp if (x.find('control:') != -1 and x[-1] != '*' )] return resources
python
def get_resources(connection): resp = connection.describe(verbose=False).split('\r\n') resources = [x.replace('a=control:','') for x in resp if (x.find('control:') != -1 and x[-1] != '*' )] return resources
[ "def", "get_resources", "(", "connection", ")", ":", "resp", "=", "connection", ".", "describe", "(", "verbose", "=", "False", ")", ".", "split", "(", "'\\r\\n'", ")", "resources", "=", "[", "x", ".", "replace", "(", "'a=control:'", ",", "''", ")", "fo...
Do an RTSP-DESCRIBE request, then parse out available resources from the response
[ "Do", "an", "RTSP", "-", "DESCRIBE", "request", "then", "parse", "out", "available", "resources", "from", "the", "response" ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/rtsp.py#L169-L173
7,376
statueofmike/rtsp
scripts/ffmpeg_client.py
FFmpegClient.fetch_image
def fetch_image(self,rtsp_server_uri = _source,timeout_secs = 15): """ Fetch a single frame using FFMPEG. Convert to PIL Image. Slow. """ self._check_ffmpeg() cmd = "ffmpeg -rtsp_transport tcp -i {} -loglevel quiet -frames 1 -f image2pipe -".format(rtsp_server_uri) #stdout = _sp.check_output(ffmpeg_cmd,timeout = timeout_secs) with _sp.Popen(cmd, shell=True, stdout=_sp.PIPE) as process: try: stdout,stderr = process.communicate(timeout=timeout_secs) except _sp.TimeoutExpired as e: process.kill() raise TimeoutError("Connection to {} timed out".format(rtsp_server_uri),e) return _Image.open(_io.BytesIO(stdout))
python
def fetch_image(self,rtsp_server_uri = _source,timeout_secs = 15): self._check_ffmpeg() cmd = "ffmpeg -rtsp_transport tcp -i {} -loglevel quiet -frames 1 -f image2pipe -".format(rtsp_server_uri) #stdout = _sp.check_output(ffmpeg_cmd,timeout = timeout_secs) with _sp.Popen(cmd, shell=True, stdout=_sp.PIPE) as process: try: stdout,stderr = process.communicate(timeout=timeout_secs) except _sp.TimeoutExpired as e: process.kill() raise TimeoutError("Connection to {} timed out".format(rtsp_server_uri),e) return _Image.open(_io.BytesIO(stdout))
[ "def", "fetch_image", "(", "self", ",", "rtsp_server_uri", "=", "_source", ",", "timeout_secs", "=", "15", ")", ":", "self", ".", "_check_ffmpeg", "(", ")", "cmd", "=", "\"ffmpeg -rtsp_transport tcp -i {} -loglevel quiet -frames 1 -f image2pipe -\"", ".", "format", "(...
Fetch a single frame using FFMPEG. Convert to PIL Image. Slow.
[ "Fetch", "a", "single", "frame", "using", "FFMPEG", ".", "Convert", "to", "PIL", "Image", ".", "Slow", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/ffmpeg_client.py#L28-L42
7,377
statueofmike/rtsp
scripts/others/rts2.py
Client.setupMovie
def setupMovie(self): """Setup button handler.""" if self.state == self.INIT: self.sendRtspRequest(self.SETUP)
python
def setupMovie(self): if self.state == self.INIT: self.sendRtspRequest(self.SETUP)
[ "def", "setupMovie", "(", "self", ")", ":", "if", "self", ".", "state", "==", "self", ".", "INIT", ":", "self", ".", "sendRtspRequest", "(", "self", ".", "SETUP", ")" ]
Setup button handler.
[ "Setup", "button", "handler", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/others/rts2.py#L37-L40
7,378
statueofmike/rtsp
scripts/others/rts2.py
Client.exitClient
def exitClient(self): """Teardown button handler.""" self.sendRtspRequest(self.TEARDOWN) #self.handler() os.remove(CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT) # Delete the cache image from video rate = float(self.counter/self.frameNbr) print('-'*60 + "\nRTP Packet Loss Rate :" + str(rate) +"\n" + '-'*60) sys.exit(0)
python
def exitClient(self): self.sendRtspRequest(self.TEARDOWN) #self.handler() os.remove(CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT) # Delete the cache image from video rate = float(self.counter/self.frameNbr) print('-'*60 + "\nRTP Packet Loss Rate :" + str(rate) +"\n" + '-'*60) sys.exit(0)
[ "def", "exitClient", "(", "self", ")", ":", "self", ".", "sendRtspRequest", "(", "self", ".", "TEARDOWN", ")", "#self.handler()", "os", ".", "remove", "(", "CACHE_FILE_NAME", "+", "str", "(", "self", ".", "sessionId", ")", "+", "CACHE_FILE_EXT", ")", "# De...
Teardown button handler.
[ "Teardown", "button", "handler", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/others/rts2.py#L42-L49
7,379
statueofmike/rtsp
scripts/others/rts2.py
Client.pauseMovie
def pauseMovie(self): """Pause button handler.""" if self.state == self.PLAYING: self.sendRtspRequest(self.PAUSE)
python
def pauseMovie(self): if self.state == self.PLAYING: self.sendRtspRequest(self.PAUSE)
[ "def", "pauseMovie", "(", "self", ")", ":", "if", "self", ".", "state", "==", "self", ".", "PLAYING", ":", "self", ".", "sendRtspRequest", "(", "self", ".", "PAUSE", ")" ]
Pause button handler.
[ "Pause", "button", "handler", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/others/rts2.py#L51-L54
7,380
statueofmike/rtsp
scripts/others/rts2.py
Client.updateMovie
def updateMovie(self, imageFile): """Update the image file as video frame in the GUI.""" try: photo = ImageTk.PhotoImage(Image.open(imageFile)) #stuck here !!!!!! except: print("photo error") print('-'*60) traceback.print_exc(file=sys.stdout) print('-'*60) self.label.configure(image = photo, height=288) self.label.image = photo
python
def updateMovie(self, imageFile): try: photo = ImageTk.PhotoImage(Image.open(imageFile)) #stuck here !!!!!! except: print("photo error") print('-'*60) traceback.print_exc(file=sys.stdout) print('-'*60) self.label.configure(image = photo, height=288) self.label.image = photo
[ "def", "updateMovie", "(", "self", ",", "imageFile", ")", ":", "try", ":", "photo", "=", "ImageTk", ".", "PhotoImage", "(", "Image", ".", "open", "(", "imageFile", ")", ")", "#stuck here !!!!!!", "except", ":", "print", "(", "\"photo error\"", ")", "print"...
Update the image file as video frame in the GUI.
[ "Update", "the", "image", "file", "as", "video", "frame", "in", "the", "GUI", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/others/rts2.py#L124-L135
7,381
statueofmike/rtsp
scripts/others/rts2.py
Client.sendRtspRequest
def sendRtspRequest(self, requestCode): """Send RTSP request to the server.""" #------------- # TO COMPLETE #------------- # Setup request if requestCode == self.SETUP and self.state == self.INIT: threading.Thread(target=self.recvRtspReply).start() # Update RTSP sequence number. # ... self.rtspSeq = 1 # Write the RTSP request to be sent. # request = ... request = "SETUP " + str(self.fileName) + "\n" + str(self.rtspSeq) + "\n" + " RTSP/1.0 RTP/UDP " + str(self.rtpPort) self.rtspSocket.send(request) # Keep track of the sent request. # self.requestSent = ... self.requestSent = self.SETUP # Play request elif requestCode == self.PLAY and self.state == self.READY: # Update RTSP sequence number. # ... self.rtspSeq = self.rtspSeq + 1 # Write the RTSP request to be sent. # request = ... request = "PLAY " + "\n" + str(self.rtspSeq) self.rtspSocket.send(request) print('-'*60 + "\nPLAY request sent to Server...\n" + '-'*60) # Keep track of the sent request. # self.requestSent = ... self.requestSent = self.PLAY # Pause request elif requestCode == self.PAUSE and self.state == self.PLAYING: # Update RTSP sequence number. # ... self.rtspSeq = self.rtspSeq + 1 # Write the RTSP request to be sent. # request = ... request = "PAUSE " + "\n" + str(self.rtspSeq) self.rtspSocket.send(request) print('-'*60 + "\nPAUSE request sent to Server...\n" + '-'*60) # Keep track of the sent request. # self.requestSent = ... self.requestSent = self.PAUSE # Resume request # Teardown request elif requestCode == self.TEARDOWN and not self.state == self.INIT: # Update RTSP sequence number. # ... self.rtspSeq = self.rtspSeq + 1 # Write the RTSP request to be sent. # request = ... request = "TEARDOWN " + "\n" + str(self.rtspSeq) self.rtspSocket.send(request) print('-'*60 + "\nTEARDOWN request sent to Server...\n" + '-'*60) # Keep track of the sent request. # self.requestSent = ... self.requestSent = self.TEARDOWN else: return
python
def sendRtspRequest(self, requestCode): #------------- # TO COMPLETE #------------- # Setup request if requestCode == self.SETUP and self.state == self.INIT: threading.Thread(target=self.recvRtspReply).start() # Update RTSP sequence number. # ... self.rtspSeq = 1 # Write the RTSP request to be sent. # request = ... request = "SETUP " + str(self.fileName) + "\n" + str(self.rtspSeq) + "\n" + " RTSP/1.0 RTP/UDP " + str(self.rtpPort) self.rtspSocket.send(request) # Keep track of the sent request. # self.requestSent = ... self.requestSent = self.SETUP # Play request elif requestCode == self.PLAY and self.state == self.READY: # Update RTSP sequence number. # ... self.rtspSeq = self.rtspSeq + 1 # Write the RTSP request to be sent. # request = ... request = "PLAY " + "\n" + str(self.rtspSeq) self.rtspSocket.send(request) print('-'*60 + "\nPLAY request sent to Server...\n" + '-'*60) # Keep track of the sent request. # self.requestSent = ... self.requestSent = self.PLAY # Pause request elif requestCode == self.PAUSE and self.state == self.PLAYING: # Update RTSP sequence number. # ... self.rtspSeq = self.rtspSeq + 1 # Write the RTSP request to be sent. # request = ... request = "PAUSE " + "\n" + str(self.rtspSeq) self.rtspSocket.send(request) print('-'*60 + "\nPAUSE request sent to Server...\n" + '-'*60) # Keep track of the sent request. # self.requestSent = ... self.requestSent = self.PAUSE # Resume request # Teardown request elif requestCode == self.TEARDOWN and not self.state == self.INIT: # Update RTSP sequence number. # ... self.rtspSeq = self.rtspSeq + 1 # Write the RTSP request to be sent. # request = ... request = "TEARDOWN " + "\n" + str(self.rtspSeq) self.rtspSocket.send(request) print('-'*60 + "\nTEARDOWN request sent to Server...\n" + '-'*60) # Keep track of the sent request. # self.requestSent = ... self.requestSent = self.TEARDOWN else: return
[ "def", "sendRtspRequest", "(", "self", ",", "requestCode", ")", ":", "#-------------", "# TO COMPLETE", "#-------------", "# Setup request", "if", "requestCode", "==", "self", ".", "SETUP", "and", "self", ".", "state", "==", "self", ".", "INIT", ":", "threading"...
Send RTSP request to the server.
[ "Send", "RTSP", "request", "to", "the", "server", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/others/rts2.py#L145-L214
7,382
statueofmike/rtsp
scripts/others/rts2.py
Client.recvRtspReply
def recvRtspReply(self): """Receive RTSP reply from the server.""" while True: reply = self.rtspSocket.recv(1024) if reply: self.parseRtspReply(reply) # Close the RTSP socket upon requesting Teardown if self.requestSent == self.TEARDOWN: self.rtspSocket.shutdown(socket.SHUT_RDWR) self.rtspSocket.close() break
python
def recvRtspReply(self): while True: reply = self.rtspSocket.recv(1024) if reply: self.parseRtspReply(reply) # Close the RTSP socket upon requesting Teardown if self.requestSent == self.TEARDOWN: self.rtspSocket.shutdown(socket.SHUT_RDWR) self.rtspSocket.close() break
[ "def", "recvRtspReply", "(", "self", ")", ":", "while", "True", ":", "reply", "=", "self", ".", "rtspSocket", ".", "recv", "(", "1024", ")", "if", "reply", ":", "self", ".", "parseRtspReply", "(", "reply", ")", "# Close the RTSP socket upon requesting Teardown...
Receive RTSP reply from the server.
[ "Receive", "RTSP", "reply", "from", "the", "server", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/others/rts2.py#L221-L233
7,383
statueofmike/rtsp
scripts/others/rts2.py
Client.parseRtspReply
def parseRtspReply(self, data): print("Parsing Received Rtsp data...") """Parse the RTSP reply from the server.""" lines = data.split('\n') seqNum = int(lines[1].split(' ')[1]) # Process only if the server reply's sequence number is the same as the request's if seqNum == self.rtspSeq: session = int(lines[2].split(' ')[1]) # New RTSP session ID if self.sessionId == 0: self.sessionId = session # Process only if the session ID is the same if self.sessionId == session: if int(lines[0].split(' ')[1]) == 200: if self.requestSent == self.SETUP: #------------- # TO COMPLETE #------------- # Update RTSP state. print("Updating RTSP state...") # self.state = ... self.state = self.READY # Open RTP port. #self.openRtpPort() print("Setting Up RtpPort for Video Stream") self.openRtpPort() elif self.requestSent == self.PLAY: self.state = self.PLAYING print('-'*60 + "\nClient is PLAYING...\n" + '-'*60) elif self.requestSent == self.PAUSE: self.state = self.READY # The play thread exits. A new thread is created on resume. self.playEvent.set() elif self.requestSent == self.TEARDOWN: # self.state = ... # Flag the teardownAcked to close the socket. self.teardownAcked = 1
python
def parseRtspReply(self, data): print("Parsing Received Rtsp data...") lines = data.split('\n') seqNum = int(lines[1].split(' ')[1]) # Process only if the server reply's sequence number is the same as the request's if seqNum == self.rtspSeq: session = int(lines[2].split(' ')[1]) # New RTSP session ID if self.sessionId == 0: self.sessionId = session # Process only if the session ID is the same if self.sessionId == session: if int(lines[0].split(' ')[1]) == 200: if self.requestSent == self.SETUP: #------------- # TO COMPLETE #------------- # Update RTSP state. print("Updating RTSP state...") # self.state = ... self.state = self.READY # Open RTP port. #self.openRtpPort() print("Setting Up RtpPort for Video Stream") self.openRtpPort() elif self.requestSent == self.PLAY: self.state = self.PLAYING print('-'*60 + "\nClient is PLAYING...\n" + '-'*60) elif self.requestSent == self.PAUSE: self.state = self.READY # The play thread exits. A new thread is created on resume. self.playEvent.set() elif self.requestSent == self.TEARDOWN: # self.state = ... # Flag the teardownAcked to close the socket. self.teardownAcked = 1
[ "def", "parseRtspReply", "(", "self", ",", "data", ")", ":", "print", "(", "\"Parsing Received Rtsp data...\"", ")", "lines", "=", "data", ".", "split", "(", "'\\n'", ")", "seqNum", "=", "int", "(", "lines", "[", "1", "]", ".", "split", "(", "' '", ")"...
Parse the RTSP reply from the server.
[ "Parse", "the", "RTSP", "reply", "from", "the", "server", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/others/rts2.py#L235-L278
7,384
statueofmike/rtsp
rtsp/cvstream.py
LocalVideoFeed.preview
def preview(self): """ Blocking function. Opens OpenCV window to display stream. """ win_name = 'Camera' cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE) cv2.moveWindow(win_name,20,20) self.open() while(self.isOpened()): cv2.imshow(win_name,self._stream.read()[1]) if cv2.waitKey(25) & 0xFF == ord('q'): break cv2.waitKey() cv2.destroyAllWindows() cv2.waitKey()
python
def preview(self): win_name = 'Camera' cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE) cv2.moveWindow(win_name,20,20) self.open() while(self.isOpened()): cv2.imshow(win_name,self._stream.read()[1]) if cv2.waitKey(25) & 0xFF == ord('q'): break cv2.waitKey() cv2.destroyAllWindows() cv2.waitKey()
[ "def", "preview", "(", "self", ")", ":", "win_name", "=", "'Camera'", "cv2", ".", "namedWindow", "(", "win_name", ",", "cv2", ".", "WINDOW_AUTOSIZE", ")", "cv2", ".", "moveWindow", "(", "win_name", ",", "20", ",", "20", ")", "self", ".", "open", "(", ...
Blocking function. Opens OpenCV window to display stream.
[ "Blocking", "function", ".", "Opens", "OpenCV", "window", "to", "display", "stream", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/rtsp/cvstream.py#L87-L99
7,385
statueofmike/rtsp
scripts/others/rts2.bak.py
Client.createWidgets
def createWidgets(self): """Build GUI.""" # Create Setup button self.setup = Button(self.master, width=20, padx=3, pady=3) self.setup["text"] = "Setup" self.setup["command"] = self.setupMovie self.setup.grid(row=1, column=0, padx=2, pady=2) # Create Play button self.start = Button(self.master, width=20, padx=3, pady=3) self.start["text"] = "Play" self.start["command"] = self.playMovie self.start.grid(row=1, column=1, padx=2, pady=2) # Create Pause button self.pause = Button(self.master, width=20, padx=3, pady=3) self.pause["text"] = "Pause" self.pause["command"] = self.pauseMovie self.pause.grid(row=1, column=2, padx=2, pady=2) # Create Teardown button self.teardown = Button(self.master, width=20, padx=3, pady=3) self.teardown["text"] = "Teardown" self.teardown["command"] = self.exitClient self.teardown.grid(row=1, column=3, padx=2, pady=2) # Create a label to display the movie self.label = Label(self.master, height=19) self.label.grid(row=0, column=0, columnspan=4, sticky=W+E+N+S, padx=5, pady=5)
python
def createWidgets(self): # Create Setup button self.setup = Button(self.master, width=20, padx=3, pady=3) self.setup["text"] = "Setup" self.setup["command"] = self.setupMovie self.setup.grid(row=1, column=0, padx=2, pady=2) # Create Play button self.start = Button(self.master, width=20, padx=3, pady=3) self.start["text"] = "Play" self.start["command"] = self.playMovie self.start.grid(row=1, column=1, padx=2, pady=2) # Create Pause button self.pause = Button(self.master, width=20, padx=3, pady=3) self.pause["text"] = "Pause" self.pause["command"] = self.pauseMovie self.pause.grid(row=1, column=2, padx=2, pady=2) # Create Teardown button self.teardown = Button(self.master, width=20, padx=3, pady=3) self.teardown["text"] = "Teardown" self.teardown["command"] = self.exitClient self.teardown.grid(row=1, column=3, padx=2, pady=2) # Create a label to display the movie self.label = Label(self.master, height=19) self.label.grid(row=0, column=0, columnspan=4, sticky=W+E+N+S, padx=5, pady=5)
[ "def", "createWidgets", "(", "self", ")", ":", "# Create Setup button", "self", ".", "setup", "=", "Button", "(", "self", ".", "master", ",", "width", "=", "20", ",", "padx", "=", "3", ",", "pady", "=", "3", ")", "self", ".", "setup", "[", "\"text\""...
Build GUI.
[ "Build", "GUI", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/others/rts2.bak.py#L42-L70
7,386
statueofmike/rtsp
scripts/others/rts2.bak.py
Client.playMovie
def playMovie(self): """Play button handler.""" if self.state == self.READY: # Create a new thread to listen for RTP packets print "Playing Movie" threading.Thread(target=self.listenRtp).start() self.playEvent = threading.Event() self.playEvent.clear() self.sendRtspRequest(self.PLAY)
python
def playMovie(self): if self.state == self.READY: # Create a new thread to listen for RTP packets print "Playing Movie" threading.Thread(target=self.listenRtp).start() self.playEvent = threading.Event() self.playEvent.clear() self.sendRtspRequest(self.PLAY)
[ "def", "playMovie", "(", "self", ")", ":", "if", "self", ".", "state", "==", "self", ".", "READY", ":", "# Create a new thread to listen for RTP packets", "print", "\"Playing Movie\"", "threading", ".", "Thread", "(", "target", "=", "self", ".", "listenRtp", ")"...
Play button handler.
[ "Play", "button", "handler", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/others/rts2.bak.py#L92-L100
7,387
statueofmike/rtsp
scripts/others/rts2.bak.py
Client.writeFrame
def writeFrame(self, data): """Write the received frame to a temp image file. Return the image file.""" cachename = CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT try: file = open(cachename, "wb") except: print "file open error" try: file.write(data) except: print "file write error" file.close() return cachename
python
def writeFrame(self, data): cachename = CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT try: file = open(cachename, "wb") except: print "file open error" try: file.write(data) except: print "file write error" file.close() return cachename
[ "def", "writeFrame", "(", "self", ",", "data", ")", ":", "cachename", "=", "CACHE_FILE_NAME", "+", "str", "(", "self", ".", "sessionId", ")", "+", "CACHE_FILE_EXT", "try", ":", "file", "=", "open", "(", "cachename", ",", "\"wb\"", ")", "except", ":", "...
Write the received frame to a temp image file. Return the image file.
[ "Write", "the", "received", "frame", "to", "a", "temp", "image", "file", ".", "Return", "the", "image", "file", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/others/rts2.bak.py#L141-L158
7,388
statueofmike/rtsp
scripts/others/rts2.bak.py
Client.handler
def handler(self): """Handler on explicitly closing the GUI window.""" self.pauseMovie() if tkMessageBox.askokcancel("Quit?", "Are you sure you want to quit?"): self.exitClient() else: # When the user presses cancel, resume playing. #self.playMovie() print "Playing Movie" threading.Thread(target=self.listenRtp).start() #self.playEvent = threading.Event() #self.playEvent.clear() self.sendRtspRequest(self.PLAY)
python
def handler(self): self.pauseMovie() if tkMessageBox.askokcancel("Quit?", "Are you sure you want to quit?"): self.exitClient() else: # When the user presses cancel, resume playing. #self.playMovie() print "Playing Movie" threading.Thread(target=self.listenRtp).start() #self.playEvent = threading.Event() #self.playEvent.clear() self.sendRtspRequest(self.PLAY)
[ "def", "handler", "(", "self", ")", ":", "self", ".", "pauseMovie", "(", ")", "if", "tkMessageBox", ".", "askokcancel", "(", "\"Quit?\"", ",", "\"Are you sure you want to quit?\"", ")", ":", "self", ".", "exitClient", "(", ")", "else", ":", "# When the user pr...
Handler on explicitly closing the GUI window.
[ "Handler", "on", "explicitly", "closing", "the", "GUI", "window", "." ]
4816de2da3cc9966122c8511943e6db713052a17
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/others/rts2.bak.py#L344-L355
7,389
ownaginatious/fbchat-archive-parser
fbchat_archive_parser/parser.py
ChatThreadParser.skip
def skip(self): """ Eats through the input iterator without recording the content. """ for pos, element in self.element_iter: tag, class_attr = _tag_and_class_attr(element) if tag == "div" and "thread" in class_attr and pos == "end": break
python
def skip(self): for pos, element in self.element_iter: tag, class_attr = _tag_and_class_attr(element) if tag == "div" and "thread" in class_attr and pos == "end": break
[ "def", "skip", "(", "self", ")", ":", "for", "pos", ",", "element", "in", "self", ".", "element_iter", ":", "tag", ",", "class_attr", "=", "_tag_and_class_attr", "(", "element", ")", "if", "tag", "==", "\"div\"", "and", "\"thread\"", "in", "class_attr", ...
Eats through the input iterator without recording the content.
[ "Eats", "through", "the", "input", "iterator", "without", "recording", "the", "content", "." ]
f1e66cea864f1c07b825fc036071f443693231d5
https://github.com/ownaginatious/fbchat-archive-parser/blob/f1e66cea864f1c07b825fc036071f443693231d5/fbchat_archive_parser/parser.py#L131-L138
7,390
ownaginatious/fbchat-archive-parser
fbchat_archive_parser/parser.py
MessageHtmlParser.should_record_thread
def should_record_thread(self, participants): """ Determines if the thread should be parsed based on the participants and the filter given. For example, if the filter states ['jack', 'billy joe'], then only threads with exactly two participants (excluding the owner of the chat history) containing someone with the first or last name 'Jack' and someone named 'Billy Joel' will be included. Any of the following would match that criteria: - Jack Stevenson, Billy Joel - Billy Joel, Jack Stevens - Jack Jenson, Billy Joel - Jack Jack, Billy Joel participants -- the participants of the thread (excluding the history owner) """ if not self.thread_filter: return True if len(participants) != len(self.thread_filter): return False participants = [[p.lower()] + p.lower().split(" ") for p in participants] matches = defaultdict(set) for e, p in enumerate(participants): for f in self.thread_filter: if f in p: matches[f].add(e) matched = set() for f in matches: if len(matches[f]) == 0: return False matched |= matches[f] return len(matched) == len(participants)
python
def should_record_thread(self, participants): if not self.thread_filter: return True if len(participants) != len(self.thread_filter): return False participants = [[p.lower()] + p.lower().split(" ") for p in participants] matches = defaultdict(set) for e, p in enumerate(participants): for f in self.thread_filter: if f in p: matches[f].add(e) matched = set() for f in matches: if len(matches[f]) == 0: return False matched |= matches[f] return len(matched) == len(participants)
[ "def", "should_record_thread", "(", "self", ",", "participants", ")", ":", "if", "not", "self", ".", "thread_filter", ":", "return", "True", "if", "len", "(", "participants", ")", "!=", "len", "(", "self", ".", "thread_filter", ")", ":", "return", "False",...
Determines if the thread should be parsed based on the participants and the filter given. For example, if the filter states ['jack', 'billy joe'], then only threads with exactly two participants (excluding the owner of the chat history) containing someone with the first or last name 'Jack' and someone named 'Billy Joel' will be included. Any of the following would match that criteria: - Jack Stevenson, Billy Joel - Billy Joel, Jack Stevens - Jack Jenson, Billy Joel - Jack Jack, Billy Joel participants -- the participants of the thread (excluding the history owner)
[ "Determines", "if", "the", "thread", "should", "be", "parsed", "based", "on", "the", "participants", "and", "the", "filter", "given", "." ]
f1e66cea864f1c07b825fc036071f443693231d5
https://github.com/ownaginatious/fbchat-archive-parser/blob/f1e66cea864f1c07b825fc036071f443693231d5/fbchat_archive_parser/parser.py#L218-L255
7,391
ownaginatious/fbchat-archive-parser
fbchat_archive_parser/parser.py
MessageHtmlParser.parse_thread
def parse_thread(self, participants, element_iter, require_flush): """ Parses a thread with appropriate CLI feedback. :param participants: The participants in this thread. :param element_iter: The XML iterator to parse the data from. :param require_flush: Whether the iterator needs to be flushed if it is determined that the thread should be skipped. :return: A `ChatThread` object if not skipped, otherwise `None`. """ # Very rarely threads may lack information on who the # participants are. We will consider those threads corrupted # and skip them. participants_text = _truncate(', '.join(participants), 60) if participants: skip_thread = not self.should_record_thread(participants) participants_text = yellow("[%s]" % participants_text) else: participants_text = "unknown participants" skip_thread = True if skip_thread: line = "\rSkipping chat thread with %s..." % \ yellow(participants_text) else: participants_key = ", ".join(participants) if participants_key in self.chat_threads: thread_current_len = len(self.chat_threads[participants_key]) line = "\rContinuing chat thread with %s %s..." \ % (yellow(participants_text), magenta("<@%d messages>" % thread_current_len)) else: line = "\rDiscovered chat thread with %s..." \ % yellow(participants_text) if self.progress_output: sys.stderr.write(line.ljust(self.last_line_len)) sys.stderr.flush() self.last_line_len = len(line) parser = ChatThreadParser( element_iter, self.timezone_hints, self.use_utc, self.name_resolver, self.no_sender_warning, self.seq_num) if skip_thread: if require_flush: parser.skip() else: self.no_sender_warning, thread = parser.parse(participants) return thread
python
def parse_thread(self, participants, element_iter, require_flush): # Very rarely threads may lack information on who the # participants are. We will consider those threads corrupted # and skip them. participants_text = _truncate(', '.join(participants), 60) if participants: skip_thread = not self.should_record_thread(participants) participants_text = yellow("[%s]" % participants_text) else: participants_text = "unknown participants" skip_thread = True if skip_thread: line = "\rSkipping chat thread with %s..." % \ yellow(participants_text) else: participants_key = ", ".join(participants) if participants_key in self.chat_threads: thread_current_len = len(self.chat_threads[participants_key]) line = "\rContinuing chat thread with %s %s..." \ % (yellow(participants_text), magenta("<@%d messages>" % thread_current_len)) else: line = "\rDiscovered chat thread with %s..." \ % yellow(participants_text) if self.progress_output: sys.stderr.write(line.ljust(self.last_line_len)) sys.stderr.flush() self.last_line_len = len(line) parser = ChatThreadParser( element_iter, self.timezone_hints, self.use_utc, self.name_resolver, self.no_sender_warning, self.seq_num) if skip_thread: if require_flush: parser.skip() else: self.no_sender_warning, thread = parser.parse(participants) return thread
[ "def", "parse_thread", "(", "self", ",", "participants", ",", "element_iter", ",", "require_flush", ")", ":", "# Very rarely threads may lack information on who the", "# participants are. We will consider those threads corrupted", "# and skip them.", "participants_text", "=", "_tru...
Parses a thread with appropriate CLI feedback. :param participants: The participants in this thread. :param element_iter: The XML iterator to parse the data from. :param require_flush: Whether the iterator needs to be flushed if it is determined that the thread should be skipped. :return: A `ChatThread` object if not skipped, otherwise `None`.
[ "Parses", "a", "thread", "with", "appropriate", "CLI", "feedback", "." ]
f1e66cea864f1c07b825fc036071f443693231d5
https://github.com/ownaginatious/fbchat-archive-parser/blob/f1e66cea864f1c07b825fc036071f443693231d5/fbchat_archive_parser/parser.py#L275-L322
7,392
ownaginatious/fbchat-archive-parser
fbchat_archive_parser/parser.py
LegacyMessageHtmlParser.parse_impl
def parse_impl(self): """ Parses the HTML content as a stream. This is far less memory intensive than loading the entire HTML file into memory, like BeautifulSoup does. """ # Cast to str to ensure not unicode under Python 2, as the parser # doesn't like that. parser = XMLParser(encoding=str('UTF-8')) element_iter = ET.iterparse(self.handle, events=("start", "end"), parser=parser) for pos, element in element_iter: tag, class_attr = _tag_and_class_attr(element) if tag == "h1" and pos == "end": if not self.user: self.user = element.text.strip() elif tag == "div" and "thread" in class_attr and pos == "start": participants = self.parse_participants(element) thread = self.parse_thread(participants, element_iter, True) self.save_thread(thread)
python
def parse_impl(self): # Cast to str to ensure not unicode under Python 2, as the parser # doesn't like that. parser = XMLParser(encoding=str('UTF-8')) element_iter = ET.iterparse(self.handle, events=("start", "end"), parser=parser) for pos, element in element_iter: tag, class_attr = _tag_and_class_attr(element) if tag == "h1" and pos == "end": if not self.user: self.user = element.text.strip() elif tag == "div" and "thread" in class_attr and pos == "start": participants = self.parse_participants(element) thread = self.parse_thread(participants, element_iter, True) self.save_thread(thread)
[ "def", "parse_impl", "(", "self", ")", ":", "# Cast to str to ensure not unicode under Python 2, as the parser", "# doesn't like that.", "parser", "=", "XMLParser", "(", "encoding", "=", "str", "(", "'UTF-8'", ")", ")", "element_iter", "=", "ET", ".", "iterparse", "("...
Parses the HTML content as a stream. This is far less memory intensive than loading the entire HTML file into memory, like BeautifulSoup does.
[ "Parses", "the", "HTML", "content", "as", "a", "stream", ".", "This", "is", "far", "less", "memory", "intensive", "than", "loading", "the", "entire", "HTML", "file", "into", "memory", "like", "BeautifulSoup", "does", "." ]
f1e66cea864f1c07b825fc036071f443693231d5
https://github.com/ownaginatious/fbchat-archive-parser/blob/f1e66cea864f1c07b825fc036071f443693231d5/fbchat_archive_parser/parser.py#L385-L404
7,393
ownaginatious/fbchat-archive-parser
fbchat_archive_parser/main.py
messages
def messages(path, thread, fmt, nocolor, timezones, utc, noprogress, resolve, directory): """ Conversion of Facebook chat history. """ with colorize_output(nocolor): try: chat_history = _process_history( path=path, thread=thread, timezones=timezones, utc=utc, noprogress=noprogress, resolve=resolve) except ProcessingFailure: return if directory: set_all_color(enabled=False) write(fmt, chat_history, directory or sys.stdout)
python
def messages(path, thread, fmt, nocolor, timezones, utc, noprogress, resolve, directory): with colorize_output(nocolor): try: chat_history = _process_history( path=path, thread=thread, timezones=timezones, utc=utc, noprogress=noprogress, resolve=resolve) except ProcessingFailure: return if directory: set_all_color(enabled=False) write(fmt, chat_history, directory or sys.stdout)
[ "def", "messages", "(", "path", ",", "thread", ",", "fmt", ",", "nocolor", ",", "timezones", ",", "utc", ",", "noprogress", ",", "resolve", ",", "directory", ")", ":", "with", "colorize_output", "(", "nocolor", ")", ":", "try", ":", "chat_history", "=", ...
Conversion of Facebook chat history.
[ "Conversion", "of", "Facebook", "chat", "history", "." ]
f1e66cea864f1c07b825fc036071f443693231d5
https://github.com/ownaginatious/fbchat-archive-parser/blob/f1e66cea864f1c07b825fc036071f443693231d5/fbchat_archive_parser/main.py#L174-L187
7,394
ownaginatious/fbchat-archive-parser
fbchat_archive_parser/main.py
stats
def stats(path, fmt, nocolor, timezones, utc, noprogress, most_common, resolve, length): """Analysis of Facebook chat history.""" with colorize_output(nocolor): try: chat_history = _process_history( path=path, thread='', timezones=timezones, utc=utc, noprogress=noprogress, resolve=resolve) except ProcessingFailure: return statistics = ChatHistoryStatistics( chat_history, most_common=None if most_common < 0 else most_common) if fmt == 'text': statistics.write_text(sys.stdout, -1 if length < 0 else length) elif fmt == 'json': statistics.write_json(sys.stdout) elif fmt == 'pretty-json': statistics.write_json(sys.stdout, pretty=True) elif fmt == 'yaml': statistics.write_yaml(sys.stdout)
python
def stats(path, fmt, nocolor, timezones, utc, noprogress, most_common, resolve, length): with colorize_output(nocolor): try: chat_history = _process_history( path=path, thread='', timezones=timezones, utc=utc, noprogress=noprogress, resolve=resolve) except ProcessingFailure: return statistics = ChatHistoryStatistics( chat_history, most_common=None if most_common < 0 else most_common) if fmt == 'text': statistics.write_text(sys.stdout, -1 if length < 0 else length) elif fmt == 'json': statistics.write_json(sys.stdout) elif fmt == 'pretty-json': statistics.write_json(sys.stdout, pretty=True) elif fmt == 'yaml': statistics.write_yaml(sys.stdout)
[ "def", "stats", "(", "path", ",", "fmt", ",", "nocolor", ",", "timezones", ",", "utc", ",", "noprogress", ",", "most_common", ",", "resolve", ",", "length", ")", ":", "with", "colorize_output", "(", "nocolor", ")", ":", "try", ":", "chat_history", "=", ...
Analysis of Facebook chat history.
[ "Analysis", "of", "Facebook", "chat", "history", "." ]
f1e66cea864f1c07b825fc036071f443693231d5
https://github.com/ownaginatious/fbchat-archive-parser/blob/f1e66cea864f1c07b825fc036071f443693231d5/fbchat_archive_parser/main.py#L203-L221
7,395
ownaginatious/fbchat-archive-parser
fbchat_archive_parser/utils.py
set_stream_color
def set_stream_color(stream, disabled): """ Remember what our original streams were so that we can colorize them separately, which colorama doesn't seem to natively support. """ original_stdout = sys.stdout original_stderr = sys.stderr init(strip=disabled) if stream != original_stdout: sys.stdout = original_stdout sys.stderr = BinaryStreamWrapper(stream, sys.stderr) if stream != original_stderr: sys.stderr = original_stderr sys.stdout = BinaryStreamWrapper(stream, sys.stdout)
python
def set_stream_color(stream, disabled): original_stdout = sys.stdout original_stderr = sys.stderr init(strip=disabled) if stream != original_stdout: sys.stdout = original_stdout sys.stderr = BinaryStreamWrapper(stream, sys.stderr) if stream != original_stderr: sys.stderr = original_stderr sys.stdout = BinaryStreamWrapper(stream, sys.stdout)
[ "def", "set_stream_color", "(", "stream", ",", "disabled", ")", ":", "original_stdout", "=", "sys", ".", "stdout", "original_stderr", "=", "sys", ".", "stderr", "init", "(", "strip", "=", "disabled", ")", "if", "stream", "!=", "original_stdout", ":", "sys", ...
Remember what our original streams were so that we can colorize them separately, which colorama doesn't seem to natively support.
[ "Remember", "what", "our", "original", "streams", "were", "so", "that", "we", "can", "colorize", "them", "separately", "which", "colorama", "doesn", "t", "seem", "to", "natively", "support", "." ]
f1e66cea864f1c07b825fc036071f443693231d5
https://github.com/ownaginatious/fbchat-archive-parser/blob/f1e66cea864f1c07b825fc036071f443693231d5/fbchat_archive_parser/utils.py#L31-L47
7,396
ownaginatious/fbchat-archive-parser
fbchat_archive_parser/name_resolver.py
FacebookNameResolver._manual_lookup
def _manual_lookup(self, facebook_id, facebook_id_string): """ People who we have not communicated with in a long time will not appear in the look-ahead cache that Facebook keeps. We must manually resolve them. :param facebook_id: Profile ID of the user to lookup. :return: """ resp = self._session.get( 'https://www.facebook.com/%s' % facebook_id, allow_redirects=True, timeout=10 ) # No point in trying to get this using BeautifulSoup. The HTML here # is the very epitome of what it is to be invalid... m = _MANUAL_NAME_MATCHER.search(resp.text) if m: name = m.group(1) else: name = facebook_id_string self._cached_profiles[facebook_id] = name return name
python
def _manual_lookup(self, facebook_id, facebook_id_string): resp = self._session.get( 'https://www.facebook.com/%s' % facebook_id, allow_redirects=True, timeout=10 ) # No point in trying to get this using BeautifulSoup. The HTML here # is the very epitome of what it is to be invalid... m = _MANUAL_NAME_MATCHER.search(resp.text) if m: name = m.group(1) else: name = facebook_id_string self._cached_profiles[facebook_id] = name return name
[ "def", "_manual_lookup", "(", "self", ",", "facebook_id", ",", "facebook_id_string", ")", ":", "resp", "=", "self", ".", "_session", ".", "get", "(", "'https://www.facebook.com/%s'", "%", "facebook_id", ",", "allow_redirects", "=", "True", ",", "timeout", "=", ...
People who we have not communicated with in a long time will not appear in the look-ahead cache that Facebook keeps. We must manually resolve them. :param facebook_id: Profile ID of the user to lookup. :return:
[ "People", "who", "we", "have", "not", "communicated", "with", "in", "a", "long", "time", "will", "not", "appear", "in", "the", "look", "-", "ahead", "cache", "that", "Facebook", "keeps", ".", "We", "must", "manually", "resolve", "them", "." ]
f1e66cea864f1c07b825fc036071f443693231d5
https://github.com/ownaginatious/fbchat-archive-parser/blob/f1e66cea864f1c07b825fc036071f443693231d5/fbchat_archive_parser/name_resolver.py#L125-L146
7,397
cqparts/cqparts
src/cqparts/codec/__init__.py
get_exporter
def get_exporter(obj, name): """ Get an exporter for the :param obj: object to export :type obj: :class:`Component <cqparts.Component>` :param name: registered name of exporter :type name: :class:`str` :return: an exporter instance of the given type :rtype: :class:`Exporter` :raises TypeError: if exporter cannot be found """ if name not in exporter_index: raise TypeError( ("exporter type '%s' is not registered: " % name) + ("registered types: %r" % sorted(exporter_index.keys())) ) for base_class in exporter_index[name]: if isinstance(obj, base_class): return exporter_index[name][base_class](obj) raise TypeError("exporter type '%s' for a %r is not registered" % ( name, type(obj) ))
python
def get_exporter(obj, name): if name not in exporter_index: raise TypeError( ("exporter type '%s' is not registered: " % name) + ("registered types: %r" % sorted(exporter_index.keys())) ) for base_class in exporter_index[name]: if isinstance(obj, base_class): return exporter_index[name][base_class](obj) raise TypeError("exporter type '%s' for a %r is not registered" % ( name, type(obj) ))
[ "def", "get_exporter", "(", "obj", ",", "name", ")", ":", "if", "name", "not", "in", "exporter_index", ":", "raise", "TypeError", "(", "(", "\"exporter type '%s' is not registered: \"", "%", "name", ")", "+", "(", "\"registered types: %r\"", "%", "sorted", "(", ...
Get an exporter for the :param obj: object to export :type obj: :class:`Component <cqparts.Component>` :param name: registered name of exporter :type name: :class:`str` :return: an exporter instance of the given type :rtype: :class:`Exporter` :raises TypeError: if exporter cannot be found
[ "Get", "an", "exporter", "for", "the" ]
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/codec/__init__.py#L77-L101
7,398
cqparts/cqparts
src/cqparts/codec/__init__.py
get_importer
def get_importer(cls, name): """ Get an importer for the given registered type. :param cls: class to import :type cls: :class:`type` :param name: registered name of importer :type name: :class:`str` :return: an importer instance of the given type :rtype: :class:`Importer` :raises TypeError: if importer cannot be found """ if name not in importer_index: raise TypeError( ("importer type '%s' is not registered: " % name) + ("registered types: %r" % sorted(importer_index.keys())) ) for base_class in importer_index[name]: if issubclass(cls, base_class): return importer_index[name][base_class](cls) raise TypeError("importer type '%s' for a %r is not registered" % ( name, cls ))
python
def get_importer(cls, name): if name not in importer_index: raise TypeError( ("importer type '%s' is not registered: " % name) + ("registered types: %r" % sorted(importer_index.keys())) ) for base_class in importer_index[name]: if issubclass(cls, base_class): return importer_index[name][base_class](cls) raise TypeError("importer type '%s' for a %r is not registered" % ( name, cls ))
[ "def", "get_importer", "(", "cls", ",", "name", ")", ":", "if", "name", "not", "in", "importer_index", ":", "raise", "TypeError", "(", "(", "\"importer type '%s' is not registered: \"", "%", "name", ")", "+", "(", "\"registered types: %r\"", "%", "sorted", "(", ...
Get an importer for the given registered type. :param cls: class to import :type cls: :class:`type` :param name: registered name of importer :type name: :class:`str` :return: an importer instance of the given type :rtype: :class:`Importer` :raises TypeError: if importer cannot be found
[ "Get", "an", "importer", "for", "the", "given", "registered", "type", "." ]
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/codec/__init__.py#L146-L170
7,399
cqparts/cqparts
src/cqparts_fasteners/catalogue/scripts/bunnings.py
BunningsProductSpider.parse
def parse(self, response): """Parse pagenated list of products""" # Check if page is out of range no_more_products = re.search( r'No matching products were found', response.css('div.paged-results').extract_first(), flags=re.I ) if no_more_products: pass # no more pages to populate, stop scraping else: # Scrape products list for product in response.css('article.product-list__item'): product_url = product.css('a::attr("href")').extract_first() yield response.follow(product_url, self.parse_detail) (base, params) = split_url(response.url) params.update({'page': int(params.get('page', '1')) + 1}) next_page_url = join_url(base, params) self.logger.info(next_page_url) yield response.follow(next_page_url, self.parse)
python
def parse(self, response): # Check if page is out of range no_more_products = re.search( r'No matching products were found', response.css('div.paged-results').extract_first(), flags=re.I ) if no_more_products: pass # no more pages to populate, stop scraping else: # Scrape products list for product in response.css('article.product-list__item'): product_url = product.css('a::attr("href")').extract_first() yield response.follow(product_url, self.parse_detail) (base, params) = split_url(response.url) params.update({'page': int(params.get('page', '1')) + 1}) next_page_url = join_url(base, params) self.logger.info(next_page_url) yield response.follow(next_page_url, self.parse)
[ "def", "parse", "(", "self", ",", "response", ")", ":", "# Check if page is out of range", "no_more_products", "=", "re", ".", "search", "(", "r'No matching products were found'", ",", "response", ".", "css", "(", "'div.paged-results'", ")", ".", "extract_first", "(...
Parse pagenated list of products
[ "Parse", "pagenated", "list", "of", "products" ]
018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_fasteners/catalogue/scripts/bunnings.py#L36-L58