repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
sorrowless/battery_systray
batticon/batticon.py
Application.tooltip_query
def tooltip_query(self, widget, x, y, keyboard_mode, tooltip):
    """Fill in the tray-icon tooltip with the current battery status.

    GTK calls this when the mouse cursor hovers over the icon in the
    system panel; the text shown is the raw output of the ``acpi``
    command.
    """
    battery_status = subprocess.getoutput("acpi")
    tooltip.set_text(battery_status)
    return True
python
def tooltip_query(self, widget, x, y, keyboard_mode, tooltip): """ Set tooltip which appears when you hover mouse curson onto icon in system panel. """ tooltip.set_text(subprocess.getoutput("acpi")) return True
[ "def", "tooltip_query", "(", "self", ",", "widget", ",", "x", ",", "y", ",", "keyboard_mode", ",", "tooltip", ")", ":", "tooltip", ".", "set_text", "(", "subprocess", ".", "getoutput", "(", "\"acpi\"", ")", ")", "return", "True" ]
Set tooltip which appears when you hover mouse curson onto icon in system panel.
[ "Set", "tooltip", "which", "appears", "when", "you", "hover", "mouse", "curson", "onto", "icon", "in", "system", "panel", "." ]
train
https://github.com/sorrowless/battery_systray/blob/4594fca6f357660e081c2800af4a8b21c607bef1/batticon/batticon.py#L161-L166
ishxiao/aps
aps/about.py
about
def about():
    """
    About box for aps. Gives version numbers for aps, NumPy, SciPy,
    Cython, and MatPlotLib.

    Prints everything to stdout; returns None.
    """
    print("")
    print("aps: APS Journals API in Python for Humans")
    print("Copyright (c) 2017 and later.")
    print("Xiao Shang")
    print("")
    print("aps Version: %s" % aps.__version__)
    print("Numpy Version: %s" % numpy.__version__)
    print("Scipy Version: %s" % scipy.__version__)
    # Cython and matplotlib are optional; report 'None' when absent.
    # BUG FIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # only an ImportError should fall back to 'None'.
    try:
        import Cython
        cython_ver = Cython.__version__
    except ImportError:
        cython_ver = 'None'
    print("Cython Version: %s" % cython_ver)
    try:
        import matplotlib
        matplotlib_ver = matplotlib.__version__
    except ImportError:
        matplotlib_ver = 'None'
    print("Matplotlib Version: %s" % matplotlib_ver)
    print("Python Version: %d.%d.%d" % sys.version_info[0:3])
    print("Number of CPUs: %s" % hardware_info()['cpus'])
    # print("BLAS Info: %s" % _blas_info())
    print("Platform Info: %s (%s)" % (platform.system(), platform.machine()))
    aps_install_path = os.path.dirname(inspect.getsourcefile(aps))
    print("Installation path: %s" % aps_install_path)
    print("")
python
def about(): """ About box for aps. Gives version numbers for aps, NumPy, SciPy, Cython, and MatPlotLib. """ print("") print("aps: APS Journals API in Python for Humans") print("Copyright (c) 2017 and later.") print("Xiao Shang") print("") print("aps Version: %s" % aps.__version__) print("Numpy Version: %s" % numpy.__version__) print("Scipy Version: %s" % scipy.__version__) try: import Cython cython_ver = Cython.__version__ except: cython_ver = 'None' print("Cython Version: %s" % cython_ver) try: import matplotlib matplotlib_ver = matplotlib.__version__ except: matplotlib_ver = 'None' print("Matplotlib Version: %s" % matplotlib_ver) print("Python Version: %d.%d.%d" % sys.version_info[0:3]) print("Number of CPUs: %s" % hardware_info()['cpus']) # print("BLAS Info: %s" % _blas_info()) print("Platform Info: %s (%s)" % (platform.system(), platform.machine())) aps_install_path = os.path.dirname(inspect.getsourcefile(aps)) print("Installation path: %s" % aps_install_path) print("")
[ "def", "about", "(", ")", ":", "print", "(", "\"\"", ")", "print", "(", "\"aps: APS Journals API in Python for Humans\"", ")", "print", "(", "\"Copyright (c) 2017 and later.\"", ")", "print", "(", "\"Xiao Shang\"", ")", "print", "(", "\"\"", ")", "print", "(", "...
About box for aps. Gives version numbers for aps, NumPy, SciPy, Cython, and MatPlotLib.
[ "About", "box", "for", "aps", ".", "Gives", "version", "numbers", "for", "aps", "NumPy", "SciPy", "Cython", "and", "MatPlotLib", "." ]
train
https://github.com/ishxiao/aps/blob/faa4329b26eed257a0ca45df57561eff1a3dd133/aps/about.py#L27-L59
robehickman/simple-http-file-sync
shttpfs/server.py
success
def success(headers = None, data = ''):
    """Generate a success JSON response to send to the client.

    Dict payloads are JSON-encoded; passed headers are merged over the
    default {'status': 'ok'} and may override it.
    """
    merged_headers = {'status' : 'ok'}
    if headers is not None:
        merged_headers.update(headers)
    payload = data
    if isinstance(payload, dict):
        payload = json.dumps(payload)
    return server_responce(merged_headers, payload)
python
def success(headers = None, data = ''): """ Generate success JSON to send to client """ passed_headers = {} if headers is None else headers if isinstance(data, dict): data = json.dumps(data) ret_headers = {'status' : 'ok'} ret_headers.update(passed_headers) return server_responce(ret_headers, data)
[ "def", "success", "(", "headers", "=", "None", ",", "data", "=", "''", ")", ":", "passed_headers", "=", "{", "}", "if", "headers", "is", "None", "else", "headers", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "data", "=", "json", ".", "d...
Generate success JSON to send to client
[ "Generate", "success", "JSON", "to", "send", "to", "client" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L41-L47
robehickman/simple-http-file-sync
shttpfs/server.py
lock_access
def lock_access(repository_path, callback):
    """Synchronise access to the user file between processes.

    Runs `callback` while holding an exclusive, non-blocking flock on the
    repository's lock_file; returns fail(lock_fail_msg) if the lock is
    already held (or the callback raises IOError, matching the original
    behaviour since the callback runs inside the try block).
    """
    with open(cpjoin(repository_path, 'lock_file'), 'w') as lock_fd:
        try:
            fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            result = callback()
            fcntl.flock(lock_fd, fcntl.LOCK_UN)
            return result
        except IOError:
            return fail(lock_fail_msg)
python
def lock_access(repository_path, callback): """ Synchronise access to the user file between processes, this specifies which user is allowed write access at the current time """ with open(cpjoin(repository_path, 'lock_file'), 'w') as fd: try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) returned = callback() fcntl.flock(fd, fcntl.LOCK_UN) return returned except IOError: return fail(lock_fail_msg)
[ "def", "lock_access", "(", "repository_path", ",", "callback", ")", ":", "with", "open", "(", "cpjoin", "(", "repository_path", ",", "'lock_file'", ")", ",", "'w'", ")", "as", "fd", ":", "try", ":", "fcntl", ".", "flock", "(", "fd", ",", "fcntl", ".", ...
Synchronise access to the user file between processes, this specifies which user is allowed write access at the current time
[ "Synchronise", "access", "to", "the", "user", "file", "between", "processes", "this", "specifies", "which", "user", "is", "allowed", "write", "access", "at", "the", "current", "time" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L57-L68
robehickman/simple-http-file-sync
shttpfs/server.py
update_user_lock
def update_user_lock(repository_path, session_token):
    """Write or clear the user lock file.

    NOTE: ALWAYS use within a lock_access callback. The file is also read
    asynchronously, so the update must be atomic: write to a temp file,
    then rename over the real one.
    """
    final_path   = cpjoin(repository_path, 'user_file')
    staging_path = cpjoin(repository_path, 'new_user_file')

    if session_token is None:
        contents = ''  # an empty file means the lock is free
    else:
        # Lock is granted for 30 seconds from now
        contents = json.dumps({'session_token' : session_token,
                               'expires'       : int(time.time()) + 30})

    with open(staging_path, 'w') as handle:
        handle.write(contents)
        handle.flush()
    os.rename(staging_path, final_path)
python
def update_user_lock(repository_path, session_token): """ Write or clear the user lock file """ # NOTE ALWAYS use within lock access callback # While the user lock file should ALWAYS be written only within a lock_access # callback, it is sometimes read asynchronously. Because of this updates to # the file must be atomic. Write plus move is used to achieve this. real_path = cpjoin(repository_path, 'user_file') tmp_path = cpjoin(repository_path, 'new_user_file') with open(tmp_path, 'w') as fd2: if session_token is None: fd2.write('') else: fd2.write(json.dumps({'session_token' : session_token, 'expires' : int(time.time()) + 30})) fd2.flush() os.rename(tmp_path, real_path)
[ "def", "update_user_lock", "(", "repository_path", ",", "session_token", ")", ":", "# NOTE ALWAYS use within lock access callback", "# While the user lock file should ALWAYS be written only within a lock_access", "# callback, it is sometimes read asynchronously. Because of this updates to", "#...
Write or clear the user lock file
[ "Write", "or", "clear", "the", "user", "lock", "file" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L72-L85
robehickman/simple-http-file-sync
shttpfs/server.py
can_aquire_user_lock
def can_aquire_user_lock(repository_path, session_token):
    """Allow a user to acquire the lock if no other user currently holds it,
    if the original holder is returning (presumably after a network error),
    or if the lock has expired.

    NOTE: ALWAYS use within a lock_access callback.
    """
    lock_file = cpjoin(repository_path, 'user_file')

    # Missing, empty or unparsable lock file all mean the lock is free
    if not os.path.isfile(lock_file):
        return True
    with open(lock_file, 'r') as handle:
        raw = handle.read()
    if len(raw) == 0:
        return True
    try:
        lock_info = json.loads(raw)
    except ValueError:
        return True

    # Expired lock, or the same session returning to resume
    return (lock_info['expires'] < int(time.time())
            or lock_info['session_token'] == session_token)
python
def can_aquire_user_lock(repository_path, session_token): """ Allow a user to acquire the lock if no other user is currently using it, if the original user is returning, presumably after a network error, or if the lock has expired. """ # NOTE ALWAYS use within lock access callback user_file_path = cpjoin(repository_path, 'user_file') if not os.path.isfile(user_file_path): return True with open(user_file_path, 'r') as fd2: content = fd2.read() if len(content) == 0: return True try: res = json.loads(content) except ValueError: return True if res['expires'] < int(time.time()): return True elif res['session_token'] == session_token: return True return False
[ "def", "can_aquire_user_lock", "(", "repository_path", ",", "session_token", ")", ":", "# NOTE ALWAYS use within lock access callback", "user_file_path", "=", "cpjoin", "(", "repository_path", ",", "'user_file'", ")", "if", "not", "os", ".", "path", ".", "isfile", "("...
Allow a user to acquire the lock if no other user is currently using it, if the original user is returning, presumably after a network error, or if the lock has expired.
[ "Allow", "a", "user", "to", "acquire", "the", "lock", "if", "no", "other", "user", "is", "currently", "using", "it", "if", "the", "original", "user", "is", "returning", "presumably", "after", "a", "network", "error", "or", "if", "the", "lock", "has", "ex...
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L89-L103
robehickman/simple-http-file-sync
shttpfs/server.py
varify_user_lock
def varify_user_lock(repository_path, session_token):
    """Verify that a returning user has a valid token and their lock has
    not expired.

    Returns True only when the stored session token matches and the lock
    expiry is still in the future; an empty or unparsable lock file fails
    verification. (Unlike can_aquire_user_lock, a missing file is not
    handled here and would raise IOError — presumably callers guarantee
    it exists; TODO confirm.)
    """
    with open(cpjoin(repository_path, 'user_file'), 'r') as fd2:
        content = fd2.read()
    if len(content) == 0:
        return False
    try:
        res = json.loads(content)
    except ValueError:
        return False
    # BUG FIX: the original had an unreachable `return False` after this
    # return statement; the dead code has been removed.
    return res['session_token'] == session_token and int(time.time()) < int(res['expires'])
python
def varify_user_lock(repository_path, session_token): """ Verify that a returning user has a valid token and their lock has not expired """ with open(cpjoin(repository_path, 'user_file'), 'r') as fd2: content = fd2.read() if len(content) == 0: return False try: res = json.loads(content) except ValueError: return False return res['session_token'] == session_token and int(time.time()) < int(res['expires']) return False
[ "def", "varify_user_lock", "(", "repository_path", ",", "session_token", ")", ":", "with", "open", "(", "cpjoin", "(", "repository_path", ",", "'user_file'", ")", ",", "'r'", ")", "as", "fd2", ":", "content", "=", "fd2", ".", "read", "(", ")", "if", "len...
Verify that a returning user has a valid token and their lock has not expired
[ "Verify", "that", "a", "returning", "user", "has", "a", "valid", "token", "and", "their", "lock", "has", "not", "expired" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L116-L125
robehickman/simple-http-file-sync
shttpfs/server.py
auth_db_connect
def auth_db_connect(db_path):
    """An SQLite database is used to store authentication transient data:
    tokens (strings of random data which are signed by the client) and
    session_tokens (which identify authenticated users).

    Returns a connection whose rows come back as dicts keyed by column name.
    """
    def dict_factory(cursor, row):
        # Map each row to {column_name: value} instead of a plain tuple
        return {col[0] : row[idx] for idx,col in enumerate(cursor.description)}

    conn = db.connect(db_path)
    conn.row_factory = dict_factory

    # Create the schema only once per process. BUG FIX: the original read
    # auth_db_connect.init unconditionally, which raises AttributeError on
    # the first call unless the flag was seeded elsewhere; getattr with a
    # False default is backward compatible and removes that failure mode.
    if not getattr(auth_db_connect, 'init', False):
        conn.execute('create table if not exists tokens (expires int, token text, ip text)')
        conn.execute('create table if not exists session_tokens (expires int, token text, ip text, username text)')
        auth_db_connect.init = True
    return conn
python
def auth_db_connect(db_path): """ An SQLite database is used to store authentication transient data, this is tokens, strings of random data which are signed by the client, and session_tokens which identify authenticated users """ def dict_factory(cursor, row): return {col[0] : row[idx] for idx,col in enumerate(cursor.description)} conn = db.connect(db_path) conn.row_factory = dict_factory if not auth_db_connect.init: conn.execute('create table if not exists tokens (expires int, token text, ip text)') conn.execute('create table if not exists session_tokens (expires int, token text, ip text, username text)') auth_db_connect.init = True return conn
[ "def", "auth_db_connect", "(", "db_path", ")", ":", "def", "dict_factory", "(", "cursor", ",", "row", ")", ":", "return", "{", "col", "[", "0", "]", ":", "row", "[", "idx", "]", "for", "idx", ",", "col", "in", "enumerate", "(", "cursor", ".", "desc...
An SQLite database is used to store authentication transient data, this is tokens, strings of random data which are signed by the client, and session_tokens which identify authenticated users
[ "An", "SQLite", "database", "is", "used", "to", "store", "authentication", "transient", "data", "this", "is", "tokens", "strings", "of", "random", "data", "which", "are", "signed", "by", "the", "client", "and", "session_tokens", "which", "identify", "authenticat...
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L131-L143
robehickman/simple-http-file-sync
shttpfs/server.py
begin_auth
def begin_auth():
    """Issue a fresh single-use authentication token for the client to sign."""
    repository = request.headers['repository']
    if repository not in config['repositories']:
        return fail(no_such_repo_msg)

    repository_path = config['repositories'][repository]['path']
    conn = auth_db_connect(cpjoin(repository_path, 'auth_transient.db'))
    gc_tokens(conn)

    # Issue a new token, valid for 30 seconds, bound to the requesting ip
    auth_token = base64.b64encode(pysodium.randombytes(35)).decode('utf-8')
    conn.execute("insert into tokens (expires, token, ip) values (?,?,?)",
                 (time.time() + 30, auth_token, request.environ['REMOTE_ADDR']))
    conn.commit()
    return success({'auth_token' : auth_token})
python
def begin_auth(): """ Request authentication token to sign """ repository = request.headers['repository'] if repository not in config['repositories']: return fail(no_such_repo_msg) # == repository_path = config['repositories'][repository]['path'] conn = auth_db_connect(cpjoin(repository_path, 'auth_transient.db')); gc_tokens(conn) # Issue a new token auth_token = base64.b64encode(pysodium.randombytes(35)).decode('utf-8') conn.execute("insert into tokens (expires, token, ip) values (?,?,?)", (time.time() + 30, auth_token, request.environ['REMOTE_ADDR'])) conn.commit() return success({'auth_token' : auth_token})
[ "def", "begin_auth", "(", ")", ":", "repository", "=", "request", ".", "headers", "[", "'repository'", "]", "if", "repository", "not", "in", "config", "[", "'repositories'", "]", ":", "return", "fail", "(", "no_such_repo_msg", ")", "# ==", "repository_path", ...
Request authentication token to sign
[ "Request", "authentication", "token", "to", "sign" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L155-L171
robehickman/simple-http-file-sync
shttpfs/server.py
authenticate
def authenticate():
    """This does two things: either validate a pre-existing session token,
    or create a new one from a signed authentication token."""
    client_ip  = request.environ['REMOTE_ADDR']
    repository = request.headers['repository']
    if repository not in config['repositories']:
        return fail(no_such_repo_msg)

    repository_path = config['repositories'][repository]['path']
    conn = auth_db_connect(cpjoin(repository_path, 'auth_transient.db'))
    gc_tokens(conn)
    gc_tokens(conn)  # NOTE(review): called twice in the original; kept as-is

    # Allow resume of an existing session
    if 'session_token' in request.headers:
        session_token = request.headers['session_token']
        conn.execute("delete from session_tokens where expires < ?", (time.time(),))
        conn.commit()
        res = conn.execute("select * from session_tokens where token = ? and ip = ?",
                           (session_token, client_ip)).fetchall()
        if res != []:
            return success({'session_token' : session_token})
        return fail(user_auth_fail_msg)

    # Create a new session
    user       = request.headers['user']
    auth_token = request.headers['auth_token']
    signiture  = request.headers['signature']
    try:
        public_key = config['users'][user]['public_key']

        # Verify the client's signature over the token we issued
        pysodium.crypto_sign_verify_detached(base64.b64decode(signiture),
                                             auth_token,
                                             base64.b64decode(public_key))

        # Check the token was previously issued by this system and is still valid;
        # exactly one matching row must exist for this token and ip
        res = conn.execute("select * from tokens where token = ? and ip = ? ",
                           (auth_token, client_ip)).fetchall()
        if res == [] or len(res) > 1:
            return fail(user_auth_fail_msg)

        # Does the user have permission to use this repository?
        if repository not in config['users'][user]['uses_repositories']:
            return fail(user_auth_fail_msg)

        # Everything OK: consume the single-use token
        conn.execute("delete from tokens where token = ?", (auth_token,))
        conn.commit()

        # Generate a session token and send it to the client
        session_token = base64.b64encode(pysodium.randombytes(35))
        conn.execute("insert into session_tokens (token, expires, ip, username) values (?,?,?, ?)",
                     (session_token, time.time() + extend_session_duration, client_ip, user))
        conn.commit()
        return success({'session_token' : session_token})

    except Exception: # pylint: disable=broad-except
        return fail(user_auth_fail_msg)
python
def authenticate(): """ This does two things, either validate a pre-existing session token or create a new one from a signed authentication token. """ client_ip = request.environ['REMOTE_ADDR'] repository = request.headers['repository'] if repository not in config['repositories']: return fail(no_such_repo_msg) # == repository_path = config['repositories'][repository]['path'] conn = auth_db_connect(cpjoin(repository_path, 'auth_transient.db')); gc_tokens(conn) gc_tokens(conn) # Allow resume of an existing session if 'session_token' in request.headers: session_token = request.headers['session_token'] conn.execute("delete from session_tokens where expires < ?", (time.time(),)); conn.commit() res = conn.execute("select * from session_tokens where token = ? and ip = ?", (session_token, client_ip)).fetchall() if res != []: return success({'session_token' : session_token}) else: return fail(user_auth_fail_msg) # Create a new session else: user = request.headers['user'] auth_token = request.headers['auth_token'] signiture = request.headers['signature'] try: public_key = config['users'][user]['public_key'] # signature pysodium.crypto_sign_verify_detached(base64.b64decode(signiture), auth_token, base64.b64decode(public_key)) # check token was previously issued by this system and is still valid res = conn.execute("select * from tokens where token = ? and ip = ? ", (auth_token, client_ip)).fetchall() # Validate token matches one we sent if res == [] or len(res) > 1: return fail(user_auth_fail_msg) # Does the user have permission to use this repository? 
if repository not in config['users'][user]['uses_repositories']: return fail(user_auth_fail_msg) # Everything OK conn.execute("delete from tokens where token = ?", (auth_token,)); conn.commit() # generate a session token and send it to the client session_token = base64.b64encode(pysodium.randombytes(35)) conn.execute("insert into session_tokens (token, expires, ip, username) values (?,?,?, ?)", (session_token, time.time() + extend_session_duration, client_ip, user)) conn.commit() return success({'session_token' : session_token}) except Exception: # pylint: disable=broad-except return fail(user_auth_fail_msg)
[ "def", "authenticate", "(", ")", ":", "client_ip", "=", "request", ".", "environ", "[", "'REMOTE_ADDR'", "]", "repository", "=", "request", ".", "headers", "[", "'repository'", "]", "if", "repository", "not", "in", "config", "[", "'repositories'", "]", ":", ...
This does two things, either validate a pre-existing session token or create a new one from a signed authentication token.
[ "This", "does", "two", "things", "either", "validate", "a", "pre", "-", "existing", "session", "token", "or", "create", "a", "new", "one", "from", "a", "signed", "authentication", "token", "." ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L176-L230
robehickman/simple-http-file-sync
shttpfs/server.py
have_authenticated_user
def have_authenticated_user(client_ip, repository, session_token):
    """Check the user-submitted session token against the db, and that the
    client ip has not changed.

    Returns the matching session row (a dict) on success, False otherwise,
    extending the session's expiry as a side effect.
    """
    if repository not in config['repositories']:
        return False

    repository_path = config['repositories'][repository]['path']
    conn = auth_db_connect(cpjoin(repository_path, 'auth_transient.db'))

    # Garbage collect expired session tokens — but never the token of the
    # client currently holding the user (commit) lock. A large upload can
    # outlive the lock's nominal expiry while this table is being collected;
    # that is harmless because the flock held for the duration of each
    # operation prevents the lock being stolen, and the expiry is pushed
    # into the future at the end of the operation. So the lock owner's
    # session token is explicitly excluded from collection here.
    user_lock     = read_user_lock(repository_path)
    active_commit = user_lock['session_token'] if user_lock is not None else None
    if active_commit is not None:
        conn.execute("delete from session_tokens where expires < ? and token != ?",
                     (time.time(), active_commit))
    else:
        conn.execute("delete from session_tokens where expires < ?", (time.time(),))

    # Look up the session token for this client ip
    res = conn.execute("select * from session_tokens where token = ? and ip = ?",
                       (session_token, client_ip)).fetchall()
    if res != [] and repository in config['users'][res[0]['username']]['uses_repositories']:
        conn.execute("update session_tokens set expires = ? where token = ? and ip = ?",
                     (time.time() + extend_session_duration, session_token, client_ip))
        conn.commit() # to make sure the update and delete have the same view
        return res[0]

    conn.commit()
    return False
python
def have_authenticated_user(client_ip, repository, session_token): """ check user submitted session token against the db and that ip has not changed """ if repository not in config['repositories']: return False repository_path = config['repositories'][repository]['path'] conn = auth_db_connect(cpjoin(repository_path, 'auth_transient.db')) # Garbage collect session tokens. We must not garbage collect the authentication token of the client # which is currently doing a commit. Large files can take a long time to upload and during this time, # the locks expiration is not being updated thus can expire. This is a problem here as session tokens # table is garbage collected every time a user authenticates. It does not matter if the user_lock # expires while the client also holds the flock, as it is updated to be in the future at the end of # the current operation. We exclude any tokens owned by the client which currently owns the user # lock for this reason. user_lock = read_user_lock(repository_path) active_commit = user_lock['session_token'] if user_lock != None else None if active_commit != None: conn.execute("delete from session_tokens where expires < ? and token != ?", (time.time(), active_commit)) else: conn.execute("delete from session_tokens where expires < ?", (time.time(),)) # Get the session token res = conn.execute("select * from session_tokens where token = ? and ip = ?", (session_token, client_ip)).fetchall() if res != [] and repository in config['users'][res[0]['username']]['uses_repositories']: conn.execute("update session_tokens set expires = ? where token = ? and ip = ?", (time.time() + extend_session_duration, session_token, client_ip)) conn.commit() # to make sure the update and delete have the same view return res[0] conn.commit() return False
[ "def", "have_authenticated_user", "(", "client_ip", ",", "repository", ",", "session_token", ")", ":", "if", "repository", "not", "in", "config", "[", "'repositories'", "]", ":", "return", "False", "repository_path", "=", "config", "[", "'repositories'", "]", "[...
check user submitted session token against the db and that ip has not changed
[ "check", "user", "submitted", "session", "token", "against", "the", "db", "and", "that", "ip", "has", "not", "changed" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L234-L267
robehickman/simple-http-file-sync
shttpfs/server.py
find_changed
def find_changed():
    """Find changes since the revision the client is currently holding.

    Reads client_changes and conflict_resolutions from the JSON body,
    merges them with the server-side change log, and returns the sorted
    combined change set plus the current head revision.
    """
    session_token = request.headers['session_token']
    repository    = request.headers['repository']

    #===
    current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token)
    if current_user is False:
        return fail(user_auth_fail_msg)

    #===
    repository_path = config['repositories'][repository]['path']
    body_data = request.get_json()

    #===
    data_store = versioned_storage(repository_path)
    head = data_store.get_head()
    # A 'root' head means an empty repository: nothing can have changed
    if head == 'root':
        return success({}, {'head' : 'root', 'sorted_changes' : {'none' : []}})

    # Find changed items
    client_changes = json.loads(body_data['client_changes'])
    server_changes = data_store.get_changes_since(request.headers["previous_revision"], head)

    # Resolve conflicts: each resolution names exactly one winning side
    conflict_resolutions = json.loads(body_data['conflict_resolutions'])
    if conflict_resolutions != []:
        resolutions = {'server' : {},'client' : {}}
        for r in conflict_resolutions:
            if len(r['4_resolution']) != 1 or r['4_resolution'][0] not in ['client', 'server']:
                return fail(conflict_msg)
            resolutions[r['4_resolution'][0]][r['1_path']] = None

        # BUG FIX: dict.iteritems() is Python 2 only and raises
        # AttributeError on Python 3; .items() behaves identically here
        # on both versions.
        client_changes = {k : v for k,v in client_changes.items() if v['path'] not in resolutions['server']}
        server_changes = {k : v for k,v in server_changes.items() if v['path'] not in resolutions['client']}

    sorted_changes = merge_client_and_server_changes(server_changes, client_changes)
    return success({}, {'head' : head, 'sorted_changes': sorted_changes})
python
def find_changed(): """ Find changes since the revision it is currently holding """ session_token = request.headers['session_token'] repository = request.headers['repository'] #=== current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token) if current_user is False: return fail(user_auth_fail_msg) #=== repository_path = config['repositories'][repository]['path'] body_data = request.get_json() #=== data_store = versioned_storage(repository_path) head = data_store.get_head() if head == 'root': return success({}, {'head' : 'root', 'sorted_changes' : {'none' : []}}) # Find changed items client_changes = json.loads(body_data['client_changes']) server_changes = data_store.get_changes_since(request.headers["previous_revision"], head) # Resolve conflicts conflict_resolutions = json.loads(body_data['conflict_resolutions']) if conflict_resolutions != []: resolutions = {'server' : {},'client' : {}} for r in conflict_resolutions: if len(r['4_resolution']) != 1 or r['4_resolution'][0] not in ['client', 'server']: return fail(conflict_msg) resolutions[r['4_resolution'][0]][r['1_path']] = None client_changes = {k : v for k,v in client_changes.iteritems() if v['path'] not in resolutions['server']} server_changes = {k : v for k,v in server_changes.iteritems() if v['path'] not in resolutions['client']} sorted_changes = merge_client_and_server_changes(server_changes, client_changes) return success({}, {'head' : head, 'sorted_changes': sorted_changes})
[ "def", "find_changed", "(", ")", ":", "session_token", "=", "request", ".", "headers", "[", "'session_token'", "]", "repository", "=", "request", ".", "headers", "[", "'repository'", "]", "#===", "current_user", "=", "have_authenticated_user", "(", "request", "....
Find changes since the revision it is currently holding
[ "Find", "changes", "since", "the", "revision", "it", "is", "currently", "holding" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L274-L309
robehickman/simple-http-file-sync
shttpfs/server.py
pull_file
def pull_file():
    """Send a single versioned file from the server to the client.

    The file's metadata goes back in the 'file_info_json' header; the body
    streams the content-addressed object from the store.
    """
    session_token = request.headers['session_token']
    repository    = request.headers['repository']

    # Authentication
    current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token)
    if current_user is False:
        return fail(user_auth_fail_msg)

    # Resolve the requested path to its content hash and stream it back.
    # Objects are sharded by hash prefix, hence the [2:] on the file name.
    data_store = versioned_storage(config['repositories'][repository]['path'])
    file_info  = data_store.get_file_info_from_path(request.headers['path'])
    object_dir = data_store.get_file_directory_path(file_info['hash'])
    return success({'file_info_json' : json.dumps(file_info)},
                   send_from_directory(object_dir, file_info['hash'][2:]))
python
def pull_file(): """ Get a file from the server """ session_token = request.headers['session_token'] repository = request.headers['repository'] #=== current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token) if current_user is False: return fail(user_auth_fail_msg) #=== data_store = versioned_storage(config['repositories'][repository]['path']) file_info = data_store.get_file_info_from_path(request.headers['path']) return success({'file_info_json' : json.dumps(file_info)}, send_from_directory(data_store.get_file_directory_path(file_info['hash']), file_info['hash'][2:]))
[ "def", "pull_file", "(", ")", ":", "session_token", "=", "request", ".", "headers", "[", "'session_token'", "]", "repository", "=", "request", ".", "headers", "[", "'repository'", "]", "#===", "current_user", "=", "have_authenticated_user", "(", "request", ".", ...
Get a file from the server
[ "Get", "a", "file", "from", "the", "server" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L314-L330
robehickman/simple-http-file-sync
shttpfs/server.py
begin_commit
def begin_commit(): """ Allow a client to begin a commit and acquire the write lock """ session_token = request.headers['session_token'] repository = request.headers['repository'] #=== current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token) if current_user is False: return fail(user_auth_fail_msg) #=== repository_path = config['repositories'][repository]['path'] def with_exclusive_lock(): # The commit is locked for a given time period to a given session token, # a client must hold this lock to use any of push_file(), delete_files() and commit(). # It does not matter if the user lock technically expires while a client is writing # a large file, as the user lock is locked using flock for the duration of any # operation and thus cannot be stolen by another client. It is updated to be in # the future before returning to the client. The lock only needs to survive until # the client owning the lock sends another request and re acquires the flock. if not can_aquire_user_lock(repository_path, session_token): return fail(lock_fail_msg) # Commits can only take place if the committing user has the latest revision, # as committing from an outdated state could cause unexpected results, and may # have conflicts. Conflicts are resolved during a client update so they are # handled by the client, and a server interface for this is not needed. data_store = versioned_storage(repository_path) if data_store.get_head() != request.headers["previous_revision"]: return fail(need_to_update_msg) # Should the lock expire, the client which had the lock previously will be unable # to continue the commit it had in progress. When this, or another client attempts # to commit again it must do so by first obtaining the lock again by calling begin_commit(). # Any remaining commit data from failed prior commits is garbage collected here. 
# While it would technically be possible to implement commit resume should the same # client resume, I only see commits failing due to a network error and this is so # rare I don't think it's worth the trouble. if data_store.have_active_commit(): data_store.rollback() #------------ data_store.begin() update_user_lock(repository_path, session_token) return success() return lock_access(repository_path, with_exclusive_lock)
python
def begin_commit(): """ Allow a client to begin a commit and acquire the write lock """ session_token = request.headers['session_token'] repository = request.headers['repository'] #=== current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token) if current_user is False: return fail(user_auth_fail_msg) #=== repository_path = config['repositories'][repository]['path'] def with_exclusive_lock(): # The commit is locked for a given time period to a given session token, # a client must hold this lock to use any of push_file(), delete_files() and commit(). # It does not matter if the user lock technically expires while a client is writing # a large file, as the user lock is locked using flock for the duration of any # operation and thus cannot be stolen by another client. It is updated to be in # the future before returning to the client. The lock only needs to survive until # the client owning the lock sends another request and re acquires the flock. if not can_aquire_user_lock(repository_path, session_token): return fail(lock_fail_msg) # Commits can only take place if the committing user has the latest revision, # as committing from an outdated state could cause unexpected results, and may # have conflicts. Conflicts are resolved during a client update so they are # handled by the client, and a server interface for this is not needed. data_store = versioned_storage(repository_path) if data_store.get_head() != request.headers["previous_revision"]: return fail(need_to_update_msg) # Should the lock expire, the client which had the lock previously will be unable # to continue the commit it had in progress. When this, or another client attempts # to commit again it must do so by first obtaining the lock again by calling begin_commit(). # Any remaining commit data from failed prior commits is garbage collected here. 
# While it would technically be possible to implement commit resume should the same # client resume, I only see commits failing due to a network error and this is so # rare I don't think it's worth the trouble. if data_store.have_active_commit(): data_store.rollback() #------------ data_store.begin() update_user_lock(repository_path, session_token) return success() return lock_access(repository_path, with_exclusive_lock)
[ "def", "begin_commit", "(", ")", ":", "session_token", "=", "request", ".", "headers", "[", "'session_token'", "]", "repository", "=", "request", ".", "headers", "[", "'repository'", "]", "#===", "current_user", "=", "have_authenticated_user", "(", "request", "....
Allow a client to begin a commit and acquire the write lock
[ "Allow", "a", "client", "to", "begin", "a", "commit", "and", "acquire", "the", "write", "lock" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L380-L425
robehickman/simple-http-file-sync
shttpfs/server.py
push_file
def push_file(): """ Push a file to the server """ #NOTE beware that reading post data in flask causes hang until file upload is complete session_token = request.headers['session_token'] repository = request.headers['repository'] #=== current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token) if current_user is False: return fail(user_auth_fail_msg) #=== repository_path = config['repositories'][repository]['path'] def with_exclusive_lock(): if not varify_user_lock(repository_path, session_token): return fail(lock_fail_msg) #=== data_store = versioned_storage(repository_path) if not data_store.have_active_commit(): return fail(no_active_commit_msg) # There is no valid reason for path traversal characters to be in a file path within this system file_path = request.headers['path'] if any(True for item in re.split(r'\\|/', file_path) if item in ['..', '.']): return fail() #=== tmp_path = cpjoin(repository_path, 'tmp_file') with open(tmp_path, 'wb') as f: while True: chunk = request.stream.read(1000 * 1000) if chunk == b'': break f.write(chunk) #=== data_store.fs_put_from_file(tmp_path, {'path' : file_path}) # updates the user lock expiry update_user_lock(repository_path, session_token) return success() return lock_access(repository_path, with_exclusive_lock)
python
def push_file(): """ Push a file to the server """ #NOTE beware that reading post data in flask causes hang until file upload is complete session_token = request.headers['session_token'] repository = request.headers['repository'] #=== current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token) if current_user is False: return fail(user_auth_fail_msg) #=== repository_path = config['repositories'][repository]['path'] def with_exclusive_lock(): if not varify_user_lock(repository_path, session_token): return fail(lock_fail_msg) #=== data_store = versioned_storage(repository_path) if not data_store.have_active_commit(): return fail(no_active_commit_msg) # There is no valid reason for path traversal characters to be in a file path within this system file_path = request.headers['path'] if any(True for item in re.split(r'\\|/', file_path) if item in ['..', '.']): return fail() #=== tmp_path = cpjoin(repository_path, 'tmp_file') with open(tmp_path, 'wb') as f: while True: chunk = request.stream.read(1000 * 1000) if chunk == b'': break f.write(chunk) #=== data_store.fs_put_from_file(tmp_path, {'path' : file_path}) # updates the user lock expiry update_user_lock(repository_path, session_token) return success() return lock_access(repository_path, with_exclusive_lock)
[ "def", "push_file", "(", ")", ":", "#NOTE beware that reading post data in flask causes hang until file upload is complete", "session_token", "=", "request", ".", "headers", "[", "'session_token'", "]", "repository", "=", "request", ".", "headers", "[", "'repository'", "]",...
Push a file to the server
[ "Push", "a", "file", "to", "the", "server" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L431-L470
robehickman/simple-http-file-sync
shttpfs/server.py
delete_files
def delete_files(): """ Delete one or more files from the server """ session_token = request.headers['session_token'] repository = request.headers['repository'] #=== current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token) if current_user is False: return fail(user_auth_fail_msg) #=== repository_path = config['repositories'][repository]['path'] body_data = request.get_json() def with_exclusive_lock(): if not varify_user_lock(repository_path, session_token): return fail(lock_fail_msg) try: data_store = versioned_storage(repository_path) if not data_store.have_active_commit(): return fail(no_active_commit_msg) #------------- for fle in json.loads(body_data['files']): data_store.fs_delete(fle) # updates the user lock expiry update_user_lock(repository_path, session_token) return success() except Exception: return fail() # pylint: disable=broad-except return lock_access(repository_path, with_exclusive_lock)
python
def delete_files(): """ Delete one or more files from the server """ session_token = request.headers['session_token'] repository = request.headers['repository'] #=== current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token) if current_user is False: return fail(user_auth_fail_msg) #=== repository_path = config['repositories'][repository]['path'] body_data = request.get_json() def with_exclusive_lock(): if not varify_user_lock(repository_path, session_token): return fail(lock_fail_msg) try: data_store = versioned_storage(repository_path) if not data_store.have_active_commit(): return fail(no_active_commit_msg) #------------- for fle in json.loads(body_data['files']): data_store.fs_delete(fle) # updates the user lock expiry update_user_lock(repository_path, session_token) return success() except Exception: return fail() # pylint: disable=broad-except return lock_access(repository_path, with_exclusive_lock)
[ "def", "delete_files", "(", ")", ":", "session_token", "=", "request", ".", "headers", "[", "'session_token'", "]", "repository", "=", "request", ".", "headers", "[", "'repository'", "]", "#===", "current_user", "=", "have_authenticated_user", "(", "request", "....
Delete one or more files from the server
[ "Delete", "one", "or", "more", "files", "from", "the", "server" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L475-L504
robehickman/simple-http-file-sync
shttpfs/server.py
commit
def commit(): """ Commit changes and release the write lock """ session_token = request.headers['session_token'] repository = request.headers['repository'] #=== current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token) if current_user is False: return fail(user_auth_fail_msg) #=== repository_path = config['repositories'][repository]['path'] def with_exclusive_lock(): if not varify_user_lock(repository_path, session_token): return fail(lock_fail_msg) #=== data_store = versioned_storage(repository_path) if not data_store.have_active_commit(): return fail(no_active_commit_msg) result = {} if request.headers['mode'] == 'commit': new_head = data_store.commit(request.headers['commit_message'], current_user['username']) result = {'head' : new_head} else: data_store.rollback() # Release the user lock update_user_lock(repository_path, None) return success(result) return lock_access(repository_path, with_exclusive_lock)
python
def commit(): """ Commit changes and release the write lock """ session_token = request.headers['session_token'] repository = request.headers['repository'] #=== current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token) if current_user is False: return fail(user_auth_fail_msg) #=== repository_path = config['repositories'][repository]['path'] def with_exclusive_lock(): if not varify_user_lock(repository_path, session_token): return fail(lock_fail_msg) #=== data_store = versioned_storage(repository_path) if not data_store.have_active_commit(): return fail(no_active_commit_msg) result = {} if request.headers['mode'] == 'commit': new_head = data_store.commit(request.headers['commit_message'], current_user['username']) result = {'head' : new_head} else: data_store.rollback() # Release the user lock update_user_lock(repository_path, None) return success(result) return lock_access(repository_path, with_exclusive_lock)
[ "def", "commit", "(", ")", ":", "session_token", "=", "request", ".", "headers", "[", "'session_token'", "]", "repository", "=", "request", ".", "headers", "[", "'repository'", "]", "#===", "current_user", "=", "have_authenticated_user", "(", "request", ".", "...
Commit changes and release the write lock
[ "Commit", "changes", "and", "release", "the", "write", "lock" ]
train
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/server.py#L509-L539
appdotnet/ADNpy
adnpy/models.py
APIModel.serialize
def serialize(self): """ Converts :class:`adnpy.models.Model` into a normal dict without references to the api """ data = {} for k, v in self.iteritems(): if k.startswith('_'): continue if isinstance(v, APIModel): data[k] = v.serialize() elif v and is_seq_not_string(v) and isinstance(v[0], APIModel): data[k] = [x.serialize() for x in v] else: data[k] = v return data
python
def serialize(self): """ Converts :class:`adnpy.models.Model` into a normal dict without references to the api """ data = {} for k, v in self.iteritems(): if k.startswith('_'): continue if isinstance(v, APIModel): data[k] = v.serialize() elif v and is_seq_not_string(v) and isinstance(v[0], APIModel): data[k] = [x.serialize() for x in v] else: data[k] = v return data
[ "def", "serialize", "(", "self", ")", ":", "data", "=", "{", "}", "for", "k", ",", "v", "in", "self", ".", "iteritems", "(", ")", ":", "if", "k", ".", "startswith", "(", "'_'", ")", ":", "continue", "if", "isinstance", "(", "v", ",", "APIModel", ...
Converts :class:`adnpy.models.Model` into a normal dict without references to the api
[ "Converts", ":", "class", ":", "adnpy", ".", "models", ".", "Model", "into", "a", "normal", "dict", "without", "references", "to", "the", "api" ]
train
https://github.com/appdotnet/ADNpy/blob/aedb181cd0d616257fac7b3676ac7d7211336118/adnpy/models.py#L105-L122
appdotnet/ADNpy
adnpy/models.py
APIModel.get_annotation
def get_annotation(self, key, result_format='list'): """ Is a convenience method for accessing annotations on models that have them """ value = self.get('_annotations_by_key', {}).get(key) if not value: return value if result_format == 'one': return value[0] return value
python
def get_annotation(self, key, result_format='list'): """ Is a convenience method for accessing annotations on models that have them """ value = self.get('_annotations_by_key', {}).get(key) if not value: return value if result_format == 'one': return value[0] return value
[ "def", "get_annotation", "(", "self", ",", "key", ",", "result_format", "=", "'list'", ")", ":", "value", "=", "self", ".", "get", "(", "'_annotations_by_key'", ",", "{", "}", ")", ".", "get", "(", "key", ")", "if", "not", "value", ":", "return", "va...
Is a convenience method for accessing annotations on models that have them
[ "Is", "a", "convenience", "method", "for", "accessing", "annotations", "on", "models", "that", "have", "them" ]
train
https://github.com/appdotnet/ADNpy/blob/aedb181cd0d616257fac7b3676ac7d7211336118/adnpy/models.py#L124-L135
appdotnet/ADNpy
adnpy/models.py
User.update_user
def update_user(self): """ Save the state of the current user """ # First create a copy of the current user user_dict = self.serialize() # Then delete the entities in the description field del user_dict['description']['entities'] # Then upload user_dict user, meta = self._api.update_user('me', data=user_dict)
python
def update_user(self): """ Save the state of the current user """ # First create a copy of the current user user_dict = self.serialize() # Then delete the entities in the description field del user_dict['description']['entities'] # Then upload user_dict user, meta = self._api.update_user('me', data=user_dict)
[ "def", "update_user", "(", "self", ")", ":", "# First create a copy of the current user", "user_dict", "=", "self", ".", "serialize", "(", ")", "# Then delete the entities in the description field", "del", "user_dict", "[", "'description'", "]", "[", "'entities'", "]", ...
Save the state of the current user
[ "Save", "the", "state", "of", "the", "current", "user" ]
train
https://github.com/appdotnet/ADNpy/blob/aedb181cd0d616257fac7b3676ac7d7211336118/adnpy/models.py#L161-L170
MacHu-GWU/constant2-project
constant2/_constant2.py
is_same_dict
def is_same_dict(d1, d2): """Test two dictionary is equal on values. (ignore order) """ for k, v in d1.items(): if isinstance(v, dict): is_same_dict(v, d2[k]) else: assert d1[k] == d2[k] for k, v in d2.items(): if isinstance(v, dict): is_same_dict(v, d1[k]) else: assert d1[k] == d2[k]
python
def is_same_dict(d1, d2): """Test two dictionary is equal on values. (ignore order) """ for k, v in d1.items(): if isinstance(v, dict): is_same_dict(v, d2[k]) else: assert d1[k] == d2[k] for k, v in d2.items(): if isinstance(v, dict): is_same_dict(v, d1[k]) else: assert d1[k] == d2[k]
[ "def", "is_same_dict", "(", "d1", ",", "d2", ")", ":", "for", "k", ",", "v", "in", "d1", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "is_same_dict", "(", "v", ",", "d2", "[", "k", "]", ")", "else", ":", ...
Test two dictionary is equal on values. (ignore order)
[ "Test", "two", "dictionary", "is", "equal", "on", "values", ".", "(", "ignore", "order", ")" ]
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L531-L544
MacHu-GWU/constant2-project
constant2/_constant2.py
_Constant.Items
def Items(cls): """non-class attributes ordered by alphabetical order. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> MyClass.Items() [("a", 1), ("b", 2)] .. versionadded:: 0.0.5 """ l = list() for attr, value in get_all_attributes(cls): # if it's not a class(Constant) if not inspect.isclass(value): l.append((attr, value)) return list(sorted(l, key=lambda x: x[0]))
python
def Items(cls): """non-class attributes ordered by alphabetical order. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> MyClass.Items() [("a", 1), ("b", 2)] .. versionadded:: 0.0.5 """ l = list() for attr, value in get_all_attributes(cls): # if it's not a class(Constant) if not inspect.isclass(value): l.append((attr, value)) return list(sorted(l, key=lambda x: x[0]))
[ "def", "Items", "(", "cls", ")", ":", "l", "=", "list", "(", ")", "for", "attr", ",", "value", "in", "get_all_attributes", "(", "cls", ")", ":", "# if it's not a class(Constant)", "if", "not", "inspect", ".", "isclass", "(", "value", ")", ":", "l", "."...
non-class attributes ordered by alphabetical order. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> MyClass.Items() [("a", 1), ("b", 2)] .. versionadded:: 0.0.5
[ "non", "-", "class", "attributes", "ordered", "by", "alphabetical", "order", "." ]
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L77-L103
MacHu-GWU/constant2-project
constant2/_constant2.py
_Constant.items
def items(self): """non-class attributes ordered by alphabetical order. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> my_class = MyClass() >>> my_class.items() [("a", 1), ("b", 2)] .. versionchanged:: 0.0.5 """ l = list() # 为什么这里是 get_all_attributes(self.__class__) 而不是 # get_all_attributes(self) ? 因为有些实例不支持 # get_all_attributes(instance) 方法, 会报错。 # 所以我们从类里得到所有的属性信息, 然后获得这些属性在实例中 # 对应的值。 for attr, value in get_all_attributes(self.__class__): value = getattr(self, attr) # if it is not a instance of class(Constant) if not isinstance(value, Constant): l.append((attr, value)) return list(sorted(l, key=lambda x: x[0]))
python
def items(self): """non-class attributes ordered by alphabetical order. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> my_class = MyClass() >>> my_class.items() [("a", 1), ("b", 2)] .. versionchanged:: 0.0.5 """ l = list() # 为什么这里是 get_all_attributes(self.__class__) 而不是 # get_all_attributes(self) ? 因为有些实例不支持 # get_all_attributes(instance) 方法, 会报错。 # 所以我们从类里得到所有的属性信息, 然后获得这些属性在实例中 # 对应的值。 for attr, value in get_all_attributes(self.__class__): value = getattr(self, attr) # if it is not a instance of class(Constant) if not isinstance(value, Constant): l.append((attr, value)) return list(sorted(l, key=lambda x: x[0]))
[ "def", "items", "(", "self", ")", ":", "l", "=", "list", "(", ")", "# 为什么这里是 get_all_attributes(self.__class__) 而不是", "# get_all_attributes(self) ? 因为有些实例不支持", "# get_all_attributes(instance) 方法, 会报错。", "# 所以我们从类里得到所有的属性信息, 然后获得这些属性在实例中", "# 对应的值。", "for", "attr", ",", "value",...
non-class attributes ordered by alphabetical order. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> my_class = MyClass() >>> my_class.items() [("a", 1), ("b", 2)] .. versionchanged:: 0.0.5
[ "non", "-", "class", "attributes", "ordered", "by", "alphabetical", "order", "." ]
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L105-L139
MacHu-GWU/constant2-project
constant2/_constant2.py
_Constant.Subclasses
def Subclasses(cls, sort_by=None, reverse=False): """Get all nested Constant class and it's name pair. :param sort_by: the attribute name used for sorting. :param reverse: if True, return in descend order. :returns: [(attr, value),...] pairs. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> MyClass.Subclasses() [("C", MyClass.C), ("D", MyClass.D)] .. versionadded:: 0.0.3 """ l = list() for attr, value in get_all_attributes(cls): try: if issubclass(value, Constant): l.append((attr, value)) except: pass if sort_by is None: sort_by = "__creation_index__" l = list( sorted(l, key=lambda x: getattr(x[1], sort_by), reverse=reverse)) return l
python
def Subclasses(cls, sort_by=None, reverse=False): """Get all nested Constant class and it's name pair. :param sort_by: the attribute name used for sorting. :param reverse: if True, return in descend order. :returns: [(attr, value),...] pairs. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> MyClass.Subclasses() [("C", MyClass.C), ("D", MyClass.D)] .. versionadded:: 0.0.3 """ l = list() for attr, value in get_all_attributes(cls): try: if issubclass(value, Constant): l.append((attr, value)) except: pass if sort_by is None: sort_by = "__creation_index__" l = list( sorted(l, key=lambda x: getattr(x[1], sort_by), reverse=reverse)) return l
[ "def", "Subclasses", "(", "cls", ",", "sort_by", "=", "None", ",", "reverse", "=", "False", ")", ":", "l", "=", "list", "(", ")", "for", "attr", ",", "value", "in", "get_all_attributes", "(", "cls", ")", ":", "try", ":", "if", "issubclass", "(", "v...
Get all nested Constant class and it's name pair. :param sort_by: the attribute name used for sorting. :param reverse: if True, return in descend order. :returns: [(attr, value),...] pairs. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> MyClass.Subclasses() [("C", MyClass.C), ("D", MyClass.D)] .. versionadded:: 0.0.3
[ "Get", "all", "nested", "Constant", "class", "and", "it", "s", "name", "pair", "." ]
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L190-L228
MacHu-GWU/constant2-project
constant2/_constant2.py
_Constant.subclasses
def subclasses(self, sort_by=None, reverse=False): """Get all nested Constant class instance and it's name pair. :param sort_by: the attribute name used for sorting. :param reverse: if True, return in descend order. :returns: [(attr, value),...] pairs. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> my_class = MyClass() >>> my_class.subclasses() [("C", my_class.C), ("D", my_class.D)] .. versionadded:: 0.0.4 """ l = list() for attr, _ in self.Subclasses(sort_by, reverse): value = getattr(self, attr) l.append((attr, value)) return l
python
def subclasses(self, sort_by=None, reverse=False): """Get all nested Constant class instance and it's name pair. :param sort_by: the attribute name used for sorting. :param reverse: if True, return in descend order. :returns: [(attr, value),...] pairs. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> my_class = MyClass() >>> my_class.subclasses() [("C", my_class.C), ("D", my_class.D)] .. versionadded:: 0.0.4 """ l = list() for attr, _ in self.Subclasses(sort_by, reverse): value = getattr(self, attr) l.append((attr, value)) return l
[ "def", "subclasses", "(", "self", ",", "sort_by", "=", "None", ",", "reverse", "=", "False", ")", ":", "l", "=", "list", "(", ")", "for", "attr", ",", "_", "in", "self", ".", "Subclasses", "(", "sort_by", ",", "reverse", ")", ":", "value", "=", "...
Get all nested Constant class instance and it's name pair. :param sort_by: the attribute name used for sorting. :param reverse: if True, return in descend order. :returns: [(attr, value),...] pairs. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> my_class = MyClass() >>> my_class.subclasses() [("C", my_class.C), ("D", my_class.D)] .. versionadded:: 0.0.4
[ "Get", "all", "nested", "Constant", "class", "instance", "and", "it", "s", "name", "pair", "." ]
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L230-L259
MacHu-GWU/constant2-project
constant2/_constant2.py
_Constant.GetFirst
def GetFirst(cls, attr, value, e=0.000001, sort_by="__name__"): """Get the first nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionadded:: 0.0.5 """ for _, klass in cls.Subclasses(sort_by=sort_by): try: if klass.__dict__[attr] == approx(value, e): return klass except: pass return None
python
def GetFirst(cls, attr, value, e=0.000001, sort_by="__name__"): """Get the first nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionadded:: 0.0.5 """ for _, klass in cls.Subclasses(sort_by=sort_by): try: if klass.__dict__[attr] == approx(value, e): return klass except: pass return None
[ "def", "GetFirst", "(", "cls", ",", "attr", ",", "value", ",", "e", "=", "0.000001", ",", "sort_by", "=", "\"__name__\"", ")", ":", "for", "_", ",", "klass", "in", "cls", ".", "Subclasses", "(", "sort_by", "=", "sort_by", ")", ":", "try", ":", "if"...
Get the first nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionadded:: 0.0.5
[ "Get", "the", "first", "nested", "Constant", "class", "that", "met", "klass", ".", "attr", "==", "value", "." ]
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L263-L280
MacHu-GWU/constant2-project
constant2/_constant2.py
_Constant.get_first
def get_first(self, attr, value, e=0.000001, sort_by="__name__", reverse=False): """Get the first nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionchanged:: 0.0.5 """ for _, klass in self.subclasses(sort_by, reverse): try: if getattr(klass, attr) == approx(value, e): return klass except: pass return None
python
def get_first(self, attr, value, e=0.000001, sort_by="__name__", reverse=False): """Get the first nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionchanged:: 0.0.5 """ for _, klass in self.subclasses(sort_by, reverse): try: if getattr(klass, attr) == approx(value, e): return klass except: pass return None
[ "def", "get_first", "(", "self", ",", "attr", ",", "value", ",", "e", "=", "0.000001", ",", "sort_by", "=", "\"__name__\"", ",", "reverse", "=", "False", ")", ":", "for", "_", ",", "klass", "in", "self", ".", "subclasses", "(", "sort_by", ",", "rever...
Get the first nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionchanged:: 0.0.5
[ "Get", "the", "first", "nested", "Constant", "class", "that", "met", "klass", ".", "attr", "==", "value", "." ]
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L282-L300
MacHu-GWU/constant2-project
constant2/_constant2.py
_Constant.GetAll
def GetAll(cls, attr, value, e=0.000001, sort_by="__name__"): """Get all nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionadded:: 0.0.5 """ matched = list() for _, klass in cls.Subclasses(sort_by=sort_by): try: if klass.__dict__[attr] == approx(value, e): matched.append(klass) except: # pragma: no cover pass return matched
python
def GetAll(cls, attr, value, e=0.000001, sort_by="__name__"): """Get all nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionadded:: 0.0.5 """ matched = list() for _, klass in cls.Subclasses(sort_by=sort_by): try: if klass.__dict__[attr] == approx(value, e): matched.append(klass) except: # pragma: no cover pass return matched
[ "def", "GetAll", "(", "cls", ",", "attr", ",", "value", ",", "e", "=", "0.000001", ",", "sort_by", "=", "\"__name__\"", ")", ":", "matched", "=", "list", "(", ")", "for", "_", ",", "klass", "in", "cls", ".", "Subclasses", "(", "sort_by", "=", "sort...
Get all nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionadded:: 0.0.5
[ "Get", "all", "nested", "Constant", "class", "that", "met", "klass", ".", "attr", "==", "value", "." ]
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L304-L322
MacHu-GWU/constant2-project
constant2/_constant2.py
_Constant.get_all
def get_all(self, attr, value, e=0.000001, sort_by="__name__", reverse=False): """Get all nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionchanged:: 0.0.5 """ matched = list() for _, klass in self.subclasses(sort_by, reverse): try: if getattr(klass, attr) == approx(value, e): matched.append(klass) except: # pragma: no cover pass return matched
python
def get_all(self, attr, value, e=0.000001, sort_by="__name__", reverse=False): """Get all nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionchanged:: 0.0.5 """ matched = list() for _, klass in self.subclasses(sort_by, reverse): try: if getattr(klass, attr) == approx(value, e): matched.append(klass) except: # pragma: no cover pass return matched
[ "def", "get_all", "(", "self", ",", "attr", ",", "value", ",", "e", "=", "0.000001", ",", "sort_by", "=", "\"__name__\"", ",", "reverse", "=", "False", ")", ":", "matched", "=", "list", "(", ")", "for", "_", ",", "klass", "in", "self", ".", "subcla...
Get all nested Constant class that met ``klass.attr == value``. :param attr: attribute name. :param value: value. :param e: used for float value comparison. :param sort_by: nested class is ordered by <sort_by> attribute. .. versionchanged:: 0.0.5
[ "Get", "all", "nested", "Constant", "class", "that", "met", "klass", ".", "attr", "==", "value", "." ]
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L324-L343
MacHu-GWU/constant2-project
constant2/_constant2.py
_Constant.BackAssign
def BackAssign(cls, other_entity_klass, this_entity_backpopulate_field, other_entity_backpopulate_field, is_many_to_one=False): """ Assign defined one side mapping relationship to other side. For example, each employee belongs to one department, then one department includes many employees. If you defined each employee's department, this method will assign employees to ``Department.employees`` field. This is an one to many (department to employee) example. Another example would be, each employee has multiple tags. If you defined tags for each employee, this method will assign employees to ``Tag.employees`` field. This is and many to many (employee to tag) example. Support: - many to many mapping - one to many mapping :param other_entity_klass: a :class:`Constant` class. :param this_entity_backpopulate_field: str :param other_entity_backpopulate_field: str :param is_many_to_one: bool :return: """ data = dict() for _, other_klass in other_entity_klass.Subclasses(): other_field_value = getattr( other_klass, this_entity_backpopulate_field) if isinstance(other_field_value, (tuple, list)): for self_klass in other_field_value: self_key = self_klass.__name__ try: data[self_key].append(other_klass) except KeyError: data[self_key] = [other_klass, ] else: if other_field_value is not None: self_klass = other_field_value self_key = self_klass.__name__ try: data[self_key].append(other_klass) except KeyError: data[self_key] = [other_klass, ] if is_many_to_one: new_data = dict() for key, value in data.items(): try: new_data[key] = value[0] except: # pragma: no cover pass data = new_data for self_key, other_klass_list in data.items(): setattr(getattr(cls, self_key), other_entity_backpopulate_field, other_klass_list)
python
def BackAssign(cls, other_entity_klass, this_entity_backpopulate_field, other_entity_backpopulate_field, is_many_to_one=False): """ Assign defined one side mapping relationship to other side. For example, each employee belongs to one department, then one department includes many employees. If you defined each employee's department, this method will assign employees to ``Department.employees`` field. This is an one to many (department to employee) example. Another example would be, each employee has multiple tags. If you defined tags for each employee, this method will assign employees to ``Tag.employees`` field. This is and many to many (employee to tag) example. Support: - many to many mapping - one to many mapping :param other_entity_klass: a :class:`Constant` class. :param this_entity_backpopulate_field: str :param other_entity_backpopulate_field: str :param is_many_to_one: bool :return: """ data = dict() for _, other_klass in other_entity_klass.Subclasses(): other_field_value = getattr( other_klass, this_entity_backpopulate_field) if isinstance(other_field_value, (tuple, list)): for self_klass in other_field_value: self_key = self_klass.__name__ try: data[self_key].append(other_klass) except KeyError: data[self_key] = [other_klass, ] else: if other_field_value is not None: self_klass = other_field_value self_key = self_klass.__name__ try: data[self_key].append(other_klass) except KeyError: data[self_key] = [other_klass, ] if is_many_to_one: new_data = dict() for key, value in data.items(): try: new_data[key] = value[0] except: # pragma: no cover pass data = new_data for self_key, other_klass_list in data.items(): setattr(getattr(cls, self_key), other_entity_backpopulate_field, other_klass_list)
[ "def", "BackAssign", "(", "cls", ",", "other_entity_klass", ",", "this_entity_backpopulate_field", ",", "other_entity_backpopulate_field", ",", "is_many_to_one", "=", "False", ")", ":", "data", "=", "dict", "(", ")", "for", "_", ",", "other_klass", "in", "other_en...
Assign defined one side mapping relationship to other side. For example, each employee belongs to one department, then one department includes many employees. If you defined each employee's department, this method will assign employees to ``Department.employees`` field. This is an one to many (department to employee) example. Another example would be, each employee has multiple tags. If you defined tags for each employee, this method will assign employees to ``Tag.employees`` field. This is and many to many (employee to tag) example. Support: - many to many mapping - one to many mapping :param other_entity_klass: a :class:`Constant` class. :param this_entity_backpopulate_field: str :param other_entity_backpopulate_field: str :param is_many_to_one: bool :return:
[ "Assign", "defined", "one", "side", "mapping", "relationship", "to", "other", "side", "." ]
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L373-L432
MacHu-GWU/constant2-project
constant2/_constant2.py
_Constant.dump
def dump(cls): """Dump data into a dict. .. versionadded:: 0.0.2 """ d = OrderedDict(cls.Items()) d["__classname__"] = cls.__name__ for attr, klass in cls.Subclasses(): d[attr] = klass.dump() return OrderedDict([(cls.__name__, d)])
python
def dump(cls): """Dump data into a dict. .. versionadded:: 0.0.2 """ d = OrderedDict(cls.Items()) d["__classname__"] = cls.__name__ for attr, klass in cls.Subclasses(): d[attr] = klass.dump() return OrderedDict([(cls.__name__, d)])
[ "def", "dump", "(", "cls", ")", ":", "d", "=", "OrderedDict", "(", "cls", ".", "Items", "(", ")", ")", "d", "[", "\"__classname__\"", "]", "=", "cls", ".", "__name__", "for", "attr", ",", "klass", "in", "cls", ".", "Subclasses", "(", ")", ":", "d...
Dump data into a dict. .. versionadded:: 0.0.2
[ "Dump", "data", "into", "a", "dict", "." ]
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L435-L444
MacHu-GWU/constant2-project
constant2/_constant2.py
_Constant.load
def load(cls, data): """Construct a Constant class from it's dict data. .. versionadded:: 0.0.2 """ if len(data) == 1: for key, value in data.items(): if "__classname__" not in value: # pragma: no cover raise ValueError name = key bases = (Constant,) attrs = dict() for k, v in value.items(): if isinstance(v, dict): if "__classname__" in v: attrs[k] = cls.load({k: v}) else: attrs[k] = v else: attrs[k] = v return type(name, bases, attrs) else: # pragma: no cover raise ValueError
python
def load(cls, data): """Construct a Constant class from it's dict data. .. versionadded:: 0.0.2 """ if len(data) == 1: for key, value in data.items(): if "__classname__" not in value: # pragma: no cover raise ValueError name = key bases = (Constant,) attrs = dict() for k, v in value.items(): if isinstance(v, dict): if "__classname__" in v: attrs[k] = cls.load({k: v}) else: attrs[k] = v else: attrs[k] = v return type(name, bases, attrs) else: # pragma: no cover raise ValueError
[ "def", "load", "(", "cls", ",", "data", ")", ":", "if", "len", "(", "data", ")", "==", "1", ":", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "\"__classname__\"", "not", "in", "value", ":", "# pragma: no cover", "rai...
Construct a Constant class from it's dict data. .. versionadded:: 0.0.2
[ "Construct", "a", "Constant", "class", "from", "it", "s", "dict", "data", "." ]
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L447-L469
hobson/pug-ann
pug/ann/data/weather.py
hourly
def hourly(location='Fresno, CA', days=1, start=None, end=None, years=1, use_cache=True, verbosity=1): """ Get detailed (hourly) weather data for the requested days and location The Weather Underground URL for Fresno, CA on 1/1/2011 is: http://www.wunderground.com/history/airport/KFAT/2011/1/1/DailyHistory.html?MR=1&format=1 This will fail periodically on Travis, b/c wunderground says "No daily or hourly history data available" >> df = hourly('Fresno, CA', verbosity=-1) >> 1 <= len(df) <= 24 * 2 True The time zone of the client where this is used to compose the first column label, hence the ellipsis >> df.columns # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Index([u'Time... >> df = hourly('Fresno, CA', days=5, verbosity=-1) >> 24 * 4 <= len(df) <= 24 * (5 + 1) * 2 True """ airport_code = airport(location, default=location) if isinstance(days, int): start = start or None end = end or datetime.datetime.today().date() days = pd.date_range(start=start, end=end, periods=days) # refresh the cache each calendar month or each change in the number of days in the dataset cache_path = 'hourly-{}-{}-{:02d}-{:04d}.csv'.format(airport_code, days[-1].year, days[-1].month, len(days)) cache_path = os.path.join(CACHE_PATH, cache_path) if use_cache: try: return pd.DataFrame.from_csv(cache_path) except: pass df = pd.DataFrame() for day in days: url = ('http://www.wunderground.com/history/airport/{airport_code}/{year}/{month}/{day}/DailyHistory.html?MR=1&format=1'.format( airport_code=airport_code, year=day.year, month=day.month, day=day.day)) if verbosity > 1: print('GETing *.CSV using "{0}"'.format(url)) buf = urllib.urlopen(url).read() if verbosity > 0: N = buf.count('\n') M = (buf.count(',') + N) / float(N) print('Retrieved CSV for airport code "{}" with appox. 
{} lines and {} columns = {} cells.'.format( airport_code, N, int(round(M)), int(round(M)) * N)) if (buf.count('\n') > 2) or ((buf.count('\n') > 1) and buf.split('\n')[1].count(',') > 0): table = util.read_csv(buf, format='header+values-list', numbers=True) columns = [s.strip() for s in table[0]] table = table[1:] tzs = [s[4:] for s in columns if (s[5:] in ['ST', 'DT'] and s[4] in 'PMCE' and s[:4].lower() == 'time')] if tzs: tz = tzs[0] else: tz = 'UTC' for rownum, row in enumerate(table): try: table[rownum] = [util.make_tz_aware(row[0], tz)] + row[1:] except ValueError: pass dates = [row[-1] for row in table] if not all(isinstance(date, (datetime.datetime, pd.Timestamp)) for date in dates): dates = [row[0] for row in table] if len(columns) == len(table[0]): df0 = pd.DataFrame(table, columns=columns, index=dates) df = df.append(df0) elif verbosity >= 0: msg = "The number of columns in the 1st row of the table:\n {}\n doesn't match the number of column labels:\n {}\n".format( table[0], columns) msg += "Wunderground.com probably can't find the airport: {} ({})\n or the date: {}\n in its database.\n".format( airport_code, location, day) msg += "Attempted a GET request using the URI:\n {0}\n".format(url) warnings.warn(msg) try: df.to_csv(cache_path) except: if verbosity > 0 and use_cache: from traceback import print_exc print_exc() warnings.warn('Unable to write weather data to cache file at {}'.format(cache_path)) return df
python
def hourly(location='Fresno, CA', days=1, start=None, end=None, years=1, use_cache=True, verbosity=1): """ Get detailed (hourly) weather data for the requested days and location The Weather Underground URL for Fresno, CA on 1/1/2011 is: http://www.wunderground.com/history/airport/KFAT/2011/1/1/DailyHistory.html?MR=1&format=1 This will fail periodically on Travis, b/c wunderground says "No daily or hourly history data available" >> df = hourly('Fresno, CA', verbosity=-1) >> 1 <= len(df) <= 24 * 2 True The time zone of the client where this is used to compose the first column label, hence the ellipsis >> df.columns # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Index([u'Time... >> df = hourly('Fresno, CA', days=5, verbosity=-1) >> 24 * 4 <= len(df) <= 24 * (5 + 1) * 2 True """ airport_code = airport(location, default=location) if isinstance(days, int): start = start or None end = end or datetime.datetime.today().date() days = pd.date_range(start=start, end=end, periods=days) # refresh the cache each calendar month or each change in the number of days in the dataset cache_path = 'hourly-{}-{}-{:02d}-{:04d}.csv'.format(airport_code, days[-1].year, days[-1].month, len(days)) cache_path = os.path.join(CACHE_PATH, cache_path) if use_cache: try: return pd.DataFrame.from_csv(cache_path) except: pass df = pd.DataFrame() for day in days: url = ('http://www.wunderground.com/history/airport/{airport_code}/{year}/{month}/{day}/DailyHistory.html?MR=1&format=1'.format( airport_code=airport_code, year=day.year, month=day.month, day=day.day)) if verbosity > 1: print('GETing *.CSV using "{0}"'.format(url)) buf = urllib.urlopen(url).read() if verbosity > 0: N = buf.count('\n') M = (buf.count(',') + N) / float(N) print('Retrieved CSV for airport code "{}" with appox. 
{} lines and {} columns = {} cells.'.format( airport_code, N, int(round(M)), int(round(M)) * N)) if (buf.count('\n') > 2) or ((buf.count('\n') > 1) and buf.split('\n')[1].count(',') > 0): table = util.read_csv(buf, format='header+values-list', numbers=True) columns = [s.strip() for s in table[0]] table = table[1:] tzs = [s[4:] for s in columns if (s[5:] in ['ST', 'DT'] and s[4] in 'PMCE' and s[:4].lower() == 'time')] if tzs: tz = tzs[0] else: tz = 'UTC' for rownum, row in enumerate(table): try: table[rownum] = [util.make_tz_aware(row[0], tz)] + row[1:] except ValueError: pass dates = [row[-1] for row in table] if not all(isinstance(date, (datetime.datetime, pd.Timestamp)) for date in dates): dates = [row[0] for row in table] if len(columns) == len(table[0]): df0 = pd.DataFrame(table, columns=columns, index=dates) df = df.append(df0) elif verbosity >= 0: msg = "The number of columns in the 1st row of the table:\n {}\n doesn't match the number of column labels:\n {}\n".format( table[0], columns) msg += "Wunderground.com probably can't find the airport: {} ({})\n or the date: {}\n in its database.\n".format( airport_code, location, day) msg += "Attempted a GET request using the URI:\n {0}\n".format(url) warnings.warn(msg) try: df.to_csv(cache_path) except: if verbosity > 0 and use_cache: from traceback import print_exc print_exc() warnings.warn('Unable to write weather data to cache file at {}'.format(cache_path)) return df
[ "def", "hourly", "(", "location", "=", "'Fresno, CA'", ",", "days", "=", "1", ",", "start", "=", "None", ",", "end", "=", "None", ",", "years", "=", "1", ",", "use_cache", "=", "True", ",", "verbosity", "=", "1", ")", ":", "airport_code", "=", "air...
Get detailed (hourly) weather data for the requested days and location The Weather Underground URL for Fresno, CA on 1/1/2011 is: http://www.wunderground.com/history/airport/KFAT/2011/1/1/DailyHistory.html?MR=1&format=1 This will fail periodically on Travis, b/c wunderground says "No daily or hourly history data available" >> df = hourly('Fresno, CA', verbosity=-1) >> 1 <= len(df) <= 24 * 2 True The time zone of the client where this is used to compose the first column label, hence the ellipsis >> df.columns # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Index([u'Time... >> df = hourly('Fresno, CA', days=5, verbosity=-1) >> 24 * 4 <= len(df) <= 24 * (5 + 1) * 2 True
[ "Get", "detailed", "(", "hourly", ")", "weather", "data", "for", "the", "requested", "days", "and", "location" ]
train
https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/data/weather.py#L24-L107
hobson/pug-ann
pug/ann/data/weather.py
api
def api(feature='conditions', city='Portland', state='OR', key=None): """Use the wunderground API to get current conditions instead of scraping Please be kind and use your own key (they're FREE!): http://www.wunderground.com/weather/api/d/login.html References: http://www.wunderground.com/weather/api/d/terms.html Examples: >>> api('hurric', 'Boise', 'ID') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS {u'currenthurricane': ...}}} >>> features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' + ... 'planner rawtide satellite tide webcams yesterday').split(' ') >> everything = [api(f, 'Portland') for f in features] >> js = api('alerts', 'Portland', 'OR') >> js = api('condit', 'Sacramento', 'CA') >> js = api('forecast', 'Mobile', 'AL') >> js = api('10day', 'Fairhope', 'AL') >> js = api('geo', 'Decatur', 'AL') >> js = api('hist', 'history', 'AL') >> js = api('astro') """ features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' + 'planner rawtide satellite tide webcams yesterday').split(' ') feature = util.fuzzy_get(features, feature) # Please be kind and use your own key (they're FREE!): # http://www.wunderground.com/weather/api/d/login.html key = key or env.get('WUNDERGROUND', None, verbosity=-1) or env.get('WUNDERGROUND_KEY', 'c45a86c2fc63f7d0', verbosity=-1) url = 'http://api.wunderground.com/api/{key}/{feature}/q/{state}/{city}.json'.format( key=key, feature=feature, state=state, city=city) return json.load(urllib.urlopen(url))
python
def api(feature='conditions', city='Portland', state='OR', key=None): """Use the wunderground API to get current conditions instead of scraping Please be kind and use your own key (they're FREE!): http://www.wunderground.com/weather/api/d/login.html References: http://www.wunderground.com/weather/api/d/terms.html Examples: >>> api('hurric', 'Boise', 'ID') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS {u'currenthurricane': ...}}} >>> features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' + ... 'planner rawtide satellite tide webcams yesterday').split(' ') >> everything = [api(f, 'Portland') for f in features] >> js = api('alerts', 'Portland', 'OR') >> js = api('condit', 'Sacramento', 'CA') >> js = api('forecast', 'Mobile', 'AL') >> js = api('10day', 'Fairhope', 'AL') >> js = api('geo', 'Decatur', 'AL') >> js = api('hist', 'history', 'AL') >> js = api('astro') """ features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' + 'planner rawtide satellite tide webcams yesterday').split(' ') feature = util.fuzzy_get(features, feature) # Please be kind and use your own key (they're FREE!): # http://www.wunderground.com/weather/api/d/login.html key = key or env.get('WUNDERGROUND', None, verbosity=-1) or env.get('WUNDERGROUND_KEY', 'c45a86c2fc63f7d0', verbosity=-1) url = 'http://api.wunderground.com/api/{key}/{feature}/q/{state}/{city}.json'.format( key=key, feature=feature, state=state, city=city) return json.load(urllib.urlopen(url))
[ "def", "api", "(", "feature", "=", "'conditions'", ",", "city", "=", "'Portland'", ",", "state", "=", "'OR'", ",", "key", "=", "None", ")", ":", "features", "=", "(", "'alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10...
Use the wunderground API to get current conditions instead of scraping Please be kind and use your own key (they're FREE!): http://www.wunderground.com/weather/api/d/login.html References: http://www.wunderground.com/weather/api/d/terms.html Examples: >>> api('hurric', 'Boise', 'ID') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS {u'currenthurricane': ...}}} >>> features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' + ... 'planner rawtide satellite tide webcams yesterday').split(' ') >> everything = [api(f, 'Portland') for f in features] >> js = api('alerts', 'Portland', 'OR') >> js = api('condit', 'Sacramento', 'CA') >> js = api('forecast', 'Mobile', 'AL') >> js = api('10day', 'Fairhope', 'AL') >> js = api('geo', 'Decatur', 'AL') >> js = api('hist', 'history', 'AL') >> js = api('astro')
[ "Use", "the", "wunderground", "API", "to", "get", "current", "conditions", "instead", "of", "scraping" ]
train
https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/data/weather.py#L110-L142
hobson/pug-ann
pug/ann/data/weather.py
daily
def daily(location='Fresno, CA', years=1, use_cache=True, verbosity=1): """Retrieve weather for the indicated airport code or 'City, ST' string. >>> df = daily('Camas, WA', verbosity=-1) >>> 365 <= len(df) <= 365 * 2 + 1 True Sacramento data has gaps (airport KMCC): 8/21/2013 is missing from 2013. Whole months are missing from 2014. >>> df = daily('Sacramento, CA', years=[2013], verbosity=-1) >>> 364 <= len(df) <= 365 True >>> df.columns Index([u'PST', u'Max TemperatureF', u'Mean TemperatureF', u'Min TemperatureF', u'Max Dew PointF', u'MeanDew PointF', u'Min DewpointF', ... """ this_year = datetime.date.today().year if isinstance(years, (int, float)): # current (incomplete) year doesn't count in total number of years # so 0 would return this calendar year's weather data years = np.arange(0, int(years) + 1) years = sorted(years) if not all(1900 <= yr <= this_year for yr in years): years = np.array([abs(yr) if (1900 <= abs(yr) <= this_year) else (this_year - abs(int(yr))) for yr in years])[::-1] airport_code = airport(location, default=location) # refresh the cache each time the start or end year changes cache_path = 'daily-{}-{}-{}.csv'.format(airport_code, years[0], years[-1]) cache_path = os.path.join(CACHE_PATH, cache_path) if use_cache: try: return pd.DataFrame.from_csv(cache_path) except: pass df = pd.DataFrame() for year in years: url = ('http://www.wunderground.com/history/airport/{airport}/{yearstart}/1/1/' + 'CustomHistory.html?dayend=31&monthend=12&yearend={yearend}' + '&req_city=&req_state=&req_statename=&reqdb.zip=&reqdb.magic=&reqdb.wmo=&MR=1&format=1').format( airport=airport_code, yearstart=year, yearend=year ) if verbosity > 1: print('GETing *.CSV using "{0}"'.format(url)) buf = urllib.urlopen(url).read() if verbosity > 0: N = buf.count('\n') M = (buf.count(',') + N) / float(N) print('Retrieved CSV for airport code "{}" with appox. 
{} lines and {} columns = {} cells.'.format( airport_code, N, int(round(M)), int(round(M)) * N)) if verbosity > 2: print(buf) table = util.read_csv(buf, format='header+values-list', numbers=True) # # clean up the last column (if it contains <br> tags) table = [util.strip_br(row) if len(row) > 1 else row for row in table] # numcols = max(len(row) for row in table) # table = [row for row in table if len(row) == numcols] columns = table.pop(0) tzs = [s for s in columns if (s[1:] in ['ST', 'DT'] and s[0] in 'PMCE')] dates = [float('nan')] * len(table) for i, row in enumerate(table): for j, value in enumerate(row): if not value and value is not None: value = 0 continue if columns[j] in tzs: table[i][j] = util.make_tz_aware(value, tz=columns[j]) if isinstance(table[i][j], datetime.datetime): dates[i] = table[i][j] continue try: table[i][j] = float(value) if not (table[i][j] % 1): table[i][j] = int(table[i][j]) except: pass df0 = pd.DataFrame(table, columns=columns, index=dates) df = df.append(df0) if verbosity > 1: print(df) try: df.to_csv(cache_path) except: if verbosity > 0 and use_cache: from traceback import print_exc print_exc() warnings.warn('Unable to write weather data to cache file at {}'.format(cache_path)) return df
python
def daily(location='Fresno, CA', years=1, use_cache=True, verbosity=1): """Retrieve weather for the indicated airport code or 'City, ST' string. >>> df = daily('Camas, WA', verbosity=-1) >>> 365 <= len(df) <= 365 * 2 + 1 True Sacramento data has gaps (airport KMCC): 8/21/2013 is missing from 2013. Whole months are missing from 2014. >>> df = daily('Sacramento, CA', years=[2013], verbosity=-1) >>> 364 <= len(df) <= 365 True >>> df.columns Index([u'PST', u'Max TemperatureF', u'Mean TemperatureF', u'Min TemperatureF', u'Max Dew PointF', u'MeanDew PointF', u'Min DewpointF', ... """ this_year = datetime.date.today().year if isinstance(years, (int, float)): # current (incomplete) year doesn't count in total number of years # so 0 would return this calendar year's weather data years = np.arange(0, int(years) + 1) years = sorted(years) if not all(1900 <= yr <= this_year for yr in years): years = np.array([abs(yr) if (1900 <= abs(yr) <= this_year) else (this_year - abs(int(yr))) for yr in years])[::-1] airport_code = airport(location, default=location) # refresh the cache each time the start or end year changes cache_path = 'daily-{}-{}-{}.csv'.format(airport_code, years[0], years[-1]) cache_path = os.path.join(CACHE_PATH, cache_path) if use_cache: try: return pd.DataFrame.from_csv(cache_path) except: pass df = pd.DataFrame() for year in years: url = ('http://www.wunderground.com/history/airport/{airport}/{yearstart}/1/1/' + 'CustomHistory.html?dayend=31&monthend=12&yearend={yearend}' + '&req_city=&req_state=&req_statename=&reqdb.zip=&reqdb.magic=&reqdb.wmo=&MR=1&format=1').format( airport=airport_code, yearstart=year, yearend=year ) if verbosity > 1: print('GETing *.CSV using "{0}"'.format(url)) buf = urllib.urlopen(url).read() if verbosity > 0: N = buf.count('\n') M = (buf.count(',') + N) / float(N) print('Retrieved CSV for airport code "{}" with appox. 
{} lines and {} columns = {} cells.'.format( airport_code, N, int(round(M)), int(round(M)) * N)) if verbosity > 2: print(buf) table = util.read_csv(buf, format='header+values-list', numbers=True) # # clean up the last column (if it contains <br> tags) table = [util.strip_br(row) if len(row) > 1 else row for row in table] # numcols = max(len(row) for row in table) # table = [row for row in table if len(row) == numcols] columns = table.pop(0) tzs = [s for s in columns if (s[1:] in ['ST', 'DT'] and s[0] in 'PMCE')] dates = [float('nan')] * len(table) for i, row in enumerate(table): for j, value in enumerate(row): if not value and value is not None: value = 0 continue if columns[j] in tzs: table[i][j] = util.make_tz_aware(value, tz=columns[j]) if isinstance(table[i][j], datetime.datetime): dates[i] = table[i][j] continue try: table[i][j] = float(value) if not (table[i][j] % 1): table[i][j] = int(table[i][j]) except: pass df0 = pd.DataFrame(table, columns=columns, index=dates) df = df.append(df0) if verbosity > 1: print(df) try: df.to_csv(cache_path) except: if verbosity > 0 and use_cache: from traceback import print_exc print_exc() warnings.warn('Unable to write weather data to cache file at {}'.format(cache_path)) return df
[ "def", "daily", "(", "location", "=", "'Fresno, CA'", ",", "years", "=", "1", ",", "use_cache", "=", "True", ",", "verbosity", "=", "1", ")", ":", "this_year", "=", "datetime", ".", "date", ".", "today", "(", ")", ".", "year", "if", "isinstance", "("...
Retrieve weather for the indicated airport code or 'City, ST' string. >>> df = daily('Camas, WA', verbosity=-1) >>> 365 <= len(df) <= 365 * 2 + 1 True Sacramento data has gaps (airport KMCC): 8/21/2013 is missing from 2013. Whole months are missing from 2014. >>> df = daily('Sacramento, CA', years=[2013], verbosity=-1) >>> 364 <= len(df) <= 365 True >>> df.columns Index([u'PST', u'Max TemperatureF', u'Mean TemperatureF', u'Min TemperatureF', u'Max Dew PointF', u'MeanDew PointF', u'Min DewpointF', ...
[ "Retrieve", "weather", "for", "the", "indicated", "airport", "code", "or", "City", "ST", "string", "." ]
train
https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/data/weather.py#L145-L238
aerogear/digger-build-cli
digger/builds/gradle.py
GradleBuild.ensure_cache_folder
def ensure_cache_folder(self): """ Creates a gradle cache folder if it does not exist. """ if os.path.exists(self.cache_folder) is False: os.makedirs(self.cache_folder)
python
def ensure_cache_folder(self): """ Creates a gradle cache folder if it does not exist. """ if os.path.exists(self.cache_folder) is False: os.makedirs(self.cache_folder)
[ "def", "ensure_cache_folder", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "cache_folder", ")", "is", "False", ":", "os", ".", "makedirs", "(", "self", ".", "cache_folder", ")" ]
Creates a gradle cache folder if it does not exist.
[ "Creates", "a", "gradle", "cache", "folder", "if", "it", "does", "not", "exist", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/builds/gradle.py#L17-L22
aerogear/digger-build-cli
digger/builds/gradle.py
GradleBuild.is_app_folder
def is_app_folder(self, folder): """ checks if a folder """ with open('%s/%s/build.gradle' % (self.path, folder)) as f: for line in f.readlines(): if config.gradle_plugin in line: return True return False
python
def is_app_folder(self, folder): """ checks if a folder """ with open('%s/%s/build.gradle' % (self.path, folder)) as f: for line in f.readlines(): if config.gradle_plugin in line: return True return False
[ "def", "is_app_folder", "(", "self", ",", "folder", ")", ":", "with", "open", "(", "'%s/%s/build.gradle'", "%", "(", "self", ".", "path", ",", "folder", ")", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "if", "con...
checks if a folder
[ "checks", "if", "a", "folder" ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/builds/gradle.py#L24-L32
aerogear/digger-build-cli
digger/builds/gradle.py
GradleBuild.get_src_folder
def get_src_folder(self): """ Gets the app source folder from settings.gradle file. Returns: A string containing the project source folder name (default is "app") """ with open('%s/settings.gradle' % self.path) as f: for line in f.readlines(): if line.startswith('include'): matches = re.findall(r'\'\:?(.+?)\'', line) if len(matches) == 0: continue for folder in matches: if self.is_app_folder(folder): return folder return 'app'
python
def get_src_folder(self): """ Gets the app source folder from settings.gradle file. Returns: A string containing the project source folder name (default is "app") """ with open('%s/settings.gradle' % self.path) as f: for line in f.readlines(): if line.startswith('include'): matches = re.findall(r'\'\:?(.+?)\'', line) if len(matches) == 0: continue for folder in matches: if self.is_app_folder(folder): return folder return 'app'
[ "def", "get_src_folder", "(", "self", ")", ":", "with", "open", "(", "'%s/settings.gradle'", "%", "self", ".", "path", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "'include'", ...
Gets the app source folder from settings.gradle file. Returns: A string containing the project source folder name (default is "app")
[ "Gets", "the", "app", "source", "folder", "from", "settings", ".", "gradle", "file", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/builds/gradle.py#L34-L50
aerogear/digger-build-cli
digger/builds/gradle.py
GradleBuild.get_build_tool_version
def get_build_tool_version(self): """ Gets the build tool version to be used by zipalign from build.gradle file. Returns: A string containing the build tool version, default is 23.0.2. """ with open('%s/%s/build.gradle' % (self.path, self.src_folder)) as f: for line in f.readlines(): if 'buildToolsVersion' in line: matches = re.findall(r'buildToolsVersion \"(.+?)\"', line) if len(matches) == 1: return matches[0] return config.build_tool_version
python
def get_build_tool_version(self): """ Gets the build tool version to be used by zipalign from build.gradle file. Returns: A string containing the build tool version, default is 23.0.2. """ with open('%s/%s/build.gradle' % (self.path, self.src_folder)) as f: for line in f.readlines(): if 'buildToolsVersion' in line: matches = re.findall(r'buildToolsVersion \"(.+?)\"', line) if len(matches) == 1: return matches[0] return config.build_tool_version
[ "def", "get_build_tool_version", "(", "self", ")", ":", "with", "open", "(", "'%s/%s/build.gradle'", "%", "(", "self", ".", "path", ",", "self", ".", "src_folder", ")", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "...
Gets the build tool version to be used by zipalign from build.gradle file. Returns: A string containing the build tool version, default is 23.0.2.
[ "Gets", "the", "build", "tool", "version", "to", "be", "used", "by", "zipalign", "from", "build", ".", "gradle", "file", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/builds/gradle.py#L52-L65
aerogear/digger-build-cli
digger/builds/gradle.py
GradleBuild.sign
def sign(self, storepass=None, keypass=None, keystore=None, apk=None, alias=None, name='app'): """ Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default. :param storepass(str): keystore file storepass :param keypass(str): keystore file keypass :param keystore(str): keystore file path :param apk(str): apk file path to be signed :param alias(str): keystore file alias :param name(str): signed apk name to be used by zipalign """ self.src_folder = self.get_src_folder() if keystore is None: (keystore, storepass, keypass, alias) = android_helper.get_default_keystore() dist = '%s/%s.apk' % ('/'.join(apk.split('/')[:-1]), name) android_helper.jarsign(storepass, keypass, keystore, apk, alias, path=self.path) android_helper.zipalign(apk, dist, build_tool=self.get_build_tool_version(), path=self.path)
python
def sign(self, storepass=None, keypass=None, keystore=None, apk=None, alias=None, name='app'): """ Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default. :param storepass(str): keystore file storepass :param keypass(str): keystore file keypass :param keystore(str): keystore file path :param apk(str): apk file path to be signed :param alias(str): keystore file alias :param name(str): signed apk name to be used by zipalign """ self.src_folder = self.get_src_folder() if keystore is None: (keystore, storepass, keypass, alias) = android_helper.get_default_keystore() dist = '%s/%s.apk' % ('/'.join(apk.split('/')[:-1]), name) android_helper.jarsign(storepass, keypass, keystore, apk, alias, path=self.path) android_helper.zipalign(apk, dist, build_tool=self.get_build_tool_version(), path=self.path)
[ "def", "sign", "(", "self", ",", "storepass", "=", "None", ",", "keypass", "=", "None", ",", "keystore", "=", "None", ",", "apk", "=", "None", ",", "alias", "=", "None", ",", "name", "=", "'app'", ")", ":", "self", ".", "src_folder", "=", "self", ...
Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default. :param storepass(str): keystore file storepass :param keypass(str): keystore file keypass :param keystore(str): keystore file path :param apk(str): apk file path to be signed :param alias(str): keystore file alias :param name(str): signed apk name to be used by zipalign
[ "Signs", "(", "jarsign", "and", "zipalign", ")", "a", "target", "apk", "file", "based", "on", "keystore", "information", "uses", "default", "debug", "keystore", "file", "by", "default", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/builds/gradle.py#L67-L83
aerogear/digger-build-cli
digger/builds/gradle.py
GradleBuild.validate
def validate(self): """ Validates the app project before the build. This is the first step in the build process. Needs to be implemented by the subclass. """ if os.path.exists('%s/gradlew' % self.path) is False: raise errors.InvalidProjectStructure(message='Missing gradlew project root folder') self.touch_log('validate')
python
def validate(self): """ Validates the app project before the build. This is the first step in the build process. Needs to be implemented by the subclass. """ if os.path.exists('%s/gradlew' % self.path) is False: raise errors.InvalidProjectStructure(message='Missing gradlew project root folder') self.touch_log('validate')
[ "def", "validate", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "'%s/gradlew'", "%", "self", ".", "path", ")", "is", "False", ":", "raise", "errors", ".", "InvalidProjectStructure", "(", "message", "=", "'Missing gradlew project root f...
Validates the app project before the build. This is the first step in the build process. Needs to be implemented by the subclass.
[ "Validates", "the", "app", "project", "before", "the", "build", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/builds/gradle.py#L85-L96
aerogear/digger-build-cli
digger/builds/gradle.py
GradleBuild.prepare
def prepare(self): """ Prepares the android project to the build process. Checks if the project uses either gradle or ant and executes the necessary steps. """ self.src_folder = self.get_src_folder() st = os.stat('%s/gradlew' % self.path) os.chmod('%s/gradlew' % self.path, st.st_mode | stat.S_IEXEC)
python
def prepare(self): """ Prepares the android project to the build process. Checks if the project uses either gradle or ant and executes the necessary steps. """ self.src_folder = self.get_src_folder() st = os.stat('%s/gradlew' % self.path) os.chmod('%s/gradlew' % self.path, st.st_mode | stat.S_IEXEC)
[ "def", "prepare", "(", "self", ")", ":", "self", ".", "src_folder", "=", "self", ".", "get_src_folder", "(", ")", "st", "=", "os", ".", "stat", "(", "'%s/gradlew'", "%", "self", ".", "path", ")", "os", ".", "chmod", "(", "'%s/gradlew'", "%", "self", ...
Prepares the android project to the build process. Checks if the project uses either gradle or ant and executes the necessary steps.
[ "Prepares", "the", "android", "project", "to", "the", "build", "process", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/builds/gradle.py#L98-L106
aerogear/digger-build-cli
digger/builds/gradle.py
GradleBuild.build
def build(self, mode='debug'): """ Builds the app project after the execution of validate and prepare. This is the third and last step in the build process. Needs to be implemented by the subclass. """ self.ensure_cache_folder() ref = { 'debug': 'assembleDebug', 'release': 'assembleRelease' } cmd = [ './gradlew', ref.get(mode, mode), '--gradle-user-home', self.cache_folder ] self.run_cmd(cmd, 'build')
python
def build(self, mode='debug'): """ Builds the app project after the execution of validate and prepare. This is the third and last step in the build process. Needs to be implemented by the subclass. """ self.ensure_cache_folder() ref = { 'debug': 'assembleDebug', 'release': 'assembleRelease' } cmd = [ './gradlew', ref.get(mode, mode), '--gradle-user-home', self.cache_folder ] self.run_cmd(cmd, 'build')
[ "def", "build", "(", "self", ",", "mode", "=", "'debug'", ")", ":", "self", ".", "ensure_cache_folder", "(", ")", "ref", "=", "{", "'debug'", ":", "'assembleDebug'", ",", "'release'", ":", "'assembleRelease'", "}", "cmd", "=", "[", "'./gradlew'", ",", "r...
Builds the app project after the execution of validate and prepare. This is the third and last step in the build process. Needs to be implemented by the subclass.
[ "Builds", "the", "app", "project", "after", "the", "execution", "of", "validate", "and", "prepare", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/builds/gradle.py#L108-L127
MacHu-GWU/pymongo_mate-project
pymongo_mate/pkg/pandas_mate/sql_io.py
smart_insert
def smart_insert(df, table, engine, minimal_size=5): """An optimized Insert strategy. **中文文档** 一种优化的将大型DataFrame中的数据, 在有IntegrityError的情况下将所有 好数据存入数据库的方法。 """ from sqlalchemy.exc import IntegrityError try: table_name = table.name except: table_name = table # 首先进行尝试bulk insert try: df.to_sql(table_name, engine, index=False, if_exists="append") # 失败了 except IntegrityError: # 分析数据量 n = df.shape[0] # 如果数据条数多于一定数量 if n >= minimal_size ** 2: # 则进行分包 n_chunk = math.floor(math.sqrt(n)) for sub_df in grouper_df(df, n_chunk): smart_insert( sub_df, table_name, engine, minimal_size) # 否则则一条条地逐条插入 else: for sub_df in grouper_df(df, 1): try: sub_df.to_sql( table_name, engine, index=False, if_exists="append") except IntegrityError: pass
python
def smart_insert(df, table, engine, minimal_size=5): """An optimized Insert strategy. **中文文档** 一种优化的将大型DataFrame中的数据, 在有IntegrityError的情况下将所有 好数据存入数据库的方法。 """ from sqlalchemy.exc import IntegrityError try: table_name = table.name except: table_name = table # 首先进行尝试bulk insert try: df.to_sql(table_name, engine, index=False, if_exists="append") # 失败了 except IntegrityError: # 分析数据量 n = df.shape[0] # 如果数据条数多于一定数量 if n >= minimal_size ** 2: # 则进行分包 n_chunk = math.floor(math.sqrt(n)) for sub_df in grouper_df(df, n_chunk): smart_insert( sub_df, table_name, engine, minimal_size) # 否则则一条条地逐条插入 else: for sub_df in grouper_df(df, 1): try: sub_df.to_sql( table_name, engine, index=False, if_exists="append") except IntegrityError: pass
[ "def", "smart_insert", "(", "df", ",", "table", ",", "engine", ",", "minimal_size", "=", "5", ")", ":", "from", "sqlalchemy", ".", "exc", "import", "IntegrityError", "try", ":", "table_name", "=", "table", ".", "name", "except", ":", "table_name", "=", "...
An optimized Insert strategy. **中文文档** 一种优化的将大型DataFrame中的数据, 在有IntegrityError的情况下将所有 好数据存入数据库的方法。
[ "An", "optimized", "Insert", "strategy", "." ]
train
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/pkg/pandas_mate/sql_io.py#L16-L52
MacHu-GWU/pymongo_mate-project
pymongo_mate/pkg/pandas_mate/sql_io.py
excel_to_sql
def excel_to_sql(excel_file_path, engine, read_excel_kwargs=None, to_generic_type_kwargs=None, to_sql_kwargs=None): """Create a database from excel. :param read_excel_kwargs: dict, arguments for ``pandas.read_excel`` method. example: ``{"employee": {"skiprows": 10}, "department": {}}`` :param to_sql_kwargs: dict, arguments for ``pandas.DataFrame.to_sql`` method. limitation: 1. If a integer column has None value, data type in database will be float. Because pandas thinks that it is ``np.nan``. 2. If a string column looks like integer, ``pandas.read_excel()`` method doesn't have options to convert it to string. """ if read_excel_kwargs is None: read_excel_kwargs = dict() if to_sql_kwargs is None: to_sql_kwargs = dict() if to_generic_type_kwargs is None: to_generic_type_kwargs = dict() xl = pd.ExcelFile(excel_file_path) for sheet_name in xl.sheet_names: df = pd.read_excel( excel_file_path, sheet_name, **read_excel_kwargs.get(sheet_name, dict()) ) kwargs = to_generic_type_kwargs.get(sheet_name) if kwargs: data = to_dict_list_generic_type(df, **kwargs) smart_insert(data, sheet_name, engine) else: df.to_sql( sheet_name, engine, index=False, **to_sql_kwargs.get(sheet_name, dict(if_exists="replace")) )
python
def excel_to_sql(excel_file_path, engine, read_excel_kwargs=None, to_generic_type_kwargs=None, to_sql_kwargs=None): """Create a database from excel. :param read_excel_kwargs: dict, arguments for ``pandas.read_excel`` method. example: ``{"employee": {"skiprows": 10}, "department": {}}`` :param to_sql_kwargs: dict, arguments for ``pandas.DataFrame.to_sql`` method. limitation: 1. If a integer column has None value, data type in database will be float. Because pandas thinks that it is ``np.nan``. 2. If a string column looks like integer, ``pandas.read_excel()`` method doesn't have options to convert it to string. """ if read_excel_kwargs is None: read_excel_kwargs = dict() if to_sql_kwargs is None: to_sql_kwargs = dict() if to_generic_type_kwargs is None: to_generic_type_kwargs = dict() xl = pd.ExcelFile(excel_file_path) for sheet_name in xl.sheet_names: df = pd.read_excel( excel_file_path, sheet_name, **read_excel_kwargs.get(sheet_name, dict()) ) kwargs = to_generic_type_kwargs.get(sheet_name) if kwargs: data = to_dict_list_generic_type(df, **kwargs) smart_insert(data, sheet_name, engine) else: df.to_sql( sheet_name, engine, index=False, **to_sql_kwargs.get(sheet_name, dict(if_exists="replace")) )
[ "def", "excel_to_sql", "(", "excel_file_path", ",", "engine", ",", "read_excel_kwargs", "=", "None", ",", "to_generic_type_kwargs", "=", "None", ",", "to_sql_kwargs", "=", "None", ")", ":", "if", "read_excel_kwargs", "is", "None", ":", "read_excel_kwargs", "=", ...
Create a database from excel. :param read_excel_kwargs: dict, arguments for ``pandas.read_excel`` method. example: ``{"employee": {"skiprows": 10}, "department": {}}`` :param to_sql_kwargs: dict, arguments for ``pandas.DataFrame.to_sql`` method. limitation: 1. If a integer column has None value, data type in database will be float. Because pandas thinks that it is ``np.nan``. 2. If a string column looks like integer, ``pandas.read_excel()`` method doesn't have options to convert it to string.
[ "Create", "a", "database", "from", "excel", "." ]
train
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/pkg/pandas_mate/sql_io.py#L55-L97
MacHu-GWU/pymongo_mate-project
pymongo_mate/pkg/pandas_mate/sql_io.py
database_to_excel
def database_to_excel(engine, excel_file_path): """Export database to excel. :param engine: :param excel_file_path: """ from sqlalchemy import MetaData, select metadata = MetaData() metadata.reflect(engine) writer = pd.ExcelWriter(excel_file_path) for table in metadata.tables.values(): sql = select([table]) df = pd.read_sql(sql, engine) df.to_excel(writer, table.name, index=False) writer.save()
python
def database_to_excel(engine, excel_file_path): """Export database to excel. :param engine: :param excel_file_path: """ from sqlalchemy import MetaData, select metadata = MetaData() metadata.reflect(engine) writer = pd.ExcelWriter(excel_file_path) for table in metadata.tables.values(): sql = select([table]) df = pd.read_sql(sql, engine) df.to_excel(writer, table.name, index=False) writer.save()
[ "def", "database_to_excel", "(", "engine", ",", "excel_file_path", ")", ":", "from", "sqlalchemy", "import", "MetaData", ",", "select", "metadata", "=", "MetaData", "(", ")", "metadata", ".", "reflect", "(", "engine", ")", "writer", "=", "pd", ".", "ExcelWri...
Export database to excel. :param engine: :param excel_file_path:
[ "Export", "database", "to", "excel", "." ]
train
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/pkg/pandas_mate/sql_io.py#L100-L117
steenzout/python-object
setup.py
requirements
def requirements(requirements_file): """Return packages mentioned in the given file. Args: requirements_file (str): path to the requirements file to be parsed. Returns: (list): 3rd-party package dependencies contained in the file. """ return [ str(pkg.req) for pkg in parse_requirements( requirements_file, session=pip_download.PipSession()) if pkg.req is not None]
python
def requirements(requirements_file): """Return packages mentioned in the given file. Args: requirements_file (str): path to the requirements file to be parsed. Returns: (list): 3rd-party package dependencies contained in the file. """ return [ str(pkg.req) for pkg in parse_requirements( requirements_file, session=pip_download.PipSession()) if pkg.req is not None]
[ "def", "requirements", "(", "requirements_file", ")", ":", "return", "[", "str", "(", "pkg", ".", "req", ")", "for", "pkg", "in", "parse_requirements", "(", "requirements_file", ",", "session", "=", "pip_download", ".", "PipSession", "(", ")", ")", "if", "...
Return packages mentioned in the given file. Args: requirements_file (str): path to the requirements file to be parsed. Returns: (list): 3rd-party package dependencies contained in the file.
[ "Return", "packages", "mentioned", "in", "the", "given", "file", "." ]
train
https://github.com/steenzout/python-object/blob/b865e3eeb4c2435923cf900d3ef2a89c1b35fe18/setup.py#L19-L30
mozilla/socorrolib
socorrolib/lib/ooid.py
createNewOoid
def createNewOoid(timestamp=None, depth=None): """Create a new Ooid for a given time, to be stored at a given depth timestamp: the year-month-day is encoded in the ooid. If none, use current day depth: the expected storage depth is encoded in the ooid. If non, use the defaultDepth returns a new opaque id string holding 24 random hex digits and encoded date and depth info """ if not timestamp: timestamp = utc_now().date() if not depth: depth = defaultDepth assert depth <= 4 and depth >=1 uuid = str(uu.uuid4()) return "%s%d%02d%02d%02d" %(uuid[:-7],depth,timestamp.year%100,timestamp.month,timestamp.day)
python
def createNewOoid(timestamp=None, depth=None): """Create a new Ooid for a given time, to be stored at a given depth timestamp: the year-month-day is encoded in the ooid. If none, use current day depth: the expected storage depth is encoded in the ooid. If non, use the defaultDepth returns a new opaque id string holding 24 random hex digits and encoded date and depth info """ if not timestamp: timestamp = utc_now().date() if not depth: depth = defaultDepth assert depth <= 4 and depth >=1 uuid = str(uu.uuid4()) return "%s%d%02d%02d%02d" %(uuid[:-7],depth,timestamp.year%100,timestamp.month,timestamp.day)
[ "def", "createNewOoid", "(", "timestamp", "=", "None", ",", "depth", "=", "None", ")", ":", "if", "not", "timestamp", ":", "timestamp", "=", "utc_now", "(", ")", ".", "date", "(", ")", "if", "not", "depth", ":", "depth", "=", "defaultDepth", "assert", ...
Create a new Ooid for a given time, to be stored at a given depth timestamp: the year-month-day is encoded in the ooid. If none, use current day depth: the expected storage depth is encoded in the ooid. If non, use the defaultDepth returns a new opaque id string holding 24 random hex digits and encoded date and depth info
[ "Create", "a", "new", "Ooid", "for", "a", "given", "time", "to", "be", "stored", "at", "a", "given", "depth", "timestamp", ":", "the", "year", "-", "month", "-", "day", "is", "encoded", "in", "the", "ooid", ".", "If", "none", "use", "current", "day",...
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/ooid.py#L14-L26
mozilla/socorrolib
socorrolib/lib/ooid.py
uuidToOoid
def uuidToOoid(uuid,timestamp=None, depth= None): """ Create an ooid from a 32-hex-digit string in regular uuid format. uuid: must be uuid in expected format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxx7777777 timestamp: the year-month-day is encoded in the ooid. If none, use current day depth: the expected storage depth is encoded in the ooid. If non, use the defaultDepth returns a new opaque id string holding the first 24 digits of the provided uuid and encoded date and depth info """ if not timestamp: timestamp = utc_now().date() if not depth: depth = defaultDepth assert depth <= 4 and depth >=1 return "%s%d%02d%02d%02d" %(uuid[:-7],depth,timestamp.year%100,timestamp.month,timestamp.day)
python
def uuidToOoid(uuid,timestamp=None, depth= None): """ Create an ooid from a 32-hex-digit string in regular uuid format. uuid: must be uuid in expected format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxx7777777 timestamp: the year-month-day is encoded in the ooid. If none, use current day depth: the expected storage depth is encoded in the ooid. If non, use the defaultDepth returns a new opaque id string holding the first 24 digits of the provided uuid and encoded date and depth info """ if not timestamp: timestamp = utc_now().date() if not depth: depth = defaultDepth assert depth <= 4 and depth >=1 return "%s%d%02d%02d%02d" %(uuid[:-7],depth,timestamp.year%100,timestamp.month,timestamp.day)
[ "def", "uuidToOoid", "(", "uuid", ",", "timestamp", "=", "None", ",", "depth", "=", "None", ")", ":", "if", "not", "timestamp", ":", "timestamp", "=", "utc_now", "(", ")", ".", "date", "(", ")", "if", "not", "depth", ":", "depth", "=", "defaultDepth"...
Create an ooid from a 32-hex-digit string in regular uuid format. uuid: must be uuid in expected format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxx7777777 timestamp: the year-month-day is encoded in the ooid. If none, use current day depth: the expected storage depth is encoded in the ooid. If non, use the defaultDepth returns a new opaque id string holding the first 24 digits of the provided uuid and encoded date and depth info
[ "Create", "an", "ooid", "from", "a", "32", "-", "hex", "-", "digit", "string", "in", "regular", "uuid", "format", ".", "uuid", ":", "must", "be", "uuid", "in", "expected", "format", ":", "xxxxxxxx", "-", "xxxx", "-", "xxxx", "-", "xxxx", "-", "xxxxx7...
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/ooid.py#L28-L40
mozilla/socorrolib
socorrolib/lib/ooid.py
dateAndDepthFromOoid
def dateAndDepthFromOoid(ooid): """ Extract the encoded date and expected storage depth from an ooid. ooid: The ooid from which to extract the info returns (datetime(yyyy,mm,dd),depth) if the ooid is in expected format else (None,None) """ year = month = day = None try: day = int(ooid[-2:]) except: return None,None try: month = int(ooid[-4:-2]) except: return None,None try: year = 2000 + int(ooid[-6:-4]) depth = int(ooid[-7]) if not depth: depth = oldHardDepth return (dt.datetime(year,month,day,tzinfo=UTC),depth) except: return None,None return None,None
python
def dateAndDepthFromOoid(ooid): """ Extract the encoded date and expected storage depth from an ooid. ooid: The ooid from which to extract the info returns (datetime(yyyy,mm,dd),depth) if the ooid is in expected format else (None,None) """ year = month = day = None try: day = int(ooid[-2:]) except: return None,None try: month = int(ooid[-4:-2]) except: return None,None try: year = 2000 + int(ooid[-6:-4]) depth = int(ooid[-7]) if not depth: depth = oldHardDepth return (dt.datetime(year,month,day,tzinfo=UTC),depth) except: return None,None return None,None
[ "def", "dateAndDepthFromOoid", "(", "ooid", ")", ":", "year", "=", "month", "=", "day", "=", "None", "try", ":", "day", "=", "int", "(", "ooid", "[", "-", "2", ":", "]", ")", "except", ":", "return", "None", ",", "None", "try", ":", "month", "=",...
Extract the encoded date and expected storage depth from an ooid. ooid: The ooid from which to extract the info returns (datetime(yyyy,mm,dd),depth) if the ooid is in expected format else (None,None)
[ "Extract", "the", "encoded", "date", "and", "expected", "storage", "depth", "from", "an", "ooid", ".", "ooid", ":", "The", "ooid", "from", "which", "to", "extract", "the", "info", "returns", "(", "datetime", "(", "yyyy", "mm", "dd", ")", "depth", ")", ...
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/ooid.py#L42-L63
benley/butcher
butcher/gitrepo.py
RepoState.HeadList
def HeadList(self): """Return a list of all the currently loaded repo HEAD objects.""" return [(rname, repo.currenthead) for rname, repo in self.repos.items() ]
python
def HeadList(self): """Return a list of all the currently loaded repo HEAD objects.""" return [(rname, repo.currenthead) for rname, repo in self.repos.items() ]
[ "def", "HeadList", "(", "self", ")", ":", "return", "[", "(", "rname", ",", "repo", ".", "currenthead", ")", "for", "rname", ",", "repo", "in", "self", ".", "repos", ".", "items", "(", ")", "]" ]
Return a list of all the currently loaded repo HEAD objects.
[ "Return", "a", "list", "of", "all", "the", "currently", "loaded", "repo", "HEAD", "objects", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/gitrepo.py#L75-L78
benley/butcher
butcher/gitrepo.py
GitRepo.setorigin
def setorigin(self): """Set the 'origin' remote to the upstream url that we trust.""" try: origin = self.repo.remotes.origin if origin.url != self.origin_url: log.debug('[%s] Changing origin url. Old: %s New: %s', self.name, origin.url, self.origin_url) origin.config_writer.set('url', self.origin_url) except AttributeError: origin = self.repo.create_remote('origin', self.origin_url) log.debug('[%s] Created remote "origin" with URL: %s', self.name, origin.url)
python
def setorigin(self): """Set the 'origin' remote to the upstream url that we trust.""" try: origin = self.repo.remotes.origin if origin.url != self.origin_url: log.debug('[%s] Changing origin url. Old: %s New: %s', self.name, origin.url, self.origin_url) origin.config_writer.set('url', self.origin_url) except AttributeError: origin = self.repo.create_remote('origin', self.origin_url) log.debug('[%s] Created remote "origin" with URL: %s', self.name, origin.url)
[ "def", "setorigin", "(", "self", ")", ":", "try", ":", "origin", "=", "self", ".", "repo", ".", "remotes", ".", "origin", "if", "origin", ".", "url", "!=", "self", ".", "origin_url", ":", "log", ".", "debug", "(", "'[%s] Changing origin url. Old: %s New: %...
Set the 'origin' remote to the upstream url that we trust.
[ "Set", "the", "origin", "remote", "to", "the", "upstream", "url", "that", "we", "trust", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/gitrepo.py#L121-L132
benley/butcher
butcher/gitrepo.py
GitRepo.fetchall
def fetchall(self): """Fetch all refs from the upstream repo.""" try: self.repo.remotes.origin.fetch() except git.exc.GitCommandError as err: raise GitError(err)
python
def fetchall(self): """Fetch all refs from the upstream repo.""" try: self.repo.remotes.origin.fetch() except git.exc.GitCommandError as err: raise GitError(err)
[ "def", "fetchall", "(", "self", ")", ":", "try", ":", "self", ".", "repo", ".", "remotes", ".", "origin", ".", "fetch", "(", ")", "except", "git", ".", "exc", ".", "GitCommandError", "as", "err", ":", "raise", "GitError", "(", "err", ")" ]
Fetch all refs from the upstream repo.
[ "Fetch", "all", "refs", "from", "the", "upstream", "repo", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/gitrepo.py#L134-L139
benley/butcher
butcher/gitrepo.py
GitRepo.fetchref
def fetchref(self, ref): """Fetch a particular git ref.""" log.debug('[%s] Fetching ref: %s', self.name, ref) fetch_info = self.repo.remotes.origin.fetch(ref).pop() return fetch_info.ref
python
def fetchref(self, ref): """Fetch a particular git ref.""" log.debug('[%s] Fetching ref: %s', self.name, ref) fetch_info = self.repo.remotes.origin.fetch(ref).pop() return fetch_info.ref
[ "def", "fetchref", "(", "self", ",", "ref", ")", ":", "log", ".", "debug", "(", "'[%s] Fetching ref: %s'", ",", "self", ".", "name", ",", "ref", ")", "fetch_info", "=", "self", ".", "repo", ".", "remotes", ".", "origin", ".", "fetch", "(", "ref", ")"...
Fetch a particular git ref.
[ "Fetch", "a", "particular", "git", "ref", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/gitrepo.py#L141-L145
benley/butcher
butcher/gitrepo.py
GitRepo.sethead
def sethead(self, ref): """Set head to a git ref.""" log.debug('[%s] Setting to ref %s', self.name, ref) try: ref = self.repo.rev_parse(ref) except gitdb.exc.BadObject: # Probably means we don't have it cached yet. # So maybe we can fetch it. ref = self.fetchref(ref) log.debug('[%s] Setting head to %s', self.name, ref) self.repo.head.reset(ref, working_tree=True) log.debug('[%s] Head object: %s', self.name, self.currenthead)
python
def sethead(self, ref): """Set head to a git ref.""" log.debug('[%s] Setting to ref %s', self.name, ref) try: ref = self.repo.rev_parse(ref) except gitdb.exc.BadObject: # Probably means we don't have it cached yet. # So maybe we can fetch it. ref = self.fetchref(ref) log.debug('[%s] Setting head to %s', self.name, ref) self.repo.head.reset(ref, working_tree=True) log.debug('[%s] Head object: %s', self.name, self.currenthead)
[ "def", "sethead", "(", "self", ",", "ref", ")", ":", "log", ".", "debug", "(", "'[%s] Setting to ref %s'", ",", "self", ".", "name", ",", "ref", ")", "try", ":", "ref", "=", "self", ".", "repo", ".", "rev_parse", "(", "ref", ")", "except", "gitdb", ...
Set head to a git ref.
[ "Set", "head", "to", "a", "git", "ref", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/gitrepo.py#L147-L158
benley/butcher
butcher/gitrepo.py
GitRepo.get_file
def get_file(self, filename): """Get a file from the repo. Returns a file-like stream with the data. """ log.debug('[%s]: reading: //%s/%s', self.name, self.name, filename) try: blob = self.repo.head.commit.tree/filename return blob.data_stream except KeyError as err: raise GitError(err)
python
def get_file(self, filename): """Get a file from the repo. Returns a file-like stream with the data. """ log.debug('[%s]: reading: //%s/%s', self.name, self.name, filename) try: blob = self.repo.head.commit.tree/filename return blob.data_stream except KeyError as err: raise GitError(err)
[ "def", "get_file", "(", "self", ",", "filename", ")", ":", "log", ".", "debug", "(", "'[%s]: reading: //%s/%s'", ",", "self", ".", "name", ",", "self", ".", "name", ",", "filename", ")", "try", ":", "blob", "=", "self", ".", "repo", ".", "head", ".",...
Get a file from the repo. Returns a file-like stream with the data.
[ "Get", "a", "file", "from", "the", "repo", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/gitrepo.py#L165-L175
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/log.py
Logger.show_progress
def show_progress(self, message=None): """If we are in a progress scope, and no log messages have been shown, write out another '.'""" if self.in_progress_hanging: if message is None: sys.stdout.write('.') sys.stdout.flush() else: if self.last_message: padding = ' ' * max(0, len(self.last_message)-len(message)) else: padding = '' sys.stdout.write('\r%s%s%s%s' % (' '*self.indent, self.in_progress, message, padding)) sys.stdout.flush() self.last_message = message
python
def show_progress(self, message=None): """If we are in a progress scope, and no log messages have been shown, write out another '.'""" if self.in_progress_hanging: if message is None: sys.stdout.write('.') sys.stdout.flush() else: if self.last_message: padding = ' ' * max(0, len(self.last_message)-len(message)) else: padding = '' sys.stdout.write('\r%s%s%s%s' % (' '*self.indent, self.in_progress, message, padding)) sys.stdout.flush() self.last_message = message
[ "def", "show_progress", "(", "self", ",", "message", "=", "None", ")", ":", "if", "self", ".", "in_progress_hanging", ":", "if", "message", "is", "None", ":", "sys", ".", "stdout", ".", "write", "(", "'.'", ")", "sys", ".", "stdout", ".", "flush", "(...
If we are in a progress scope, and no log messages have been shown, write out another '.
[ "If", "we", "are", "in", "a", "progress", "scope", "and", "no", "log", "messages", "have", "been", "shown", "write", "out", "another", "." ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/log.py#L112-L126
gchina/funclog
funclog/funclog.py
funclog
def funclog(logger): """A decorator function that provides debug input/output logging.""" # check if logger is from structlog use_structlog = False if STRUCTLOG: if isinstance(logger, structlog._config.BoundLoggerLazyProxy): real_logger = logger use_structlog = True # If a Logger object is passed in, use that. Otherwise, get the default # Logger. if use_structlog: pass elif isinstance(logger, Logger): real_logger = logger else: real_logger = getLogger() # __qualname__ is prettier but it didn't get added until 3.5 name_attr = '__name__' if sys.version_info < (3, 5) else '__qualname__' def get_arg_string(args, kwargs): """Convert args and kwargs to a pretty string.""" return ', '.join(["'{}'".format(a) if type(a) == str else '{}'.format(a) for a in args] + ["{}='{}'".format(a, v) if type(v) == str else '{}={}'.format(a, v) for a, v in sorted(kwargs.items())]) def real_decorator(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): frame_info = inspect.getframeinfo(inspect.stack()[1][0]) filename = os.path.basename(frame_info.filename) lineno = frame_info.lineno func_name = getattr(fn, name_attr) arg_string = get_arg_string(args, kwargs) source_info = '{}:{}:{}({})'.format(filename, lineno, func_name, arg_string) if use_structlog: real_logger.debug(u'calling', source_info=source_info) else: real_logger.debug(u'calling %s', source_info) try: res = fn(*args, **kwargs) except Exception as e: if use_structlog: real_logger.exception( u'{} threw exception'.format(source_info), e=e) else: real_logger.exception( u'%s threw exception:\n%s', source_info, e) raise if use_structlog: real_logger.debug(u'{} returned'.format(source_info), res=res) else: real_logger.debug(u'%s returned: %s', source_info, res) return res return wrapper if type(logger) == type(real_decorator): return real_decorator(logger) return real_decorator
python
def funclog(logger): """A decorator function that provides debug input/output logging.""" # check if logger is from structlog use_structlog = False if STRUCTLOG: if isinstance(logger, structlog._config.BoundLoggerLazyProxy): real_logger = logger use_structlog = True # If a Logger object is passed in, use that. Otherwise, get the default # Logger. if use_structlog: pass elif isinstance(logger, Logger): real_logger = logger else: real_logger = getLogger() # __qualname__ is prettier but it didn't get added until 3.5 name_attr = '__name__' if sys.version_info < (3, 5) else '__qualname__' def get_arg_string(args, kwargs): """Convert args and kwargs to a pretty string.""" return ', '.join(["'{}'".format(a) if type(a) == str else '{}'.format(a) for a in args] + ["{}='{}'".format(a, v) if type(v) == str else '{}={}'.format(a, v) for a, v in sorted(kwargs.items())]) def real_decorator(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): frame_info = inspect.getframeinfo(inspect.stack()[1][0]) filename = os.path.basename(frame_info.filename) lineno = frame_info.lineno func_name = getattr(fn, name_attr) arg_string = get_arg_string(args, kwargs) source_info = '{}:{}:{}({})'.format(filename, lineno, func_name, arg_string) if use_structlog: real_logger.debug(u'calling', source_info=source_info) else: real_logger.debug(u'calling %s', source_info) try: res = fn(*args, **kwargs) except Exception as e: if use_structlog: real_logger.exception( u'{} threw exception'.format(source_info), e=e) else: real_logger.exception( u'%s threw exception:\n%s', source_info, e) raise if use_structlog: real_logger.debug(u'{} returned'.format(source_info), res=res) else: real_logger.debug(u'%s returned: %s', source_info, res) return res return wrapper if type(logger) == type(real_decorator): return real_decorator(logger) return real_decorator
[ "def", "funclog", "(", "logger", ")", ":", "# check if logger is from structlog", "use_structlog", "=", "False", "if", "STRUCTLOG", ":", "if", "isinstance", "(", "logger", ",", "structlog", ".", "_config", ".", "BoundLoggerLazyProxy", ")", ":", "real_logger", "=",...
A decorator function that provides debug input/output logging.
[ "A", "decorator", "function", "that", "provides", "debug", "input", "/", "output", "logging", "." ]
train
https://github.com/gchina/funclog/blob/4f91146c9bdc026d2844bdf555a8fc082456e42f/funclog/funclog.py#L24-L86
vadimk2016/v-vk-api
v_vk_api/__init__.py
create
def create(app_id: int = None, login: str = None, password: str = None, service_token: str = None, proxies: dict = None) -> API: """ Creates an API instance, requires app ID, login and password or service token to create connection :param app_id: int: specifies app ID :param login: str: specifies login, can be phone number or email :param password: str: specifies password :param service_token: str: specifies password service token :param proxies: dict: specifies proxies, require http and https proxy """ session_ = APISession(app_id, login, password, service_token, proxies) return API(session_)
python
def create(app_id: int = None, login: str = None, password: str = None, service_token: str = None, proxies: dict = None) -> API: """ Creates an API instance, requires app ID, login and password or service token to create connection :param app_id: int: specifies app ID :param login: str: specifies login, can be phone number or email :param password: str: specifies password :param service_token: str: specifies password service token :param proxies: dict: specifies proxies, require http and https proxy """ session_ = APISession(app_id, login, password, service_token, proxies) return API(session_)
[ "def", "create", "(", "app_id", ":", "int", "=", "None", ",", "login", ":", "str", "=", "None", ",", "password", ":", "str", "=", "None", ",", "service_token", ":", "str", "=", "None", ",", "proxies", ":", "dict", "=", "None", ")", "->", "API", "...
Creates an API instance, requires app ID, login and password or service token to create connection :param app_id: int: specifies app ID :param login: str: specifies login, can be phone number or email :param password: str: specifies password :param service_token: str: specifies password service token :param proxies: dict: specifies proxies, require http and https proxy
[ "Creates", "an", "API", "instance", "requires", "app", "ID", "login", "and", "password", "or", "service", "token", "to", "create", "connection" ]
train
https://github.com/vadimk2016/v-vk-api/blob/ef5656e09944b5319a1f573cfb7b022f3d31c0cf/v_vk_api/__init__.py#L9-L29
benley/butcher
butcher/util.py
user_homedir
def user_homedir(username=None): """Returns a user's home directory. If no username is specified, returns the current user's homedir. """ if username: return os.path.expanduser('~%s/' % username) elif 'HOME' in os.environ: return os.environ['HOME'] elif os.name == 'posix': return os.path.expanduser('~/') else: raise RuntimeError('This function has failed at life.')
python
def user_homedir(username=None): """Returns a user's home directory. If no username is specified, returns the current user's homedir. """ if username: return os.path.expanduser('~%s/' % username) elif 'HOME' in os.environ: return os.environ['HOME'] elif os.name == 'posix': return os.path.expanduser('~/') else: raise RuntimeError('This function has failed at life.')
[ "def", "user_homedir", "(", "username", "=", "None", ")", ":", "if", "username", ":", "return", "os", ".", "path", ".", "expanduser", "(", "'~%s/'", "%", "username", ")", "elif", "'HOME'", "in", "os", ".", "environ", ":", "return", "os", ".", "environ"...
Returns a user's home directory. If no username is specified, returns the current user's homedir.
[ "Returns", "a", "user", "s", "home", "directory", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/util.py#L12-L24
benley/butcher
butcher/util.py
hash_stream
def hash_stream(fileobj, hasher=None, blocksize=65536): """Read from fileobj stream, return hash of its contents. Args: fileobj: File-like object with read() hasher: Hash object such as hashlib.sha1(). Defaults to sha1. blocksize: Read from fileobj this many bytes at a time. """ hasher = hasher or hashlib.sha1() buf = fileobj.read(blocksize) while buf: hasher.update(buf) buf = fileobj.read(blocksize) return hasher
python
def hash_stream(fileobj, hasher=None, blocksize=65536): """Read from fileobj stream, return hash of its contents. Args: fileobj: File-like object with read() hasher: Hash object such as hashlib.sha1(). Defaults to sha1. blocksize: Read from fileobj this many bytes at a time. """ hasher = hasher or hashlib.sha1() buf = fileobj.read(blocksize) while buf: hasher.update(buf) buf = fileobj.read(blocksize) return hasher
[ "def", "hash_stream", "(", "fileobj", ",", "hasher", "=", "None", ",", "blocksize", "=", "65536", ")", ":", "hasher", "=", "hasher", "or", "hashlib", ".", "sha1", "(", ")", "buf", "=", "fileobj", ".", "read", "(", "blocksize", ")", "while", "buf", ":...
Read from fileobj stream, return hash of its contents. Args: fileobj: File-like object with read() hasher: Hash object such as hashlib.sha1(). Defaults to sha1. blocksize: Read from fileobj this many bytes at a time.
[ "Read", "from", "fileobj", "stream", "return", "hash", "of", "its", "contents", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/util.py#L38-L51
benley/butcher
butcher/util.py
hash_str
def hash_str(data, hasher=None): """Checksum hash a string.""" hasher = hasher or hashlib.sha1() hasher.update(data) return hasher
python
def hash_str(data, hasher=None): """Checksum hash a string.""" hasher = hasher or hashlib.sha1() hasher.update(data) return hasher
[ "def", "hash_str", "(", "data", ",", "hasher", "=", "None", ")", ":", "hasher", "=", "hasher", "or", "hashlib", ".", "sha1", "(", ")", "hasher", ".", "update", "(", "data", ")", "return", "hasher" ]
Checksum hash a string.
[ "Checksum", "hash", "a", "string", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/util.py#L54-L58
benley/butcher
butcher/util.py
glob
def glob(*args): """Returns list of paths matching one or more wildcard patterns. Args: include_dirs: Include directories in the output """ if len(args) is 1 and isinstance(args[0], list): args = args[0] matches = [] for pattern in args: for item in glob2.glob(pattern): if not os.path.isdir(item): matches.append(item) return matches
python
def glob(*args): """Returns list of paths matching one or more wildcard patterns. Args: include_dirs: Include directories in the output """ if len(args) is 1 and isinstance(args[0], list): args = args[0] matches = [] for pattern in args: for item in glob2.glob(pattern): if not os.path.isdir(item): matches.append(item) return matches
[ "def", "glob", "(", "*", "args", ")", ":", "if", "len", "(", "args", ")", "is", "1", "and", "isinstance", "(", "args", "[", "0", "]", ",", "list", ")", ":", "args", "=", "args", "[", "0", "]", "matches", "=", "[", "]", "for", "pattern", "in",...
Returns list of paths matching one or more wildcard patterns. Args: include_dirs: Include directories in the output
[ "Returns", "list", "of", "paths", "matching", "one", "or", "more", "wildcard", "patterns", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/util.py#L75-L88
benley/butcher
butcher/util.py
flatten
def flatten(listish): """Flatten an arbitrarily-nested list of strings and lists. Works for any subclass of basestring and any type of iterable. """ for elem in listish: if (isinstance(elem, collections.Iterable) and not isinstance(elem, basestring)): for subelem in flatten(elem): yield subelem else: yield elem
python
def flatten(listish): """Flatten an arbitrarily-nested list of strings and lists. Works for any subclass of basestring and any type of iterable. """ for elem in listish: if (isinstance(elem, collections.Iterable) and not isinstance(elem, basestring)): for subelem in flatten(elem): yield subelem else: yield elem
[ "def", "flatten", "(", "listish", ")", ":", "for", "elem", "in", "listish", ":", "if", "(", "isinstance", "(", "elem", ",", "collections", ".", "Iterable", ")", "and", "not", "isinstance", "(", "elem", ",", "basestring", ")", ")", ":", "for", "subelem"...
Flatten an arbitrarily-nested list of strings and lists. Works for any subclass of basestring and any type of iterable.
[ "Flatten", "an", "arbitrarily", "-", "nested", "list", "of", "strings", "and", "lists", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/util.py#L91-L102
benley/butcher
butcher/util.py
linkorcopy
def linkorcopy(src, dst): """Hardlink src file to dst if possible, otherwise copy.""" if not os.path.isfile(src): raise error.ButcherError('linkorcopy called with non-file source. ' '(src: %s dst: %s)' % src, dst) elif os.path.isdir(dst): dst = os.path.join(dst, os.path.basename(src)) elif os.path.exists(dst): os.unlink(dst) elif not os.path.exists(os.path.dirname(dst)): os.makedirs(os.path.dirname(dst)) try: os.link(src, dst) log.debug('Hardlinked: %s -> %s', src, dst) except OSError: shutil.copy2(src, dst) log.debug('Couldn\'t hardlink. Copied: %s -> %s', src, dst)
python
def linkorcopy(src, dst): """Hardlink src file to dst if possible, otherwise copy.""" if not os.path.isfile(src): raise error.ButcherError('linkorcopy called with non-file source. ' '(src: %s dst: %s)' % src, dst) elif os.path.isdir(dst): dst = os.path.join(dst, os.path.basename(src)) elif os.path.exists(dst): os.unlink(dst) elif not os.path.exists(os.path.dirname(dst)): os.makedirs(os.path.dirname(dst)) try: os.link(src, dst) log.debug('Hardlinked: %s -> %s', src, dst) except OSError: shutil.copy2(src, dst) log.debug('Couldn\'t hardlink. Copied: %s -> %s', src, dst)
[ "def", "linkorcopy", "(", "src", ",", "dst", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "src", ")", ":", "raise", "error", ".", "ButcherError", "(", "'linkorcopy called with non-file source. '", "'(src: %s dst: %s)'", "%", "src", ",", "ds...
Hardlink src file to dst if possible, otherwise copy.
[ "Hardlink", "src", "file", "to", "dst", "if", "possible", "otherwise", "copy", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/util.py#L105-L122
adammhaile/gitdata
gitdata/pushd.py
Dir.getcwd
def getcwd(cls): """ Provide a context dependent current working directory. This method will return the directory currently holding the lock. """ if not hasattr(cls._tl, "cwd"): cls._tl.cwd = os.getcwd() return cls._tl.cwd
python
def getcwd(cls): """ Provide a context dependent current working directory. This method will return the directory currently holding the lock. """ if not hasattr(cls._tl, "cwd"): cls._tl.cwd = os.getcwd() return cls._tl.cwd
[ "def", "getcwd", "(", "cls", ")", ":", "if", "not", "hasattr", "(", "cls", ".", "_tl", ",", "\"cwd\"", ")", ":", "cls", ".", "_tl", ".", "cwd", "=", "os", ".", "getcwd", "(", ")", "return", "cls", ".", "_tl", ".", "cwd" ]
Provide a context dependent current working directory. This method will return the directory currently holding the lock.
[ "Provide", "a", "context", "dependent", "current", "working", "directory", ".", "This", "method", "will", "return", "the", "directory", "currently", "holding", "the", "lock", "." ]
train
https://github.com/adammhaile/gitdata/blob/93112899737d63855655d438e3027192abd76a37/gitdata/pushd.py#L52-L59
shaypal5/barn
barn/dataset.py
Dataset.fname
def fname(self, version=None, tags=None, ext=None): """Returns the filename appropriate for an instance of this dataset. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. Returns ------- str The appropariate filename. """ if ext is None: ext = self.default_ext return '{}{}{}.{}'.format( self.fname_base, self._tags_to_str(tags=tags), self._version_to_str(version=version), ext, )
python
def fname(self, version=None, tags=None, ext=None): """Returns the filename appropriate for an instance of this dataset. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. Returns ------- str The appropariate filename. """ if ext is None: ext = self.default_ext return '{}{}{}.{}'.format( self.fname_base, self._tags_to_str(tags=tags), self._version_to_str(version=version), ext, )
[ "def", "fname", "(", "self", ",", "version", "=", "None", ",", "tags", "=", "None", ",", "ext", "=", "None", ")", ":", "if", "ext", "is", "None", ":", "ext", "=", "self", ".", "default_ext", "return", "'{}{}{}.{}'", ".", "format", "(", "self", ".",...
Returns the filename appropriate for an instance of this dataset. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. Returns ------- str The appropariate filename.
[ "Returns", "the", "filename", "appropriate", "for", "an", "instance", "of", "this", "dataset", "." ]
train
https://github.com/shaypal5/barn/blob/85958a0f9ac94943729605e70527ee726d3f3007/barn/dataset.py#L72-L97
shaypal5/barn
barn/dataset.py
Dataset.fpath
def fpath(self, version=None, tags=None, ext=None): """Returns the filepath appropriate for an instance of this dataset. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. Returns ------- str The appropariate filepath. """ if self.singleton: return dataset_filepath( filename=self.fname(version=version, tags=tags, ext=ext), task=self.task, **self.kwargs, ) return dataset_filepath( filename=self.fname(version=version, tags=tags, ext=ext), dataset_name=self.name, task=self.task, **self.kwargs, )
python
def fpath(self, version=None, tags=None, ext=None): """Returns the filepath appropriate for an instance of this dataset. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. Returns ------- str The appropariate filepath. """ if self.singleton: return dataset_filepath( filename=self.fname(version=version, tags=tags, ext=ext), task=self.task, **self.kwargs, ) return dataset_filepath( filename=self.fname(version=version, tags=tags, ext=ext), dataset_name=self.name, task=self.task, **self.kwargs, )
[ "def", "fpath", "(", "self", ",", "version", "=", "None", ",", "tags", "=", "None", ",", "ext", "=", "None", ")", ":", "if", "self", ".", "singleton", ":", "return", "dataset_filepath", "(", "filename", "=", "self", ".", "fname", "(", "version", "=",...
Returns the filepath appropriate for an instance of this dataset. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. Returns ------- str The appropariate filepath.
[ "Returns", "the", "filepath", "appropriate", "for", "an", "instance", "of", "this", "dataset", "." ]
train
https://github.com/shaypal5/barn/blob/85958a0f9ac94943729605e70527ee726d3f3007/barn/dataset.py#L99-L128
shaypal5/barn
barn/dataset.py
Dataset.add_local
def add_local(self, source_fpath, version=None, tags=None): """Copies a given file into local store as an instance of this dataset. Parameters ---------- source_fpath : str The full path for the source file to use. version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. Returns ------- ext : str The extension of the file added. """ ext = os.path.splitext(source_fpath)[1] ext = ext[1:] # we dont need the dot fpath = self.fpath(version=version, tags=tags, ext=ext) shutil.copyfile(src=source_fpath, dst=fpath) return ext
python
def add_local(self, source_fpath, version=None, tags=None): """Copies a given file into local store as an instance of this dataset. Parameters ---------- source_fpath : str The full path for the source file to use. version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. Returns ------- ext : str The extension of the file added. """ ext = os.path.splitext(source_fpath)[1] ext = ext[1:] # we dont need the dot fpath = self.fpath(version=version, tags=tags, ext=ext) shutil.copyfile(src=source_fpath, dst=fpath) return ext
[ "def", "add_local", "(", "self", ",", "source_fpath", ",", "version", "=", "None", ",", "tags", "=", "None", ")", ":", "ext", "=", "os", ".", "path", ".", "splitext", "(", "source_fpath", ")", "[", "1", "]", "ext", "=", "ext", "[", "1", ":", "]",...
Copies a given file into local store as an instance of this dataset. Parameters ---------- source_fpath : str The full path for the source file to use. version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. Returns ------- ext : str The extension of the file added.
[ "Copies", "a", "given", "file", "into", "local", "store", "as", "an", "instance", "of", "this", "dataset", "." ]
train
https://github.com/shaypal5/barn/blob/85958a0f9ac94943729605e70527ee726d3f3007/barn/dataset.py#L130-L151
shaypal5/barn
barn/dataset.py
Dataset.upload
def upload(self, version=None, tags=None, ext=None, source_fpath=None, overwrite=False, **kwargs): """Uploads the given instance of this dataset to dataset store. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. If source_fpath is given, this is ignored, and the extension of the source f source_fpath : str, optional The full path for the source file to use. If given, the file is copied from the given path to the local storage path before uploading. **kwargs : extra keyword arguments Extra keyword arguments are forwarded to azure.storage.blob.BlockBlobService.create_blob_from_path. """ if source_fpath: ext = self.add_local( source_fpath=source_fpath, version=version, tags=tags) if ext is None: ext = self._find_extension(version=version, tags=tags) if ext is None: attribs = "{}{}".format( "version={} and ".format(version) if version else "", "tags={}".format(tags) if tags else "", ) raise MissingDatasetError( "No dataset with {} in local store!".format(attribs)) fpath = self.fpath(version=version, tags=tags, ext=ext) if not os.path.isfile(fpath): attribs = "{}{}ext={}".format( "version={} and ".format(version) if version else "", "tags={} and ".format(tags) if tags else "", ext, ) raise MissingDatasetError( "No dataset with {} in local store! (path={})".format( attribs, fpath)) upload_dataset( dataset_name=self.name, file_path=fpath, task=self.task, dataset_attributes=self.kwargs, **kwargs, )
python
def upload(self, version=None, tags=None, ext=None, source_fpath=None, overwrite=False, **kwargs): """Uploads the given instance of this dataset to dataset store. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. If source_fpath is given, this is ignored, and the extension of the source f source_fpath : str, optional The full path for the source file to use. If given, the file is copied from the given path to the local storage path before uploading. **kwargs : extra keyword arguments Extra keyword arguments are forwarded to azure.storage.blob.BlockBlobService.create_blob_from_path. """ if source_fpath: ext = self.add_local( source_fpath=source_fpath, version=version, tags=tags) if ext is None: ext = self._find_extension(version=version, tags=tags) if ext is None: attribs = "{}{}".format( "version={} and ".format(version) if version else "", "tags={}".format(tags) if tags else "", ) raise MissingDatasetError( "No dataset with {} in local store!".format(attribs)) fpath = self.fpath(version=version, tags=tags, ext=ext) if not os.path.isfile(fpath): attribs = "{}{}ext={}".format( "version={} and ".format(version) if version else "", "tags={} and ".format(tags) if tags else "", ext, ) raise MissingDatasetError( "No dataset with {} in local store! (path={})".format( attribs, fpath)) upload_dataset( dataset_name=self.name, file_path=fpath, task=self.task, dataset_attributes=self.kwargs, **kwargs, )
[ "def", "upload", "(", "self", ",", "version", "=", "None", ",", "tags", "=", "None", ",", "ext", "=", "None", ",", "source_fpath", "=", "None", ",", "overwrite", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "source_fpath", ":", "ext", "="...
Uploads the given instance of this dataset to dataset store. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. If source_fpath is given, this is ignored, and the extension of the source f source_fpath : str, optional The full path for the source file to use. If given, the file is copied from the given path to the local storage path before uploading. **kwargs : extra keyword arguments Extra keyword arguments are forwarded to azure.storage.blob.BlockBlobService.create_blob_from_path.
[ "Uploads", "the", "given", "instance", "of", "this", "dataset", "to", "dataset", "store", "." ]
train
https://github.com/shaypal5/barn/blob/85958a0f9ac94943729605e70527ee726d3f3007/barn/dataset.py#L174-L224
shaypal5/barn
barn/dataset.py
Dataset.download
def download(self, version=None, tags=None, ext=None, overwrite=False, verbose=False, **kwargs): """Downloads the given instance of this dataset from dataset store. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. overwrite : bool, default False If set to True, the given instance of the dataset is downloaded from dataset store even if it exists in the local data directory. Otherwise, if a matching dataset is found localy, download is skipped. verbose : bool, default False If set to True, informative messages are printed. **kwargs : extra keyword arguments Extra keyword arguments are forwarded to azure.storage.blob.BlockBlobService.get_blob_to_path. """ fpath = self.fpath(version=version, tags=tags, ext=ext) if os.path.isfile(fpath) and not overwrite: if verbose: print( "File exists and overwrite set to False, so not " "downloading {} with version={} and tags={}".format( self.name, version, tags)) return download_dataset( dataset_name=self.name, file_path=fpath, task=self.task, dataset_attributes=self.kwargs, **kwargs, )
python
def download(self, version=None, tags=None, ext=None, overwrite=False, verbose=False, **kwargs): """Downloads the given instance of this dataset from dataset store. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. overwrite : bool, default False If set to True, the given instance of the dataset is downloaded from dataset store even if it exists in the local data directory. Otherwise, if a matching dataset is found localy, download is skipped. verbose : bool, default False If set to True, informative messages are printed. **kwargs : extra keyword arguments Extra keyword arguments are forwarded to azure.storage.blob.BlockBlobService.get_blob_to_path. """ fpath = self.fpath(version=version, tags=tags, ext=ext) if os.path.isfile(fpath) and not overwrite: if verbose: print( "File exists and overwrite set to False, so not " "downloading {} with version={} and tags={}".format( self.name, version, tags)) return download_dataset( dataset_name=self.name, file_path=fpath, task=self.task, dataset_attributes=self.kwargs, **kwargs, )
[ "def", "download", "(", "self", ",", "version", "=", "None", ",", "tags", "=", "None", ",", "ext", "=", "None", ",", "overwrite", "=", "False", ",", "verbose", "=", "False", ",", "*", "*", "kwargs", ")", ":", "fpath", "=", "self", ".", "fpath", "...
Downloads the given instance of this dataset from dataset store. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. overwrite : bool, default False If set to True, the given instance of the dataset is downloaded from dataset store even if it exists in the local data directory. Otherwise, if a matching dataset is found localy, download is skipped. verbose : bool, default False If set to True, informative messages are printed. **kwargs : extra keyword arguments Extra keyword arguments are forwarded to azure.storage.blob.BlockBlobService.get_blob_to_path.
[ "Downloads", "the", "given", "instance", "of", "this", "dataset", "from", "dataset", "store", "." ]
train
https://github.com/shaypal5/barn/blob/85958a0f9ac94943729605e70527ee726d3f3007/barn/dataset.py#L226-L264
shaypal5/barn
barn/dataset.py
Dataset.df
def df(self, version=None, tags=None, ext=None, **kwargs): """Loads an instance of this dataset into a dataframe. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the desired instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. **kwargs : extra keyword arguments, optional Extra keyword arguments are forwarded to the deserialization method of the SerializationFormat object corresponding to the extension used. Returns ------- pandas.DataFrame A dataframe containing the desired instance of this dataset. """ ext = self._find_extension(version=version, tags=tags) if ext is None: attribs = "{}{}".format( "version={} and ".format(version) if version else "", "tags={}".format(tags) if tags else "", ) raise MissingDatasetError( "No dataset with {} in local store!".format(attribs)) fpath = self.fpath(version=version, tags=tags, ext=ext) fmt = SerializationFormat.by_name(ext) return fmt.deserialize(fpath, **kwargs)
python
def df(self, version=None, tags=None, ext=None, **kwargs): """Loads an instance of this dataset into a dataframe. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the desired instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. **kwargs : extra keyword arguments, optional Extra keyword arguments are forwarded to the deserialization method of the SerializationFormat object corresponding to the extension used. Returns ------- pandas.DataFrame A dataframe containing the desired instance of this dataset. """ ext = self._find_extension(version=version, tags=tags) if ext is None: attribs = "{}{}".format( "version={} and ".format(version) if version else "", "tags={}".format(tags) if tags else "", ) raise MissingDatasetError( "No dataset with {} in local store!".format(attribs)) fpath = self.fpath(version=version, tags=tags, ext=ext) fmt = SerializationFormat.by_name(ext) return fmt.deserialize(fpath, **kwargs)
[ "def", "df", "(", "self", ",", "version", "=", "None", ",", "tags", "=", "None", ",", "ext", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ext", "=", "self", ".", "_find_extension", "(", "version", "=", "version", ",", "tags", "=", "tags", ")"...
Loads an instance of this dataset into a dataframe. Parameters ---------- version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the desired instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. **kwargs : extra keyword arguments, optional Extra keyword arguments are forwarded to the deserialization method of the SerializationFormat object corresponding to the extension used. Returns ------- pandas.DataFrame A dataframe containing the desired instance of this dataset.
[ "Loads", "an", "instance", "of", "this", "dataset", "into", "a", "dataframe", "." ]
train
https://github.com/shaypal5/barn/blob/85958a0f9ac94943729605e70527ee726d3f3007/barn/dataset.py#L266-L298
shaypal5/barn
barn/dataset.py
Dataset.dump_df
def dump_df(self, df, version=None, tags=None, ext=None, **kwargs): """Dumps an instance of this dataset into a file. Parameters ---------- df : pandas.DataFrame The dataframe to dump to file. version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. **kwargs : extra keyword arguments, optional Extra keyword arguments are forwarded to the serialization method of the SerializationFormat object corresponding to the extension used. """ if ext is None: ext = self.default_ext fpath = self.fpath(version=version, tags=tags, ext=ext) fmt = SerializationFormat.by_name(ext) fmt.serialize(df, fpath, **kwargs)
python
def dump_df(self, df, version=None, tags=None, ext=None, **kwargs): """Dumps an instance of this dataset into a file. Parameters ---------- df : pandas.DataFrame The dataframe to dump to file. version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. **kwargs : extra keyword arguments, optional Extra keyword arguments are forwarded to the serialization method of the SerializationFormat object corresponding to the extension used. """ if ext is None: ext = self.default_ext fpath = self.fpath(version=version, tags=tags, ext=ext) fmt = SerializationFormat.by_name(ext) fmt.serialize(df, fpath, **kwargs)
[ "def", "dump_df", "(", "self", ",", "df", ",", "version", "=", "None", ",", "tags", "=", "None", ",", "ext", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ext", "is", "None", ":", "ext", "=", "self", ".", "default_ext", "fpath", "=", "...
Dumps an instance of this dataset into a file. Parameters ---------- df : pandas.DataFrame The dataframe to dump to file. version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. **kwargs : extra keyword arguments, optional Extra keyword arguments are forwarded to the serialization method of the SerializationFormat object corresponding to the extension used.
[ "Dumps", "an", "instance", "of", "this", "dataset", "into", "a", "file", "." ]
train
https://github.com/shaypal5/barn/blob/85958a0f9ac94943729605e70527ee726d3f3007/barn/dataset.py#L300-L323
shaypal5/barn
barn/dataset.py
Dataset.upload_df
def upload_df(self, df, version=None, tags=None, ext=None, **kwargs): """Dumps an instance of this dataset into a file and then uploads it to dataset store. Parameters ---------- df : pandas.DataFrame The dataframe to dump and upload. version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. **kwargs : extra keyword arguments, optional Extra keyword arguments are forwarded to the serialization method of the SerializationFormat object corresponding to the extension used. """ self.dump_df(df=df, version=version, tags=tags, ext=ext, **kwargs) self.upload(version=version, tags=tags, ext=ext)
python
def upload_df(self, df, version=None, tags=None, ext=None, **kwargs): """Dumps an instance of this dataset into a file and then uploads it to dataset store. Parameters ---------- df : pandas.DataFrame The dataframe to dump and upload. version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. **kwargs : extra keyword arguments, optional Extra keyword arguments are forwarded to the serialization method of the SerializationFormat object corresponding to the extension used. """ self.dump_df(df=df, version=version, tags=tags, ext=ext, **kwargs) self.upload(version=version, tags=tags, ext=ext)
[ "def", "upload_df", "(", "self", ",", "df", ",", "version", "=", "None", ",", "tags", "=", "None", ",", "ext", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "dump_df", "(", "df", "=", "df", ",", "version", "=", "version", ",", "t...
Dumps an instance of this dataset into a file and then uploads it to dataset store. Parameters ---------- df : pandas.DataFrame The dataframe to dump and upload. version: str, optional The version of the instance of this dataset. tags : list of str, optional The tags associated with the given instance of this dataset. ext : str, optional The file extension to use. If not given, the default extension is used. **kwargs : extra keyword arguments, optional Extra keyword arguments are forwarded to the serialization method of the SerializationFormat object corresponding to the extension used.
[ "Dumps", "an", "instance", "of", "this", "dataset", "into", "a", "file", "and", "then", "uploads", "it", "to", "dataset", "store", "." ]
train
https://github.com/shaypal5/barn/blob/85958a0f9ac94943729605e70527ee726d3f3007/barn/dataset.py#L325-L346
duniter/duniter-python-api
duniterpy/documents/identity.py
Identity.from_inline
def from_inline(cls: Type[IdentityType], version: int, currency: str, inline: str) -> IdentityType: """ Return Identity instance from inline Identity string :param version: Document version number :param currency: Name of the currency :param inline: Inline string of the Identity :return: """ selfcert_data = Identity.re_inline.match(inline) if selfcert_data is None: raise MalformedDocumentError("Inline self certification") pubkey = selfcert_data.group(1) signature = selfcert_data.group(2) ts = BlockUID.from_str(selfcert_data.group(3)) uid = selfcert_data.group(4) return cls(version, currency, pubkey, uid, ts, signature)
python
def from_inline(cls: Type[IdentityType], version: int, currency: str, inline: str) -> IdentityType: """ Return Identity instance from inline Identity string :param version: Document version number :param currency: Name of the currency :param inline: Inline string of the Identity :return: """ selfcert_data = Identity.re_inline.match(inline) if selfcert_data is None: raise MalformedDocumentError("Inline self certification") pubkey = selfcert_data.group(1) signature = selfcert_data.group(2) ts = BlockUID.from_str(selfcert_data.group(3)) uid = selfcert_data.group(4) return cls(version, currency, pubkey, uid, ts, signature)
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "IdentityType", "]", ",", "version", ":", "int", ",", "currency", ":", "str", ",", "inline", ":", "str", ")", "->", "IdentityType", ":", "selfcert_data", "=", "Identity", ".", "re_inline", ".", "match",...
Return Identity instance from inline Identity string :param version: Document version number :param currency: Name of the currency :param inline: Inline string of the Identity :return:
[ "Return", "Identity", "instance", "from", "inline", "Identity", "string", ":", "param", "version", ":", "Document", "version", "number", ":", "param", "currency", ":", "Name", "of", "the", "currency", ":", "param", "inline", ":", "Inline", "string", "of", "t...
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/identity.py#L56-L72
duniter/duniter-python-api
duniterpy/documents/identity.py
Identity.from_signed_raw
def from_signed_raw(cls: Type[IdentityType], signed_raw: str) -> IdentityType: """ Return Identity instance from a signed_raw string :param signed_raw: Signed raw document :return: """ n = 0 lines = signed_raw.splitlines(True) version = int(Identity.parse_field("Version", lines[n])) n += 1 Identity.parse_field("Type", lines[n]) n += 1 currency = Identity.parse_field("Currency", lines[n]) n += 1 pubkey = Identity.parse_field("Issuer", lines[n]) n += 1 uid = Identity.parse_field("UniqueID", lines[n]) n += 1 ts = BlockUID.from_str(Identity.parse_field("Timestamp", lines[n])) n += 1 signature = Identity.parse_field("Signature", lines[n]) return cls(version, currency, pubkey, uid, ts, signature)
python
def from_signed_raw(cls: Type[IdentityType], signed_raw: str) -> IdentityType: """ Return Identity instance from a signed_raw string :param signed_raw: Signed raw document :return: """ n = 0 lines = signed_raw.splitlines(True) version = int(Identity.parse_field("Version", lines[n])) n += 1 Identity.parse_field("Type", lines[n]) n += 1 currency = Identity.parse_field("Currency", lines[n]) n += 1 pubkey = Identity.parse_field("Issuer", lines[n]) n += 1 uid = Identity.parse_field("UniqueID", lines[n]) n += 1 ts = BlockUID.from_str(Identity.parse_field("Timestamp", lines[n])) n += 1 signature = Identity.parse_field("Signature", lines[n]) return cls(version, currency, pubkey, uid, ts, signature)
[ "def", "from_signed_raw", "(", "cls", ":", "Type", "[", "IdentityType", "]", ",", "signed_raw", ":", "str", ")", "->", "IdentityType", ":", "n", "=", "0", "lines", "=", "signed_raw", ".", "splitlines", "(", "True", ")", "version", "=", "int", "(", "Ide...
Return Identity instance from a signed_raw string :param signed_raw: Signed raw document :return:
[ "Return", "Identity", "instance", "from", "a", "signed_raw", "string", ":", "param", "signed_raw", ":", "Signed", "raw", "document", ":", "return", ":" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/identity.py#L75-L104
duniter/duniter-python-api
duniterpy/documents/identity.py
Identity.raw
def raw(self) -> str: """ Return a raw document of the Identity :return: """ return """Version: {version} Type: Identity Currency: {currency} Issuer: {pubkey} UniqueID: {uid} Timestamp: {timestamp} """.format(version=self.version, currency=self.currency, pubkey=self.pubkey, uid=self.uid, timestamp=self.timestamp)
python
def raw(self) -> str: """ Return a raw document of the Identity :return: """ return """Version: {version} Type: Identity Currency: {currency} Issuer: {pubkey} UniqueID: {uid} Timestamp: {timestamp} """.format(version=self.version, currency=self.currency, pubkey=self.pubkey, uid=self.uid, timestamp=self.timestamp)
[ "def", "raw", "(", "self", ")", "->", "str", ":", "return", "\"\"\"Version: {version}\nType: Identity\nCurrency: {currency}\nIssuer: {pubkey}\nUniqueID: {uid}\nTimestamp: {timestamp}\n\"\"\"", ".", "format", "(", "version", "=", "self", ".", "version", ",", "currency", "=", ...
Return a raw document of the Identity :return:
[ "Return", "a", "raw", "document", "of", "the", "Identity", ":", "return", ":" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/identity.py#L106-L121
duniter/duniter-python-api
duniterpy/documents/identity.py
Identity.inline
def inline(self) -> str: """ Return an inline string of the Identity :return: """ return "{pubkey}:{signature}:{timestamp}:{uid}".format( pubkey=self.pubkey, signature=self.signatures[0], timestamp=self.timestamp, uid=self.uid)
python
def inline(self) -> str: """ Return an inline string of the Identity :return: """ return "{pubkey}:{signature}:{timestamp}:{uid}".format( pubkey=self.pubkey, signature=self.signatures[0], timestamp=self.timestamp, uid=self.uid)
[ "def", "inline", "(", "self", ")", "->", "str", ":", "return", "\"{pubkey}:{signature}:{timestamp}:{uid}\"", ".", "format", "(", "pubkey", "=", "self", ".", "pubkey", ",", "signature", "=", "self", ".", "signatures", "[", "0", "]", ",", "timestamp", "=", "...
Return an inline string of the Identity :return:
[ "Return", "an", "inline", "string", "of", "the", "Identity", ":", "return", ":" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/identity.py#L123-L132
sivakov512/python-static-api-generator
static_api_generator/loaders.py
BaseLoader._validate_extension
def _validate_extension(self): """Validates that source file extension is supported. :raises: UnsupportedExtensionError """ extension = self.fpath.split('.')[-1] if extension not in self.supported_extensions: raise UnsupportedExtensionError
python
def _validate_extension(self): """Validates that source file extension is supported. :raises: UnsupportedExtensionError """ extension = self.fpath.split('.')[-1] if extension not in self.supported_extensions: raise UnsupportedExtensionError
[ "def", "_validate_extension", "(", "self", ")", ":", "extension", "=", "self", ".", "fpath", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "if", "extension", "not", "in", "self", ".", "supported_extensions", ":", "raise", "UnsupportedExtensionError" ]
Validates that source file extension is supported. :raises: UnsupportedExtensionError
[ "Validates", "that", "source", "file", "extension", "is", "supported", "." ]
train
https://github.com/sivakov512/python-static-api-generator/blob/0a7ec27324b9b2a3d1fa9894c4cba73af9ebcc01/static_api_generator/loaders.py#L26-L34
sivakov512/python-static-api-generator
static_api_generator/loaders.py
BaseLoader.convert_content
def convert_content(self) -> dict: """Convert content of source file into dict result.""" source_content = self.load_content() converted = {} tagged, content = self._get_tags_and_content(source_content) if tagged: converted.update(self._parse_tags(tagged)) if content: converted['content'] = content.strip() return converted
python
def convert_content(self) -> dict: """Convert content of source file into dict result.""" source_content = self.load_content() converted = {} tagged, content = self._get_tags_and_content(source_content) if tagged: converted.update(self._parse_tags(tagged)) if content: converted['content'] = content.strip() return converted
[ "def", "convert_content", "(", "self", ")", "->", "dict", ":", "source_content", "=", "self", ".", "load_content", "(", ")", "converted", "=", "{", "}", "tagged", ",", "content", "=", "self", ".", "_get_tags_and_content", "(", "source_content", ")", "if", ...
Convert content of source file into dict result.
[ "Convert", "content", "of", "source", "file", "into", "dict", "result", "." ]
train
https://github.com/sivakov512/python-static-api-generator/blob/0a7ec27324b9b2a3d1fa9894c4cba73af9ebcc01/static_api_generator/loaders.py#L41-L53
sivakov512/python-static-api-generator
static_api_generator/loaders.py
BaseLoader._get_tags_and_content
def _get_tags_and_content(self, content: str) -> typing.Tuple[str, str]: """Splits content into two string - tags part and another content.""" content_lines = content.split('\n') tag_lines = [] if content_lines[0] != '---': return '', content content_lines.pop(0) for line in content_lines: # type: str if line in ('---', '...'): content_starts_at = content_lines.index(line) + 1 content_lines = content_lines[content_starts_at:] break tag_lines.append(line) return '\n'.join(tag_lines), '\n'.join(content_lines)
python
def _get_tags_and_content(self, content: str) -> typing.Tuple[str, str]: """Splits content into two string - tags part and another content.""" content_lines = content.split('\n') tag_lines = [] if content_lines[0] != '---': return '', content content_lines.pop(0) for line in content_lines: # type: str if line in ('---', '...'): content_starts_at = content_lines.index(line) + 1 content_lines = content_lines[content_starts_at:] break tag_lines.append(line) return '\n'.join(tag_lines), '\n'.join(content_lines)
[ "def", "_get_tags_and_content", "(", "self", ",", "content", ":", "str", ")", "->", "typing", ".", "Tuple", "[", "str", ",", "str", "]", ":", "content_lines", "=", "content", ".", "split", "(", "'\\n'", ")", "tag_lines", "=", "[", "]", "if", "content_l...
Splits content into two string - tags part and another content.
[ "Splits", "content", "into", "two", "string", "-", "tags", "part", "and", "another", "content", "." ]
train
https://github.com/sivakov512/python-static-api-generator/blob/0a7ec27324b9b2a3d1fa9894c4cba73af9ebcc01/static_api_generator/loaders.py#L59-L76
dependencies-io/schema
dependencies_schema/v1/parsing.py
parse
def parse(text): """ Parses the dependency schema from a given string (typically a container stdout log) """ found = re.findall(r'(?<=BEGIN_DEPENDENCIES_SCHEMA_OUTPUT>).*(?=<END_DEPENDENCIES_SCHEMA_OUTPUT)', text) dependency_results = [] for match in found: data = json.loads(match) validate(data) # will throw ValidationError if invalid dependency_results += data['dependencies'] # we don't have any other fields yet, but in the future # may have schema 'version' in which case we'd want to check # the versions and compile all the results into 1 schema? combined_results = {'dependencies': dependency_results} return combined_results
python
def parse(text): """ Parses the dependency schema from a given string (typically a container stdout log) """ found = re.findall(r'(?<=BEGIN_DEPENDENCIES_SCHEMA_OUTPUT>).*(?=<END_DEPENDENCIES_SCHEMA_OUTPUT)', text) dependency_results = [] for match in found: data = json.loads(match) validate(data) # will throw ValidationError if invalid dependency_results += data['dependencies'] # we don't have any other fields yet, but in the future # may have schema 'version' in which case we'd want to check # the versions and compile all the results into 1 schema? combined_results = {'dependencies': dependency_results} return combined_results
[ "def", "parse", "(", "text", ")", ":", "found", "=", "re", ".", "findall", "(", "r'(?<=BEGIN_DEPENDENCIES_SCHEMA_OUTPUT>).*(?=<END_DEPENDENCIES_SCHEMA_OUTPUT)'", ",", "text", ")", "dependency_results", "=", "[", "]", "for", "match", "in", "found", ":", "data", "="...
Parses the dependency schema from a given string (typically a container stdout log)
[ "Parses", "the", "dependency", "schema", "from", "a", "given", "string", "(", "typically", "a", "container", "stdout", "log", ")" ]
train
https://github.com/dependencies-io/schema/blob/32c7b3eca6b814151b74994fcd0e3cc2b03e9115/dependencies_schema/v1/parsing.py#L7-L29
ShawnClake/Apitax
apitax/ah/api/controllers/users_controller.py
command
def command(execute=None): # noqa: E501 """Execute a Command Execute a command # noqa: E501 :param execute: The data needed to execute this command :type execute: dict | bytes :rtype: Response """ if connexion.request.is_json: execute = Execute.from_dict(connexion.request.get_json()) # noqa: E501 if(not hasAccess()): return redirectUnauthorized() try: connector = None parameters = {} if (execute.command.parameters): parameters = execute.command.parameters credentials = Credentials() options = Options(debug=execute.command.options['debug'], sensitive=execute.command.options['sensitive']) if (execute.auth): credentials = mapUserAuthToCredentials(execute.auth, credentials) if (not execute.auth.api_token): options.sensitive = True connector = Connector(options=options, credentials=credentials, command=execute.command.command, parameters=parameters) commandHandler = connector.execute() response = Response(status=commandHandler.getRequest().getResponseStatusCode(), body=json.loads(commandHandler.getRequest().getResponseBody())) if (execute.command.options['debug']): response.log = connector.logBuffer return response except: State.log.error(traceback.format_exc()) if ('debug' in execute.command.options and execute.command.options['debug']): return ErrorResponse(status=500, message="Uncaught exception occured during processing. To get a larger stack trace, visit the logs.", state=traceback.format_exc(3)) else: return ErrorResponse(status=500, message="")
python
def command(execute=None): # noqa: E501 """Execute a Command Execute a command # noqa: E501 :param execute: The data needed to execute this command :type execute: dict | bytes :rtype: Response """ if connexion.request.is_json: execute = Execute.from_dict(connexion.request.get_json()) # noqa: E501 if(not hasAccess()): return redirectUnauthorized() try: connector = None parameters = {} if (execute.command.parameters): parameters = execute.command.parameters credentials = Credentials() options = Options(debug=execute.command.options['debug'], sensitive=execute.command.options['sensitive']) if (execute.auth): credentials = mapUserAuthToCredentials(execute.auth, credentials) if (not execute.auth.api_token): options.sensitive = True connector = Connector(options=options, credentials=credentials, command=execute.command.command, parameters=parameters) commandHandler = connector.execute() response = Response(status=commandHandler.getRequest().getResponseStatusCode(), body=json.loads(commandHandler.getRequest().getResponseBody())) if (execute.command.options['debug']): response.log = connector.logBuffer return response except: State.log.error(traceback.format_exc()) if ('debug' in execute.command.options and execute.command.options['debug']): return ErrorResponse(status=500, message="Uncaught exception occured during processing. To get a larger stack trace, visit the logs.", state=traceback.format_exc(3)) else: return ErrorResponse(status=500, message="")
[ "def", "command", "(", "execute", "=", "None", ")", ":", "# noqa: E501", "if", "connexion", ".", "request", ".", "is_json", ":", "execute", "=", "Execute", ".", "from_dict", "(", "connexion", ".", "request", ".", "get_json", "(", ")", ")", "# noqa: E501", ...
Execute a Command Execute a command # noqa: E501 :param execute: The data needed to execute this command :type execute: dict | bytes :rtype: Response
[ "Execute", "a", "Command" ]
train
https://github.com/ShawnClake/Apitax/blob/2eb9c6990d4088b2503c7f13c2a76f8e59606e6d/apitax/ah/api/controllers/users_controller.py#L30-L82
ShawnClake/Apitax
apitax/ah/api/controllers/users_controller.py
get_script
def get_script(name=None): # noqa: E501 """Retrieve the contents of a script Retrieve the contents of a script # noqa: E501 :param name: The script name. :type name: str :rtype: Response """ if(not hasAccess()): return redirectUnauthorized() driver = LoadedDrivers.getDefaultBaseDriver() return Response(status=200, body=driver.readScript(name))
python
def get_script(name=None): # noqa: E501 """Retrieve the contents of a script Retrieve the contents of a script # noqa: E501 :param name: The script name. :type name: str :rtype: Response """ if(not hasAccess()): return redirectUnauthorized() driver = LoadedDrivers.getDefaultBaseDriver() return Response(status=200, body=driver.readScript(name))
[ "def", "get_script", "(", "name", "=", "None", ")", ":", "# noqa: E501", "if", "(", "not", "hasAccess", "(", ")", ")", ":", "return", "redirectUnauthorized", "(", ")", "driver", "=", "LoadedDrivers", ".", "getDefaultBaseDriver", "(", ")", "return", "Response...
Retrieve the contents of a script Retrieve the contents of a script # noqa: E501 :param name: The script name. :type name: str :rtype: Response
[ "Retrieve", "the", "contents", "of", "a", "script" ]
train
https://github.com/ShawnClake/Apitax/blob/2eb9c6990d4088b2503c7f13c2a76f8e59606e6d/apitax/ah/api/controllers/users_controller.py#L126-L141
caktus/django-comps
comps/views.py
comp_listing
def comp_listing(request, directory_slug=None): """ Output the list of HTML templates and subdirectories in the COMPS_DIR """ context = {} working_dir = settings.COMPS_DIR if directory_slug: working_dir = os.path.join(working_dir, directory_slug) dirnames = [] templates = [] items = os.listdir(working_dir) templates = [x for x in items if os.path.splitext(x)[1] == '.html'] dirnames = [x for x in items if \ not os.path.isfile(os.path.join(working_dir, x))] templates.sort() dirnames.sort() context['directories'] = dirnames context['templates'] = templates context['subdirectory'] = directory_slug return render(request, "comps/comp_listing.html", context)
python
def comp_listing(request, directory_slug=None): """ Output the list of HTML templates and subdirectories in the COMPS_DIR """ context = {} working_dir = settings.COMPS_DIR if directory_slug: working_dir = os.path.join(working_dir, directory_slug) dirnames = [] templates = [] items = os.listdir(working_dir) templates = [x for x in items if os.path.splitext(x)[1] == '.html'] dirnames = [x for x in items if \ not os.path.isfile(os.path.join(working_dir, x))] templates.sort() dirnames.sort() context['directories'] = dirnames context['templates'] = templates context['subdirectory'] = directory_slug return render(request, "comps/comp_listing.html", context)
[ "def", "comp_listing", "(", "request", ",", "directory_slug", "=", "None", ")", ":", "context", "=", "{", "}", "working_dir", "=", "settings", ".", "COMPS_DIR", "if", "directory_slug", ":", "working_dir", "=", "os", ".", "path", ".", "join", "(", "working_...
Output the list of HTML templates and subdirectories in the COMPS_DIR
[ "Output", "the", "list", "of", "HTML", "templates", "and", "subdirectories", "in", "the", "COMPS_DIR" ]
train
https://github.com/caktus/django-comps/blob/351c83634f556a420212268f4e7b5882a049d3c2/comps/views.py#L17-L36
caktus/django-comps
comps/views.py
comp
def comp(request, slug, directory_slug=None): """ View the requested comp """ context = {} path = settings.COMPS_DIR comp_dir = os.path.split(path)[1] template = "{0}/{1}".format(comp_dir, slug) if directory_slug: template = "{0}/{1}/{2}".format(comp_dir, directory_slug, slug) working_dir = os.path.join(path, slug) if os.path.isdir(working_dir): return redirect('comp-listing', directory_slug=slug) try: t = get_template(template) except TemplateDoesNotExist: return redirect('comp-listing') c = RequestContext(request, context) return HttpResponse(t.render(c))
python
def comp(request, slug, directory_slug=None): """ View the requested comp """ context = {} path = settings.COMPS_DIR comp_dir = os.path.split(path)[1] template = "{0}/{1}".format(comp_dir, slug) if directory_slug: template = "{0}/{1}/{2}".format(comp_dir, directory_slug, slug) working_dir = os.path.join(path, slug) if os.path.isdir(working_dir): return redirect('comp-listing', directory_slug=slug) try: t = get_template(template) except TemplateDoesNotExist: return redirect('comp-listing') c = RequestContext(request, context) return HttpResponse(t.render(c))
[ "def", "comp", "(", "request", ",", "slug", ",", "directory_slug", "=", "None", ")", ":", "context", "=", "{", "}", "path", "=", "settings", ".", "COMPS_DIR", "comp_dir", "=", "os", ".", "path", ".", "split", "(", "path", ")", "[", "1", "]", "templ...
View the requested comp
[ "View", "the", "requested", "comp" ]
train
https://github.com/caktus/django-comps/blob/351c83634f556a420212268f4e7b5882a049d3c2/comps/views.py#L39-L59
caktus/django-comps
comps/views.py
export_comps
def export_comps(request): """ Returns a zipfile of the rendered HTML templates in the COMPS_DIR """ in_memory = BytesIO() zip = ZipFile(in_memory, "a") comps = settings.COMPS_DIR static = settings.STATIC_ROOT or "" context = RequestContext(request, {}) context['debug'] = False # dump static resources # TODO: inspect each template and only pull in resources that are used for dirname, dirs, filenames in os.walk(static): for filename in filenames: full_path = os.path.join(dirname, filename) rel_path = os.path.relpath(full_path, static) content = open(full_path, 'rb').read() try: ext = os.path.splitext(filename)[1] except IndexError: pass if ext == '.css': # convert static refs to relative links dotted_rel = os.path.relpath(static, full_path) new_rel_path = '{0}{1}'.format(dotted_rel, '/static') content = content.replace(b'/static', bytes(new_rel_path, 'utf8')) path = os.path.join('static', rel_path) zip.writestr(path, content) for dirname, dirs, filenames in os.walk(comps): for filename in filenames: full_path = os.path.join(dirname, filename) rel_path = os.path.relpath(full_path, comps) template_path = os.path.join(comps.split('/')[-1], rel_path) html = render_to_string(template_path, context) # convert static refs to relative links depth = len(rel_path.split(os.sep)) - 1 if depth == 0: dotted_rel = '.' else: dotted_rel = '' i = 0 while i < depth: dotted_rel += '../' i += 1 new_rel_path = '{0}{1}'.format(dotted_rel, '/static') html = html.replace('/static', new_rel_path) if PY2: html = unicode(html) zip.writestr(rel_path, html.encode('utf8')) for item in zip.filelist: item.create_system = 0 zip.close() response = HttpResponse(content_type="application/zip") response["Content-Disposition"] = "attachment; filename=comps.zip" in_memory.seek(0) response.write(in_memory.read()) return response
python
def export_comps(request): """ Returns a zipfile of the rendered HTML templates in the COMPS_DIR """ in_memory = BytesIO() zip = ZipFile(in_memory, "a") comps = settings.COMPS_DIR static = settings.STATIC_ROOT or "" context = RequestContext(request, {}) context['debug'] = False # dump static resources # TODO: inspect each template and only pull in resources that are used for dirname, dirs, filenames in os.walk(static): for filename in filenames: full_path = os.path.join(dirname, filename) rel_path = os.path.relpath(full_path, static) content = open(full_path, 'rb').read() try: ext = os.path.splitext(filename)[1] except IndexError: pass if ext == '.css': # convert static refs to relative links dotted_rel = os.path.relpath(static, full_path) new_rel_path = '{0}{1}'.format(dotted_rel, '/static') content = content.replace(b'/static', bytes(new_rel_path, 'utf8')) path = os.path.join('static', rel_path) zip.writestr(path, content) for dirname, dirs, filenames in os.walk(comps): for filename in filenames: full_path = os.path.join(dirname, filename) rel_path = os.path.relpath(full_path, comps) template_path = os.path.join(comps.split('/')[-1], rel_path) html = render_to_string(template_path, context) # convert static refs to relative links depth = len(rel_path.split(os.sep)) - 1 if depth == 0: dotted_rel = '.' else: dotted_rel = '' i = 0 while i < depth: dotted_rel += '../' i += 1 new_rel_path = '{0}{1}'.format(dotted_rel, '/static') html = html.replace('/static', new_rel_path) if PY2: html = unicode(html) zip.writestr(rel_path, html.encode('utf8')) for item in zip.filelist: item.create_system = 0 zip.close() response = HttpResponse(content_type="application/zip") response["Content-Disposition"] = "attachment; filename=comps.zip" in_memory.seek(0) response.write(in_memory.read()) return response
[ "def", "export_comps", "(", "request", ")", ":", "in_memory", "=", "BytesIO", "(", ")", "zip", "=", "ZipFile", "(", "in_memory", ",", "\"a\"", ")", "comps", "=", "settings", ".", "COMPS_DIR", "static", "=", "settings", ".", "STATIC_ROOT", "or", "\"\"", "...
Returns a zipfile of the rendered HTML templates in the COMPS_DIR
[ "Returns", "a", "zipfile", "of", "the", "rendered", "HTML", "templates", "in", "the", "COMPS_DIR" ]
train
https://github.com/caktus/django-comps/blob/351c83634f556a420212268f4e7b5882a049d3c2/comps/views.py#L62-L124
duniter/duniter-python-api
duniterpy/documents/peer.py
Peer.from_signed_raw
def from_signed_raw(cls: Type[PeerType], raw: str) -> PeerType: """ Return a Peer instance from a signed raw format string :param raw: Signed raw format string :return: """ lines = raw.splitlines(True) n = 0 version = int(Peer.parse_field("Version", lines[n])) n += 1 Peer.parse_field("Type", lines[n]) n += 1 currency = Peer.parse_field("Currency", lines[n]) n += 1 pubkey = Peer.parse_field("Pubkey", lines[n]) n += 1 block_uid = BlockUID.from_str(Peer.parse_field("Block", lines[n])) n += 1 Peer.parse_field("Endpoints", lines[n]) n += 1 endpoints = [] while not Peer.re_signature.match(lines[n]): endpoints.append(endpoint(lines[n])) n += 1 data = Peer.re_signature.match(lines[n]) if data is None: raise MalformedDocumentError("Peer") signature = data.group(1) return cls(version, currency, pubkey, block_uid, endpoints, signature)
python
def from_signed_raw(cls: Type[PeerType], raw: str) -> PeerType: """ Return a Peer instance from a signed raw format string :param raw: Signed raw format string :return: """ lines = raw.splitlines(True) n = 0 version = int(Peer.parse_field("Version", lines[n])) n += 1 Peer.parse_field("Type", lines[n]) n += 1 currency = Peer.parse_field("Currency", lines[n]) n += 1 pubkey = Peer.parse_field("Pubkey", lines[n]) n += 1 block_uid = BlockUID.from_str(Peer.parse_field("Block", lines[n])) n += 1 Peer.parse_field("Endpoints", lines[n]) n += 1 endpoints = [] while not Peer.re_signature.match(lines[n]): endpoints.append(endpoint(lines[n])) n += 1 data = Peer.re_signature.match(lines[n]) if data is None: raise MalformedDocumentError("Peer") signature = data.group(1) return cls(version, currency, pubkey, block_uid, endpoints, signature)
[ "def", "from_signed_raw", "(", "cls", ":", "Type", "[", "PeerType", "]", ",", "raw", ":", "str", ")", "->", "PeerType", ":", "lines", "=", "raw", ".", "splitlines", "(", "True", ")", "n", "=", "0", "version", "=", "int", "(", "Peer", ".", "parse_fi...
Return a Peer instance from a signed raw format string :param raw: Signed raw format string :return:
[ "Return", "a", "Peer", "instance", "from", "a", "signed", "raw", "format", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/peer.py#L61-L99
duniter/duniter-python-api
duniterpy/documents/peer.py
Peer.raw
def raw(self) -> str: """ Return a raw format string of the Peer document :return: """ doc = """Version: {0} Type: Peer Currency: {1} PublicKey: {2} Block: {3} Endpoints: """.format(self.version, self.currency, self.pubkey, self.blockUID) for _endpoint in self.endpoints: doc += "{0}\n".format(_endpoint.inline()) return doc
python
def raw(self) -> str: """ Return a raw format string of the Peer document :return: """ doc = """Version: {0} Type: Peer Currency: {1} PublicKey: {2} Block: {3} Endpoints: """.format(self.version, self.currency, self.pubkey, self.blockUID) for _endpoint in self.endpoints: doc += "{0}\n".format(_endpoint.inline()) return doc
[ "def", "raw", "(", "self", ")", "->", "str", ":", "doc", "=", "\"\"\"Version: {0}\nType: Peer\nCurrency: {1}\nPublicKey: {2}\nBlock: {3}\nEndpoints:\n\"\"\"", ".", "format", "(", "self", ".", "version", ",", "self", ".", "currency", ",", "self", ".", "pubkey", ",", ...
Return a raw format string of the Peer document :return:
[ "Return", "a", "raw", "format", "string", "of", "the", "Peer", "document" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/peer.py#L101-L118
marteinn/AtomicPress
atomicpress/utils/files.py
generate_image_from_url
def generate_image_from_url(url=None, timeout=30): """ Downloads and saves a image from url into a file. """ file_name = posixpath.basename(url) img_tmp = NamedTemporaryFile(delete=True) try: response = requests.get(url, timeout=timeout) response.raise_for_status() except Exception as e: # NOQA return None, None img_tmp.write(response.content) img_tmp.flush() image = File(img_tmp) image.seek(0) return file_name, image
python
def generate_image_from_url(url=None, timeout=30): """ Downloads and saves a image from url into a file. """ file_name = posixpath.basename(url) img_tmp = NamedTemporaryFile(delete=True) try: response = requests.get(url, timeout=timeout) response.raise_for_status() except Exception as e: # NOQA return None, None img_tmp.write(response.content) img_tmp.flush() image = File(img_tmp) image.seek(0) return file_name, image
[ "def", "generate_image_from_url", "(", "url", "=", "None", ",", "timeout", "=", "30", ")", ":", "file_name", "=", "posixpath", ".", "basename", "(", "url", ")", "img_tmp", "=", "NamedTemporaryFile", "(", "delete", "=", "True", ")", "try", ":", "response", ...
Downloads and saves a image from url into a file.
[ "Downloads", "and", "saves", "a", "image", "from", "url", "into", "a", "file", "." ]
train
https://github.com/marteinn/AtomicPress/blob/b8a0ca9c9c327f062833fc4a401a8ac0baccf6d1/atomicpress/utils/files.py#L16-L36
jvamvas/rhymediscovery
rhymediscovery/celex.py
is_rhyme
def is_rhyme(d, w1, w2): """check if words rhyme""" for p1 in d[w1]: # extract only "rhyming portion" p1 = p1.split("'")[-1] m = VOWELS_RE.search(p1) if not m: print(p1) p1 = p1[m.start():] for p2 in d[w2]: p2 = p2.split("'")[-1] m = VOWELS_RE.search(p2) if not m: print(w2, p2) p2 = p2[m.start():] if p1 == p2: return True return False
python
def is_rhyme(d, w1, w2): """check if words rhyme""" for p1 in d[w1]: # extract only "rhyming portion" p1 = p1.split("'")[-1] m = VOWELS_RE.search(p1) if not m: print(p1) p1 = p1[m.start():] for p2 in d[w2]: p2 = p2.split("'")[-1] m = VOWELS_RE.search(p2) if not m: print(w2, p2) p2 = p2[m.start():] if p1 == p2: return True return False
[ "def", "is_rhyme", "(", "d", ",", "w1", ",", "w2", ")", ":", "for", "p1", "in", "d", "[", "w1", "]", ":", "# extract only \"rhyming portion\"", "p1", "=", "p1", ".", "split", "(", "\"'\"", ")", "[", "-", "1", "]", "m", "=", "VOWELS_RE", ".", "sea...
check if words rhyme
[ "check", "if", "words", "rhyme" ]
train
https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/celex.py#L24-L41
jvamvas/rhymediscovery
rhymediscovery/celex.py
init_perfect_ttable
def init_perfect_ttable(words): """initialize (normalized) theta according to whether words rhyme""" d = read_celex() not_in_dict = 0 n = len(words) t_table = numpy.zeros((n, n + 1)) # initialize P(c|r) accordingly for r, w in enumerate(words): if w not in d: not_in_dict += 1 for c, v in enumerate(words): if c < r: t_table[r, c] = t_table[c, r] elif w in d and v in d: t_table[r, c] = int(is_rhyme(d, w, v)) + 0.001 # for backoff else: t_table[r, c] = random.random() t_table[r, n] = random.random() # no estimate for P(r|no history) print(not_in_dict, "of", n, " words are not in CELEX") # normalize for c in range(n + 1): tot = sum(t_table[:, c]) for r in range(n): t_table[r, c] = t_table[r, c] / tot return t_table
python
def init_perfect_ttable(words): """initialize (normalized) theta according to whether words rhyme""" d = read_celex() not_in_dict = 0 n = len(words) t_table = numpy.zeros((n, n + 1)) # initialize P(c|r) accordingly for r, w in enumerate(words): if w not in d: not_in_dict += 1 for c, v in enumerate(words): if c < r: t_table[r, c] = t_table[c, r] elif w in d and v in d: t_table[r, c] = int(is_rhyme(d, w, v)) + 0.001 # for backoff else: t_table[r, c] = random.random() t_table[r, n] = random.random() # no estimate for P(r|no history) print(not_in_dict, "of", n, " words are not in CELEX") # normalize for c in range(n + 1): tot = sum(t_table[:, c]) for r in range(n): t_table[r, c] = t_table[r, c] / tot return t_table
[ "def", "init_perfect_ttable", "(", "words", ")", ":", "d", "=", "read_celex", "(", ")", "not_in_dict", "=", "0", "n", "=", "len", "(", "words", ")", "t_table", "=", "numpy", ".", "zeros", "(", "(", "n", ",", "n", "+", "1", ")", ")", "# initialize P...
initialize (normalized) theta according to whether words rhyme
[ "initialize", "(", "normalized", ")", "theta", "according", "to", "whether", "words", "rhyme" ]
train
https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/celex.py#L44-L74
eReuse/utils
ereuse_utils/nested_lookup.py
get_nested_dicts_with_key_value
def get_nested_dicts_with_key_value(parent_dict: dict, key, value): """Return all nested dictionaries that contain a key with a specific value. A sub-case of NestedLookup.""" references = [] NestedLookup(parent_dict, references, NestedLookup.key_value_equality_factory(key, value)) return (document for document, _ in references)
python
def get_nested_dicts_with_key_value(parent_dict: dict, key, value): """Return all nested dictionaries that contain a key with a specific value. A sub-case of NestedLookup.""" references = [] NestedLookup(parent_dict, references, NestedLookup.key_value_equality_factory(key, value)) return (document for document, _ in references)
[ "def", "get_nested_dicts_with_key_value", "(", "parent_dict", ":", "dict", ",", "key", ",", "value", ")", ":", "references", "=", "[", "]", "NestedLookup", "(", "parent_dict", ",", "references", ",", "NestedLookup", ".", "key_value_equality_factory", "(", "key", ...
Return all nested dictionaries that contain a key with a specific value. A sub-case of NestedLookup.
[ "Return", "all", "nested", "dictionaries", "that", "contain", "a", "key", "with", "a", "specific", "value", ".", "A", "sub", "-", "case", "of", "NestedLookup", "." ]
train
https://github.com/eReuse/utils/blob/989062e85095ea4e1204523fe0e298cf1046a01c/ereuse_utils/nested_lookup.py#L69-L73
eReuse/utils
ereuse_utils/nested_lookup.py
get_nested_dicts_with_key_containing_value
def get_nested_dicts_with_key_containing_value(parent_dict: dict, key, value): """Return all nested dictionaries that contain a key with a specific value. A sub-case of NestedLookup.""" references = [] NestedLookup(parent_dict, references, NestedLookup.key_value_containing_value_factory(key, value)) return (document for document, _ in references)
python
def get_nested_dicts_with_key_containing_value(parent_dict: dict, key, value): """Return all nested dictionaries that contain a key with a specific value. A sub-case of NestedLookup.""" references = [] NestedLookup(parent_dict, references, NestedLookup.key_value_containing_value_factory(key, value)) return (document for document, _ in references)
[ "def", "get_nested_dicts_with_key_containing_value", "(", "parent_dict", ":", "dict", ",", "key", ",", "value", ")", ":", "references", "=", "[", "]", "NestedLookup", "(", "parent_dict", ",", "references", ",", "NestedLookup", ".", "key_value_containing_value_factory"...
Return all nested dictionaries that contain a key with a specific value. A sub-case of NestedLookup.
[ "Return", "all", "nested", "dictionaries", "that", "contain", "a", "key", "with", "a", "specific", "value", ".", "A", "sub", "-", "case", "of", "NestedLookup", "." ]
train
https://github.com/eReuse/utils/blob/989062e85095ea4e1204523fe0e298cf1046a01c/ereuse_utils/nested_lookup.py#L76-L80
eReuse/utils
ereuse_utils/nested_lookup.py
NestedLookup._nested_lookup
def _nested_lookup(document, references, operation): """Lookup a key in a nested document, yield a value""" if isinstance(document, list): for d in document: for result in NestedLookup._nested_lookup(d, references, operation): yield result if isinstance(document, dict): for k, v in document.items(): if operation(k, v): references.append((document, k)) yield v elif isinstance(v, dict): for result in NestedLookup._nested_lookup(v, references, operation): yield result elif isinstance(v, list): for d in v: for result in NestedLookup._nested_lookup(d, references, operation): yield result
python
def _nested_lookup(document, references, operation): """Lookup a key in a nested document, yield a value""" if isinstance(document, list): for d in document: for result in NestedLookup._nested_lookup(d, references, operation): yield result if isinstance(document, dict): for k, v in document.items(): if operation(k, v): references.append((document, k)) yield v elif isinstance(v, dict): for result in NestedLookup._nested_lookup(v, references, operation): yield result elif isinstance(v, list): for d in v: for result in NestedLookup._nested_lookup(d, references, operation): yield result
[ "def", "_nested_lookup", "(", "document", ",", "references", ",", "operation", ")", ":", "if", "isinstance", "(", "document", ",", "list", ")", ":", "for", "d", "in", "document", ":", "for", "result", "in", "NestedLookup", ".", "_nested_lookup", "(", "d", ...
Lookup a key in a nested document, yield a value
[ "Lookup", "a", "key", "in", "a", "nested", "document", "yield", "a", "value" ]
train
https://github.com/eReuse/utils/blob/989062e85095ea4e1204523fe0e298cf1046a01c/ereuse_utils/nested_lookup.py#L41-L59
ajyoon/blur
examples/waves/waves.py
step_random_processes
def step_random_processes(oscillators): """ Args: oscillators (list): A list of oscillator.Oscillator objects to operate on Returns: None """ if not rand.prob_bool(0.01): return amp_bias_weights = [(0.001, 1), (0.1, 100), (0.15, 40), (1, 0)] # Find out how many oscillators should move num_moves = iching.get_hexagram('NAIVE') % len(oscillators) for i in range(num_moves): pair = [gram % len(oscillators) for gram in iching.get_hexagram('THREE COIN')] amplitudes = [(gram / 64) * rand.weighted_rand(amp_bias_weights) for gram in iching.get_hexagram('THREE COIN')] oscillators[pair[0]].amplitude.drift_target = amplitudes[0] oscillators[pair[1]].amplitude.drift_target = amplitudes[1]
python
def step_random_processes(oscillators): """ Args: oscillators (list): A list of oscillator.Oscillator objects to operate on Returns: None """ if not rand.prob_bool(0.01): return amp_bias_weights = [(0.001, 1), (0.1, 100), (0.15, 40), (1, 0)] # Find out how many oscillators should move num_moves = iching.get_hexagram('NAIVE') % len(oscillators) for i in range(num_moves): pair = [gram % len(oscillators) for gram in iching.get_hexagram('THREE COIN')] amplitudes = [(gram / 64) * rand.weighted_rand(amp_bias_weights) for gram in iching.get_hexagram('THREE COIN')] oscillators[pair[0]].amplitude.drift_target = amplitudes[0] oscillators[pair[1]].amplitude.drift_target = amplitudes[1]
[ "def", "step_random_processes", "(", "oscillators", ")", ":", "if", "not", "rand", ".", "prob_bool", "(", "0.01", ")", ":", "return", "amp_bias_weights", "=", "[", "(", "0.001", ",", "1", ")", ",", "(", "0.1", ",", "100", ")", ",", "(", "0.15", ",", ...
Args: oscillators (list): A list of oscillator.Oscillator objects to operate on Returns: None
[ "Args", ":", "oscillators", "(", "list", ")", ":", "A", "list", "of", "oscillator", ".", "Oscillator", "objects", "to", "operate", "on" ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/examples/waves/waves.py#L156-L175
ajyoon/blur
examples/waves/waves.py
build_chunk
def build_chunk(oscillators): """ Build an audio chunk and progress the oscillator states. Args: oscillators (list): A list of oscillator.Oscillator objects to build chunks from Returns: str: a string of audio sample bytes ready to be written to a wave file """ step_random_processes(oscillators) subchunks = [] for osc in oscillators: osc.amplitude.step_amp() osc_chunk = osc.get_samples(config.CHUNK_SIZE) if osc_chunk is not None: subchunks.append(osc_chunk) if len(subchunks): new_chunk = sum(subchunks) else: new_chunk = numpy.zeros(config.CHUNK_SIZE) # If we exceed the maximum amplitude, handle it gracefully chunk_amplitude = amplitude.find_amplitude(new_chunk) if chunk_amplitude > config.MAX_AMPLITUDE: # Normalize the amplitude chunk to mitigate immediate clipping new_chunk = amplitude.normalize_amplitude(new_chunk, config.MAX_AMPLITUDE) # Pick some of the offending oscillators (and some random others) # and lower their drift targets avg_amp = (sum(osc.amplitude.value for osc in oscillators) / len(oscillators)) for osc in oscillators: if (osc.amplitude.value > avg_amp and rand.prob_bool(0.1) or rand.prob_bool(0.01)): osc.amplitude.drift_target = rand.weighted_rand( [(-5, 1), (0, 10)]) osc.amplitude.change_rate = rand.weighted_rand( osc.amplitude.change_rate_weights) return new_chunk.astype(config.SAMPLE_DATA_TYPE).tostring()
python
def build_chunk(oscillators): """ Build an audio chunk and progress the oscillator states. Args: oscillators (list): A list of oscillator.Oscillator objects to build chunks from Returns: str: a string of audio sample bytes ready to be written to a wave file """ step_random_processes(oscillators) subchunks = [] for osc in oscillators: osc.amplitude.step_amp() osc_chunk = osc.get_samples(config.CHUNK_SIZE) if osc_chunk is not None: subchunks.append(osc_chunk) if len(subchunks): new_chunk = sum(subchunks) else: new_chunk = numpy.zeros(config.CHUNK_SIZE) # If we exceed the maximum amplitude, handle it gracefully chunk_amplitude = amplitude.find_amplitude(new_chunk) if chunk_amplitude > config.MAX_AMPLITUDE: # Normalize the amplitude chunk to mitigate immediate clipping new_chunk = amplitude.normalize_amplitude(new_chunk, config.MAX_AMPLITUDE) # Pick some of the offending oscillators (and some random others) # and lower their drift targets avg_amp = (sum(osc.amplitude.value for osc in oscillators) / len(oscillators)) for osc in oscillators: if (osc.amplitude.value > avg_amp and rand.prob_bool(0.1) or rand.prob_bool(0.01)): osc.amplitude.drift_target = rand.weighted_rand( [(-5, 1), (0, 10)]) osc.amplitude.change_rate = rand.weighted_rand( osc.amplitude.change_rate_weights) return new_chunk.astype(config.SAMPLE_DATA_TYPE).tostring()
[ "def", "build_chunk", "(", "oscillators", ")", ":", "step_random_processes", "(", "oscillators", ")", "subchunks", "=", "[", "]", "for", "osc", "in", "oscillators", ":", "osc", ".", "amplitude", ".", "step_amp", "(", ")", "osc_chunk", "=", "osc", ".", "get...
Build an audio chunk and progress the oscillator states. Args: oscillators (list): A list of oscillator.Oscillator objects to build chunks from Returns: str: a string of audio sample bytes ready to be written to a wave file
[ "Build", "an", "audio", "chunk", "and", "progress", "the", "oscillator", "states", "." ]
train
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/examples/waves/waves.py#L182-L221
concordusapps/alchemist
alchemist/db/operations/shell.py
shell
def shell(database='default'): """Runs the command-line client for the specified database. """ target = engine[database] dialect = engine[database].dialect.name if dialect == 'mysql': args = ['mysql'] if target.url.username: args += ['--user=%s' % target.url.username] if target.url.password: args += ['--password=%s' % target.url.password] if 'unix_socket' in target.url.query: args += ["--socket=%s" % target.url.query['unix_socket']] elif target.url.host: args += ["--host=%s" % target.url.host] if target.url.port: args += ["--port=%s" % target.url.port] if target.url.database: args += [target.url.database] elif dialect == 'sqlite': args = ['sqlite3', target.url.database] else: # pragma: nocoverage raise RuntimeError( 'Database shell not available for the dialect %r' % dialect) os.execvp(args[0], args)
python
def shell(database='default'): """Runs the command-line client for the specified database. """ target = engine[database] dialect = engine[database].dialect.name if dialect == 'mysql': args = ['mysql'] if target.url.username: args += ['--user=%s' % target.url.username] if target.url.password: args += ['--password=%s' % target.url.password] if 'unix_socket' in target.url.query: args += ["--socket=%s" % target.url.query['unix_socket']] elif target.url.host: args += ["--host=%s" % target.url.host] if target.url.port: args += ["--port=%s" % target.url.port] if target.url.database: args += [target.url.database] elif dialect == 'sqlite': args = ['sqlite3', target.url.database] else: # pragma: nocoverage raise RuntimeError( 'Database shell not available for the dialect %r' % dialect) os.execvp(args[0], args)
[ "def", "shell", "(", "database", "=", "'default'", ")", ":", "target", "=", "engine", "[", "database", "]", "dialect", "=", "engine", "[", "database", "]", ".", "dialect", ".", "name", "if", "dialect", "==", "'mysql'", ":", "args", "=", "[", "'mysql'",...
Runs the command-line client for the specified database.
[ "Runs", "the", "command", "-", "line", "client", "for", "the", "specified", "database", "." ]
train
https://github.com/concordusapps/alchemist/blob/822571366271b5dca0ac8bf41df988c6a3b61432/alchemist/db/operations/shell.py#L7-L42
funkybob/knights-templater
astpp.py
parseprint
def parseprint(code, filename="<string>", mode="exec", **kwargs): """Parse some code from a string and pretty-print it.""" node = parse(code, mode=mode) # An ode to the code print(dump(node, **kwargs))
python
def parseprint(code, filename="<string>", mode="exec", **kwargs): """Parse some code from a string and pretty-print it.""" node = parse(code, mode=mode) # An ode to the code print(dump(node, **kwargs))
[ "def", "parseprint", "(", "code", ",", "filename", "=", "\"<string>\"", ",", "mode", "=", "\"exec\"", ",", "*", "*", "kwargs", ")", ":", "node", "=", "parse", "(", "code", ",", "mode", "=", "mode", ")", "# An ode to the code", "print", "(", "dump", "("...
Parse some code from a string and pretty-print it.
[ "Parse", "some", "code", "from", "a", "string", "and", "pretty", "-", "print", "it", "." ]
train
https://github.com/funkybob/knights-templater/blob/b15cdbaae7d824d02f7f03ca04599ae94bb759dd/astpp.py#L49-L52
klahnakoski/mo-times
mo_times/vendor/dateutil/tzwin.py
picknthweekday
def picknthweekday(year, month, dayofweek, hour, minute, whichweek): """dayofweek == 0 means Sunday, whichweek 5 means last instance""" first = datetime.datetime(year, month, 1, hour, minute) weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1)) for n in range(whichweek): dt = weekdayone+(whichweek-n)*ONEWEEK if dt.month == month: return dt
python
def picknthweekday(year, month, dayofweek, hour, minute, whichweek): """dayofweek == 0 means Sunday, whichweek 5 means last instance""" first = datetime.datetime(year, month, 1, hour, minute) weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1)) for n in range(whichweek): dt = weekdayone+(whichweek-n)*ONEWEEK if dt.month == month: return dt
[ "def", "picknthweekday", "(", "year", ",", "month", ",", "dayofweek", ",", "hour", ",", "minute", ",", "whichweek", ")", ":", "first", "=", "datetime", ".", "datetime", "(", "year", ",", "month", ",", "1", ",", "hour", ",", "minute", ")", "weekdayone",...
dayofweek == 0 means Sunday, whichweek 5 means last instance
[ "dayofweek", "==", "0", "means", "Sunday", "whichweek", "5", "means", "last", "instance" ]
train
https://github.com/klahnakoski/mo-times/blob/e64a720b9796e076adeb0d5773ec6915ca045b9d/mo_times/vendor/dateutil/tzwin.py#L163-L170