docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
def print_variant(variant_line, outfile=None, silent=False):
    """Print a variant line.

    Header lines (starting with ``#``) are ignored. If a result file is
    provided the variant will be appended to the file, otherwise it is
    printed to stdout unless ``silent`` is set.

    Args:
        variant_line (str): The raw variant line.
        outfile (FileHandle): An opened file handle.
        silent (bool): If True, suppress printing to stdout.
    """
    stripped = variant_line.rstrip()
    if stripped.startswith('#'):
        return
    if outfile:
        outfile.write("%s\n" % stripped)
    elif not silent:
        print(stripped)
    return
def lines_from_file(path, as_interned=False, encoding=None):
    """Create a list of file lines from a given filepath.

    Args:
        path (str): File path.
        as_interned (bool): Return a list of "interned" strings (default False).
        encoding (str): Text encoding passed to ``io.open``.

    Returns:
        list: File line list.
    """
    with io.open(path, encoding=encoding) as handle:
        raw = handle.read().splitlines()
    if as_interned:
        return [sys.intern(ln) for ln in raw]
    return raw
def lines_from_stream(f, as_interned=False):
    """Create a list of file lines from a given file stream.

    Args:
        f (io.TextIOWrapper): File stream.
        as_interned (bool): Return a list of "interned" strings (default False).

    Returns:
        list: File line list.
    """
    content = f.read().splitlines()
    return [sys.intern(ln) for ln in content] if as_interned else content
def lines_from_string(string, as_interned=False):
    """Create a list of file lines from a given string.

    Args:
        string (str): File string.
        as_interned (bool): Return a list of "interned" strings (default False).

    Returns:
        list: File line list.
    """
    if not as_interned:
        return string.splitlines()
    return [sys.intern(ln) for ln in string.splitlines()]
def write(self, path=None, *args, **kwargs):
    """Perform formatting and write the formatted string to a file or stdout.

    Optional arguments are forwarded to :meth:`format` to format the
    editor's contents. If no file path is given, prints to standard output.

    Args:
        path (str): Full file path (default None, prints to stdout).
        *args: Positional arguments for :meth:`format`.
        **kwargs: Keyword arguments for :meth:`format`.
    """
    text = self.format(*args, **kwargs)
    if path is None:
        print(text)
        return
    with io.open(path, 'w', newline="") as handle:
        handle.write(text)
def format(self, *args, **kwargs):
    """Format the string representation of the editor.

    Args:
        inplace (bool): If True, overwrite the editor's contents with the
            formatted contents instead of returning them.

    Returns:
        str: The formatted text, or None when ``inplace`` is True.
    """
    inplace = kwargs.pop("inplace", False)
    formatted = str(self).format(*args, **kwargs)
    if inplace:
        self._lines = formatted.splitlines()
    else:
        return formatted
def head(self, n=10):
    """Display the top of the file.

    Args:
        n (int): Number of lines to display (default 10).
    """
    lines = repr(self).split('\n')
    print('\n'.join(lines[:n]), end=' ')
def insert(self, lines=None):
    """Insert lines into the editor.

    Note:
        To insert before the first line, use key 0; to insert after the
        last line use a key equal to the current length.

    Args:
        lines (dict): Dictionary of lines of form (lineno, string).
    """
    for offset, (lineno, text) in enumerate(lines.items()):
        pos = lineno + offset  # earlier insertions shift later positions
        self._lines = self._lines[:pos] + [text] + self._lines[pos:]
def delete_lines(self, lines):
    """Delete all lines with the given line numbers.

    Args:
        lines (list): Integers corresponding to line numbers to delete.
    """
    for removed, lineno in enumerate(lines):
        # each prior deletion shifts subsequent indices down by one
        del self[lineno - removed]
From the editor's current cursor position find the next instance of the
given string.
Args:
strings (iterable): String or strings to search for
Returns:
tup (tuple): Tuple of cursor position and line or None if not found
Note:
This function cycles t... | def find_next(self, *strings, **kwargs):
start = kwargs.pop("start", None)
keys_only = kwargs.pop("keys_only", False)
staht = start if start is not None else self.cursor
for start, stop in [(staht, len(self)), (0, staht)]:
for i in range(start, stop):
... | 1,038,796 |
Search the editor for lines matching the regular expression.
re.MULTILINE is not currently supported.
Args:
\*patterns: Regular expressions to search each line for
keys_only (bool): Only return keys
flags (re.FLAG): flags passed to re.search
Returns:
... | def regex(self, *patterns, **kwargs):
start = kwargs.pop("start", 0)
stop = kwargs.pop("stop", None)
keys_only = kwargs.pop("keys_only", False)
flags = kwargs.pop("flags", 0)
results = {pattern: [] for pattern in patterns}
stop = stop if stop is not None else -1
... | 1,038,797 |
def replace(self, pattern, replacement):
    """Replace all instances of a pattern with a replacement.

    Args:
        pattern (str): Pattern to replace.
        replacement (str): Text to insert.
    """
    for lineno, text in enumerate(self):
        if pattern not in text:
            continue
        self[lineno] = text.replace(pattern, replacement)
Returns the result of tab-separated pandas.read_csv on
a subset of the file.
Args:
start (int): line number where structured data starts
stop (int): line number where structured data stops
ncol (int or list): the number of columns in the structured
da... | def pandas_dataframe(self, start, stop, ncol, **kwargs):
try:
int(start)
int(stop)
except TypeError:
print('start and stop must be ints')
try:
ncol = int(ncol)
return pd.read_csv(six.StringIO('\n'.join(self[start:stop])), delim... | 1,038,799 |
Determines whether a type is a List[...].
How to do this varies for different Python versions, due to the
typing library not having a stable API. This functions smooths
over the differences.
Args:
type_: The type to check.
Returns:
True iff it's a List[...something...]. | def is_generic_list(type_: Type) -> bool:
if hasattr(typing, '_GenericAlias'):
# 3.7
return (isinstance(type_, typing._GenericAlias) and # type: ignore
type_.__origin__ is list)
else:
# 3.6 and earlier
return (isinstance(type_, typing.GenericMeta) and
... | 1,038,808 |
Determines whether a type is a Dict[...].
How to do this varies for different Python versions, due to the
typing library not having a stable API. This functions smooths
over the differences.
Args:
type_: The type to check.
Returns:
True iff it's a Dict[...something...]. | def is_generic_dict(type_: Type) -> bool:
if hasattr(typing, '_GenericAlias'):
# 3.7
return (isinstance(type_, typing._GenericAlias) and # type: ignore
type_.__origin__ is dict)
else:
# 3.6 and earlier
return (isinstance(type_, typing.GenericMeta) and
... | 1,038,809 |
Determines whether a type is a Union[...].
How to do this varies for different Python versions, due to the
typing library not having a stable API. This functions smooths
over the differences.
Args:
type_: The type to check.
Returns:
True iff it's a Union[...something...]. | def is_generic_union(type_: Type) -> bool:
if hasattr(typing, '_GenericAlias'):
# 3.7
return (isinstance(type_, typing._GenericAlias) and # type: ignore
type_.__origin__ is Union)
else:
if hasattr(typing, '_Union'):
# 3.6
return isinstance... | 1,038,810 |
def generic_type_args(type_: Type) -> List[Type]:
    """Get the type argument list for the given generic type.

    Given ``List[int]`` this returns ``[int]``; given ``Union[int, str]``
    it returns ``[int, str]``.

    Args:
        type_: The generic type to extract arguments from.

    Returns:
        A list of the type arguments.
    """
    union_args = getattr(type_, '__union_params__', None)
    if union_args is not None:
        # Python 3.5 spelled Union arguments differently
        return list(union_args)
    return list(type_.__args__)
Convert a type to a human-readable description.
This is used for generating nice error messages. We want users \
to see a nice readable text, rather than something like \
"typing.List<~T>[str]".
Args:
type_: The type to represent.
Returns:
A human-readable description. | def type_to_desc(type_: Type) -> str:
scalar_type_to_str = {
str: 'string',
int: 'int',
float: 'float',
bool: 'boolean',
None: 'null value',
type(None): 'null value'
}
if type_ in scalar_type_to_str:
return scalar_type_to_str[type_]
if is_ge... | 1,038,812 |
def set_document_type(loader_cls: Type, type_: Type) -> None:
    """Set the type corresponding to the whole document.

    Args:
        loader_cls: The loader class to set the document type for.
        type_: The type the loader should process the document into.
    """
    loader_cls.document_type = type_
    # lazily create the registry the first time a loader is configured
    if not hasattr(loader_cls, '_registered_classes'):
        loader_cls._registered_classes = {}
Registers one or more classes with a YAtiML loader.
Once a class has been registered, it can be recognized and \
constructed when reading a YAML text.
Args:
loader_cls: The loader to register the classes with.
classes: The class(es) to register, a plain Python class or a \
... | def add_to_loader(loader_cls: Type, classes: List[Type]) -> None:
if not isinstance(classes, list):
classes = [classes] # type: ignore
for class_ in classes:
tag = '!{}'.format(class_.__name__)
if issubclass(class_, enum.Enum):
loader_cls.add_constructor(tag, EnumConst... | 1,038,849 |
Convert a type to the corresponding YAML tag.
Args:
type_: The type to convert
Returns:
A string containing the YAML tag. | def __type_to_tag(self, type_: Type) -> str:
if type_ in scalar_type_to_tag:
return scalar_type_to_tag[type_]
if is_generic_list(type_):
return 'tag:yaml.org,2002:seq'
if is_generic_dict(type_):
return 'tag:yaml.org,2002:map'
if type_ in se... | 1,038,852 |
Removes syntactic sugar from the node.
This calls yatiml_savorize(), first on the class's base \
classes, then on the class itself.
Args:
node: The node to modify.
expected_type: The type to assume this type is. | def __savorize(self, node: yaml.Node, expected_type: Type) -> yaml.Node:
logger.debug('Savorizing node assuming type {}'.format(
expected_type.__name__))
for base_class in expected_type.__bases__:
if base_class in self._registered_classes.values():
node ... | 1,038,853 |
Processes a node.
This is the main function that implements yatiml's \
functionality. It figures out how to interpret this node \
(recognition), then applies syntactic sugar, and finally \
recurses to the subnodes, if any.
Args:
node: The node to process.
... | def __process_node(self, node: yaml.Node,
expected_type: Type) -> yaml.Node:
logger.info('Processing node {} expecting type {}'.format(
node, expected_type))
# figure out how to interpret this node
recognized_types, message = self.__recognizer.recogni... | 1,038,854 |
def __init__(self, campfire, id):
    """Initialize a room.

    Args:
        campfire (:class:`Campfire`): Campfire instance.
        id (str): Room ID.
    """
    super(Room, self).__init__(campfire)
    self._load(id)
def set_name(self, name):
    """Set the room name.

    Only administrators may rename a room.

    Args:
        name (str): New room name.

    Returns:
        bool: Success.
    """
    if not self._campfire.get_user().admin:
        return False
    result = self._connection.put(
        "room/%s" % self.id,
        {"room": {"name": name}}
    )
    if result["success"]:
        self._load()
    return result["success"]
def set_topic(self, topic):
    """Set the room topic.

    Args:
        topic (str): Topic; a falsy value clears the topic.

    Returns:
        bool: Success.
    """
    payload = {"room": {"topic": topic or ''}}
    result = self._connection.put("room/%s" % self.id, payload)
    if result["success"]:
        self._load()
    return result["success"]
Post a message.
Args:
message (:class:`Message` or string): Message
Returns:
bool. Success | def speak(self, message):
campfire = self.get_campfire()
if not isinstance(message, Message):
message = Message(campfire, message)
result = self._connection.post(
"room/%s/speak" % self.id,
{"message": message.get_data()},
parse_data=True... | 1,039,078 |
Parses a unit file and updates self._data['options']
Args:
file_handle (file): a file-like object (supporting read()) containing a unit
Returns:
True: The file was successfuly parsed and options were updated
Raises:
IOError: from_file was specified and it d... | def _set_options_from_file(self, file_handle):
# TODO: Find a library to handle this unit file parsing
# Can't use configparser, it doesn't handle multiple entries for the same key in the same section
# This is terribly naive
# build our output here
options = []
... | 1,039,269 |
Add an option to a section of the unit file
Args:
section (str): The name of the section, If it doesn't exist it will be created
name (str): The name of the option to add
value (str): The value of the option
Returns:
True: The item was added | def add_option(self, section, name, value):
# Don't allow updating units we loaded from fleet, it's not supported
if self._is_live():
raise RuntimeError('Submitted units cannot update their options')
option = {
'section': section,
'name': name,
... | 1,039,270 |
Remove an option from a unit
Args:
section (str): The section to remove from.
name (str): The item to remove.
value (str, optional): If specified, only the option matching this value will be removed
If not specified, all options with ``name... | def remove_option(self, section, name, value=None):
# Don't allow updating units we loaded from fleet, it's not supported
if self._is_live():
raise RuntimeError('Submitted units cannot update their options')
removed = 0
# iterate through a copy of the options
... | 1,039,271 |
Update the desired state of a unit.
Args:
state (str): The desired state for the unit, must be one of ``_STATES``
Returns:
str: The updated state
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400
ValueError: An invalid val... | def set_desired_state(self, state):
if state not in self._STATES:
raise ValueError(
'state must be one of: {0}'.format(
self._STATES
))
# update our internal structure
self._data['desiredState'] = state
# if we ha... | 1,039,273 |
def __init__(self, password, testnet=False):
    """Initialize a BIP32 wallet.

    Addresses returned by the wallet are of the form ``(path, address)``.

    Args:
        password (bytes or str): Master secret for the wallet; a ``str``
            is encoded to bytes first.
        testnet (bool): Whether to use the bitcoin testnet or mainnet
            (default False, i.e. mainnet).
    """
    if isinstance(password, str):
        password = password.encode()
    netcode = 'XTN' if testnet else 'BTC'
    self.wallet = BIP32Node.from_master_secret(password, netcode=netcode)
    self.root_address = ('', self.wallet.address())
Walk over a scope tree and mangle symbol names.
Args:
toplevel: Defines if global scope should be mangled or not. | def mangle_scope_tree(root, toplevel):
def mangle(scope):
# don't mangle global scope if not specified otherwise
if scope.get_enclosing_scope() is None and not toplevel:
return
for name in scope.symbols:
mangled_name = scope.get_next_mangled_name()
sc... | 1,039,810 |
Actually serialize input.
Args:
struct: structure to serialize to
fmt: format to serialize to
encoding: encoding to use while serializing
Returns:
encoded serialized structure
Raises:
various sorts of errors raised by libraries while serializing | def _do_serialize(struct, fmt, encoding):
res = None
_check_lib_installed(fmt, 'serialize')
if fmt == 'ini':
config = configobj.ConfigObj(encoding=encoding)
for k, v in struct.items():
config[k] = v
res = b'\n'.join(config.write())
elif fmt in ['json', 'json5']:... | 1,040,014 |
Try to guess format of given bytestring.
Args:
inp: byte string to guess format of
Returns:
guessed format | def _guess_fmt_from_bytes(inp):
stripped = inp.strip()
fmt = None
ini_section_header_re = re.compile(b'^\[([\w-]+)\]')
if len(stripped) == 0:
# this can be anything, so choose yaml, for example
fmt = 'yaml'
else:
if stripped.startswith(b'<'):
fmt = 'xml'
... | 1,040,018 |
def __init__(self, cause, original_tb=''):
    """Wrapper for all errors that occur during anymarkup calls.

    Args:
        cause: Either a reraised exception or a string describing the cause.
        original_tb (str): Traceback of the original error, if any.
    """
    super(AnyMarkupError, self).__init__()
    self.cause = cause
    self.original_tb = original_tb
Find closest station from the new(er) list.
Warning: There may be some errors with smaller non US stations.
Args:
latitude (float)
longitude (float)
Returns:
tuple (station_code (str), station_name (str)) | def closest_eere(latitude, longitude):
with open(env.SRC_PATH + '/eere_meta.csv') as eere_meta:
stations = csv.DictReader(eere_meta)
d = 9999
station_code = ''
station_name = ''
for station in stations:
new_dist = great_circle((latitude, longitude),
... | 1,040,471 |
def eere_station(station_code):
    """Look up metadata for a weather station.

    Args:
        station_code (str): Station code.

    Returns:
        dict: Station information.

    Raises:
        KeyError: If the station code is not found.
    """
    with open(env.SRC_PATH + '/eere_meta.csv') as meta:
        for row in csv.DictReader(meta):
            if row['station_code'] == station_code:
                return row
    raise KeyError('station not found')
Setup an HTTP connection over an already connected socket.
Args:
host: ignored (exists for compatibility with parent)
post: ignored (exists for compatibility with parent)
strict: ignored (exists for compatibility with parent)
timeout: ignored ... | def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
# do the needful
httplib.HTTPConnection.__init__(self, host, port)
# looks like the python2 and python3 versions of httplib differ
# python2, executables any callables and returns the result as pr... | 1,040,480 |
Constructor.
Args:
channel: A grpc.Channel. | def __init__(self, channel):
self.Classify = channel.unary_unary(
'/tensorflow.serving.PredictionService/Classify',
request_serializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest.SerializeToString,
response_deserializer=tensorflow__serving_dot_apis_dot_cla... | 1,040,624 |
def forward_tcp(self, host, port):
    """Open a connection to host:port via an ssh tunnel.

    Args:
        host (str): The host to connect to.
        port (int): The port to connect to.

    Returns:
        A socket-like object that is connected to the provided host:port.
    """
    peer = self.transport.getpeername()
    return self.transport.open_channel('direct-tcpip', (host, port), peer)
Split a string in the format of '<host>:<port>' into it's component parts
default_port will be used if a port is not included in the string
Args:
str ('<host>' or '<host>:<port>'): A string to split into it's parts
Returns:
two item tuple: (host, port)
Raises:... | def _split_hostport(self, hostport, default_port=None):
try:
(host, port) = hostport.split(':', 1)
except ValueError: # no colon in the string so make our own port
host = hostport
if default_port is None:
raise ValueError('No port found in ... | 1,041,133 |
Convert a URL into a host / port, or into a path to a unix domain socket
Args:
endpoint (str): A URL parsable by urlparse
Returns:
3 item tuple: (host, port, path).
host and port will None, and path will be not None if a a unix domain socket URL is passed
... | def _endpoint_to_target(self, endpoint):
parsed = urlparse.urlparse(endpoint)
scheme = parsed[0]
hostport = parsed[1]
if 'unix' in scheme:
return (None, None, unquote(hostport))
if scheme == 'https':
target_port = 443
else:
t... | 1,041,134 |
Generate a ProxyInfo class from a connected SSH transport
Args:
_ (None): Ignored. This is just here as the ProxyInfo spec requires it.
Returns:
SSHTunnelProxyInfo: A ProxyInfo with an active socket tunneled through SSH | def _get_proxy_info(self, _=None):
# parse the fleet endpoint url, to establish a tunnel to that host
(target_host, target_port, target_path) = self._endpoint_to_target(self._endpoint)
# implement the proxy_info interface from httplib which requires
# that we accept a scheme, a... | 1,041,135 |
Make a single request to the fleet API endpoint
Args:
method (str): A dot delimited string indicating the method to call. Example: 'Machines.List'
*args: Passed directly to the method being called.
**kwargs: Passed directly to the method being called.
Returns:
... | def _single_request(self, method, *args, **kwargs):
# The auto generated client binding require instantiating each object you want to call a method on
# For example to make a request to /machines for the list of machines you would do:
# self._service.Machines().List(**kwargs)
#... | 1,041,136 |
Delete a unit from the cluster
Args:
unit (str, Unit): The Unit, or name of the unit to delete
Returns:
True: The unit was deleted
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400 | def destroy_unit(self, unit):
# if we are given an object, grab it's name property
# otherwise, convert to unicode
if isinstance(unit, Unit):
unit = unit.name
else:
unit = str(unit)
self._single_request('Units.Delete', unitName=unit)
ret... | 1,041,140 |
def get_unit(self, name):
    """Retrieve a specific unit from the fleet cluster by name.

    Args:
        name (str): The name of the unit to fetch.

    Returns:
        Unit: The unit identified by ``name`` in the fleet cluster.

    Raises:
        fleet.v1.errors.APIError: Fleet returned a response code >= 400.
    """
    data = self._single_request('Units.Get', unitName=name)
    return Unit(client=self, data=data)
def list_unit_states(self, machine_id=None, unit_name=None):
    """Yield the current UnitState objects for the fleet cluster.

    Args:
        machine_id (str): Filter UnitState objects to those originating
            from a specific machine.
        unit_name (str): Filter UnitState objects to those related to a
            specific unit.

    Yields:
        UnitState: One state entry per unit per page.
    """
    pages = self._request('UnitState.List', machineID=machine_id, unitName=unit_name)
    for page in pages:
        for state in page.get('states', []):
            yield UnitState(data=state)
def tmybasename(usaf):
    """Basename for a USAF base.

    Args:
        usaf (str): USAF code.

    Returns:
        str: The basename for the USAF code, or None if not found.
    """
    # Fixes two defects: 'line.find(usaf) is not -1' identity-compared an
    # int (works only by CPython small-int caching and warns on 3.8+), and
    # the file handle was never closed.
    with open(env.SRC_PATH + '/tmy3.csv') as url_file:
        for line in url_file:
            if usaf in line:
                return line.rstrip().partition(',')[0]
change TMY3 date to an arbitrary year.
Args:
tmy_date (datetime): date to mangle.
year (int): desired year.
Returns:
(None) | def normalize_date(tmy_date, year):
month = tmy_date.month
day = tmy_date.day - 1
hour = tmy_date.hour
# hack to get around 24:00 notation
if month is 1 and day is 0 and hour is 0:
year = year + 1
return datetime.datetime(year, month, 1) + \
datetime.timedelta(days=day, hour... | 1,042,282 |
initialize.
Args:
usaf (str)
Returns:
(object) | def __init__(self, usaf):
filename = env.WEATHER_DATA_PATH + '/' + usaf + 'TYA.csv'
self.csvfile = None
try:
self.csvfile = open(filename)
except IOError:
logger.info("%s not found", filename)
download(_tmy_url(usaf), filename)
sel... | 1,042,284 |
def create_from_settings(settings):
    """Create a connection with the given settings.

    Args:
        settings (dict): A dictionary of settings.

    Returns:
        :class:`Connection`: The new connection.
    """
    return Connection(
        settings["url"],
        settings["base_url"],
        settings["user"],
        settings["password"],
        authorizations=settings["authorizations"],
        debug=settings["debug"],
    )
Parses a response.
Args:
text (str): Text to parse
Kwargs:
key (str): Key to look for, if any
Returns:
Parsed value
Raises:
ValueError | def parse(self, text, key=None):
try:
data = json.loads(text)
except ValueError as e:
raise ValueError("%s: Value: [%s]" % (e, text))
if data and key:
if key not in data:
raise ValueError("Invalid response (key %s not found): %s" % (k... | 1,042,615 |
Initialize.
Args:
campfire (:class:`Campfire`): Campfire instance
data (dict or str): If string, message type will be set to either paste or text | def __init__(self, campfire, data):
dataType = type(data)
if dataType == types.StringType or dataType == types.UnicodeType:
messageType = self._TYPE_PASTE if data.find("\n") >= 0 else self._TYPE_TEXT
if messageType == self._TYPE_TEXT:
matches = re.match("... | 1,042,684 |
Initialize.
Args:
subdomain (str): Campfire subdomain
username (str): User
password (str): pasword
Kwargs:
ssl (bool): enabled status of SSL
currentUser (:class:`User`): If specified, don't auto load current user, use this one instead | def __init__(self, subdomain, username, password, ssl=False, currentUser=None):
self.base_url = "http%s://%s.campfirenow.com" % ("s" if ssl else "", subdomain)
self._settings = {
"subdomain": subdomain,
"username": username,
"password": password,
... | 1,042,954 |
def search(self, terms):
    """Search transcripts.

    Args:
        terms (str): Terms for the search.

    Returns:
        list: Matching :class:`Message` objects, or the raw falsy value
        when nothing was returned.
    """
    quoted = urllib.quote_plus(terms)
    messages = self._connection.get("search/%s" % quoted, key="messages")
    if not messages:
        return messages
    return [Message(self, message) for message in messages]
def __init__(self, campfire, id, current=False):
    """Initialize a user.

    Args:
        campfire (:class:`Campfire`): Campfire instance.
        id (str): User ID.
        current (bool): Whether this is the current user.
    """
    super(User, self).__init__(campfire)
    data = self._connection.get("users/%s" % id, key="user")
    self.set_data(data)
    self.current = current
def attach(self, observer):
    """Attach an observer.

    Args:
        observer (callable): A function to be called when new messages
            arrive.

    Returns:
        :class:`Stream`: Current instance, to allow chaining.
    """
    # idiom fix: 'not observer in ...' -> 'observer not in ...'
    if observer not in self._observers:
        self._observers.append(observer)
    return self
def incoming(self, messages):
    """Handle incoming messages.

    Args:
        messages (tuple): Messages (each message is a dict).
    """
    if not self._observers:
        return
    campfire = self._room.get_campfire()
    for message in messages:
        for observer in self._observers:
            # each observer gets its own Message instance, as before
            observer(Message(campfire, message))
def __init__(self, settings, room_id, pause=1):
    """Initialize the stream process.

    Args:
        settings (dict): Settings used to create a :class:`Connection` instance.
        room_id (int): Room ID.
        pause (int): Pause in seconds between requests (default 1).
    """
    Process.__init__(self)
    self._connection = Connection.create_from_settings(settings)
    self._room_id = room_id
    self._pause = pause
    self._callback = None
    self._queue = None
    self._last_message_id = None
def received(self, messages):
    """Handle newly arrived messages.

    Args:
        messages (tuple): Messages; nothing happens when empty.
    """
    if not messages:
        return
    if self._queue:
        self._queue.put_nowait(messages)
    if self._callback:
        self._callback(messages)
def __init__(self, settings, room_id):
    """Initialize the twisted-based stream.

    Args:
        settings (dict): Settings used to create a :class:`Connection` instance.
        room_id (int): Room ID.
    """
    StreamProcess.__init__(self, settings, room_id)
    self._protocol = None
    self._reactor = self._connection.get_twisted_reactor()
Callback issued by twisted when new line arrives.
Args:
line (str): Incoming line | def lineReceived(self, line):
while self._in_header:
if line:
self._headers.append(line)
else:
http, status, message = self._headers[0].split(" ", 2)
status = int(status)
if status == 200:
self.f... | 1,044,100 |
Process data.
Args:
data (str): Incoming data | def rawDataReceived(self, data):
if self._len_expected is not None:
data, extra = data[:self._len_expected], data[self._len_expected:]
self._len_expected -= len(data)
else:
extra = ""
self._buffer += data
if self._len_expected == 0:
... | 1,044,101 |
Constructs a :class:`Spoolverb` instance from the string
representation of the given verb.
Args:
verb (str): representation of the verb e.g.:
``'ASCRIBESPOOL01LOAN12/150526150528'``. Can also be in
binary format (:obj:`bytes`): ``b'ASCRIBESPOOL01PIECE'``.
... | def from_verb(cls, verb):
pattern = r'^(?P<meta>[A-Z]+)(?P<version>\d+)(?P<action>[A-Z]+)(?P<arg1>\d+)?(\/(?P<arg2>\d+))?$'
try:
verb = verb.decode()
except AttributeError:
pass
match = re.match(pattern, verb)
if not match:
raise Spool... | 1,044,679 |
Selects the inputs for the spool transaction.
Args:
address (str): bitcoin address to select inputs for
nfees (int): number of fees
ntokens (int): number of tokens
min_confirmations (Optional[int]): minimum number of required
confirmations; defaul... | def select_inputs(self, address, nfees, ntokens, min_confirmations=6):
unspents = self._t.get(address, min_confirmations=min_confirmations)['unspents']
unspents = [u for u in unspents if u not in self._spents.queue]
if len(unspents) == 0:
raise Exception("No spendable output... | 1,044,742 |
Start producing.
Args:
consumer: Consumer | def startProducing(self, consumer):
self._consumer = consumer
self._current_deferred = defer.Deferred()
self._sent = 0
self._paused = False
if not hasattr(self, "_chunk_headers"):
self._build_chunk_headers()
if self._data:
block = ""
... | 1,045,746 |
def _send_to_consumer(self, block):
    """Send a block of bytes to the consumer.

    Args:
        block (str): Block of bytes.
    """
    self._consumer.write(block)
    self._sent = self._sent + len(block)
    if self._callback:
        # report progress: bytes sent so far out of total length
        self._callback(self._sent, self.length)
Returns the header of the encoding of this parameter.
Args:
name (str): Field name
Kwargs:
is_file (bool): If true, this is a file field
Returns:
array. Headers | def _headers(self, name, is_file=False):
value = self._files[name] if is_file else self._data[name]
_boundary = self.boundary.encode("utf-8") if isinstance(self.boundary, unicode) else urllib.quote_plus(self.boundary)
headers = ["--%s" % _boundary]
if is_file:
d... | 1,045,754 |
def _file_type(self, field):
    """Return the MIME type for the given file field.

    Args:
        field (str): File field.

    Returns:
        str: The guessed MIME type.
    """
    guessed = mimetypes.guess_type(self._files[field])[0]
    if isinstance(guessed, unicode):
        return guessed.encode("utf-8")
    return str(guessed)
def _file_size(self, field):
    """Return the file size for the given file field.

    The size is cached in ``self._file_lengths``.

    Args:
        field (str): File field.

    Returns:
        int: File size in bytes (0 if the file cannot be stat'ed).
    """
    try:
        # os.path.getsize stats the file without opening it; the original
        # opened a handle just to fstat it and leaked it on error.
        size = os.path.getsize(self._files[field])
    except OSError:
        # original bare 'except:' swallowed every error; only filesystem
        # errors are expected here
        size = 0
    self._file_lengths[field] = size
    return self._file_lengths[field]
时间戳转换为日期字符串
Args:
ts: 待转换的时间戳
dt_format: 目标日期字符串格式
Returns: 日期字符串 | def ts_to_dt_str(ts, dt_format='%Y-%m-%d %H:%M:%S'):
return datetime.datetime.fromtimestamp(int(ts)).strftime(dt_format) | 1,045,856 |
Two percent high design temperature for a location.
Degrees in Celcius
Args:
station_code (str): Weather Station Code
Returns:
float degrees Celcius | def twopercent(station_code):
# (DB=>MWB) 2%, MaxDB=
temp = None
try:
fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
_basename(station_code, 'ddy')))
for line in fin:
value = re.search(, line)
if value:
temp = float(val... | 1,046,483 |
Extreme Minimum Design Temperature for a location.
Degrees in Celcius
Args:
station_code (str): Weather Station Code
Returns:
float degrees Celcius | def minimum(station_code):
temp = None
fin = None
try:
fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
_basename(station_code, 'ddy')))
except IOError:
logger.info("File not found")
download_extract(_eere_url(station_code))
fin = open('%s/%... | 1,046,484 |
Data for a weather station.
Args:
station_code (str): Station code of weather station
DST (bool): Weather timestands in daylight savings. Default False | def __init__(self, station_code, DST=False):
filename = env.WEATHER_DATA_PATH + '/' + _basename(station_code)
self.csvfile = None
try:
self.csvfile = open(filename)
except IOError:
logger.info("File not found")
download_extract(_eere_url(stati... | 1,046,485 |
Apply selector to transform each object in objs.
This operates in-place on objs. Empty objects are removed from the list.
Args:
mode: either KEEP (to keep selected items & their ancestors) or DELETE
(to delete selected items and their children). | def apply_filter(objs, selector, mode):
indices_to_delete = []
presumption = DELETE if mode == KEEP else KEEP
for i, obj in enumerate(objs):
timer.log('Applying selector: %s' % selector)
marks = {k: mode for k in selector_to_ids(selector, obj, mode)}
timer.log('done applying sel... | 1,046,514 |
Retrieve the ownership tree of all editions of a piece given the hash.
Args:
hash (str): Hash of the file to check. Can be created with the
:class:`File` class
Returns:
dict: Ownsership tree of all editions of a piece.
.. note:: For now we only support ... | def history(self, hash):
txs = self._t.get(hash, max_transactions=10000)['transactions']
tree = defaultdict(list)
number_editions = 0
for tx in txs:
_tx = self._t.get(tx['txid'])
txid = _tx['txid']
verb_str = BlockchainSpider.check_script(_tx... | 1,047,002 |
Checks for the from, to, and piece address of a SPOOL transaction.
Args:
tx (dict): Transaction payload, as returned by
:meth:`transactions.Transactions.get()`.
.. note:: Formats as returned by JSON-RPC API
``decoderawtransaction`` have yet to be supported.
... | def _get_addresses(tx):
from_address = set([vin['address'] for vin in tx['vins']])
if len(from_address) != 1:
raise InvalidTransactionError("Transaction should have inputs " \
"from only one address {}".format(from_address))
# order... | 1,047,005 |
def _get_time_utc(time_utc_str):
    """Convert a blockr.io-style UTC time string into a unix timestamp.

    Args:
        time_utc_str (str): Time string matching the module-level
            ``TIME_FORMAT`` (``'%Y-%m-%dT%H:%M:%S %Z'``).

    Returns:
        int: Unix timestamp.
    """
    parsed = datetime.strptime(time_utc_str, TIME_FORMAT)
    # timegm treats the struct_time as UTC (unlike time.mktime).
    return int(calendar.timegm(parsed.utctimetuple()))
def mangle(tree, toplevel=False):
    """Mangle identifier names in *tree* in place.

    Args:
        tree: AST to mangle.
        toplevel: Defaults to False. Whether names in the global
            scope should be mangled as well.
    """
    symbols = SymbolTable()
    ScopeTreeVisitor(symbols).visit(tree)
    fill_scope_references(tree)
    mangle_scope_tree(symbols.globals, toplevel)
    NameManglerVisitor().visit(tree)
Initialize.
Args:
settings (dict): Settings used to create a :class:`Connection` instance
room (int): Room
queue (:class:`multiprocessing.Queue`): Queue to share data between processes
files (dict): Dictionary, where key is the field name, and value is the path | def __init__(self, settings, room, queue, files):
Process.__init__(self)
self._room = room
self._queue = queue
self._files = files
self._data = {}
self._connection = Connection.create_from_settings(settings)
self._reactor = None
self._producer = N... | 1,047,448 |
def add_data(self, data):
    """Add POST data fields.

    Args:
        data (dict): key => value dictionary.
    """
    # Lazily create the backing dict the first time data is added.
    store = self._data or {}
    store.update(data)
    self._data = store
save GFS grib file to DATA_PATH.
Args:
dataset(function): naming convention function. eg. pgrb2
timestamp(datetime): ???
path(str): if None defaults to DATA_PATH
products(list): TMP, etc. if None downloads all.
layers(list): surface, etc. if None downloads all.
offs... | def download(timestamp, dataset, path=None, products=None,
levels=None, offset=0):
if path is None:
path = DATA_PATH
closest = timestamp.hour//6*6
filename = dataset(closest, offset)
gfs_timestamp = '%s%02d' % (timestamp.strftime('%Y%m%d'), closest)
url = baseurl(gfs_times... | 1,047,616 |
def message_index(index_url):
    """Fetch a GRIB index file and split it into message records.

    Args:
        index_url(string): URL of the ':'-delimited index file.

    Returns:
        list: messages, one list of fields per index line.
    """
    reader = csv.reader(urllib2.urlopen(index_url), delimiter=':')
    return list(reader)
def __init__(self, campfire, data=None):
    """Initialize a Campfire-bound entity.

    Args:
        campfire (:class:`Campfire`): Campfire Instance

    Kwargs:
        data (dict): Entity data
    """
    super(CampfireEntity, self).__init__(data)
    self._campfire = campfire
    # Only a live Campfire instance can supply a connection.
    self._connection = self._campfire.get_connection() if self._campfire else None
def set_data(self, data=None, datetime_fields=None):
    """Set entity data, parsing selected fields as datetimes.

    Args:
        data (dict): Entity data (defaults to an empty dict).
        datetime_fields (array): Field names whose values should be run
            through ``self._parse_datetime`` before being stored.
    """
    # The previous signature used mutable defaults (data={}, fields=[]);
    # since the body mutates `data` in place, the shared default dict
    # could accumulate state across calls. None-defaults fix that while
    # remaining call-compatible.
    if data is None:
        data = {}
    for field in (datetime_fields or ()):
        if field in data:
            data[field] = self._parse_datetime(data[field])
    super(CampfireEntity, self).set_data(data)
Parses a datetime string from "YYYY/MM/DD HH:MM:SS +HHMM" format
Args:
value (str): String
Returns:
datetime. Datetime | def _parse_datetime(self, value):
offset = 0
pattern = r"\s+([+-]{1}\d+)\Z"
matches = re.search(pattern, value)
if matches:
value = re.sub(pattern, '', value)
offset = datetime.timedelta(hours=int(matches.group(1))/100)
return datetime.datetime.st... | 1,047,751 |
Read the file content and load it as JSON.
Arguments:
file_name (:py:class:`str`): The filename.
Returns:
:py:class:`dict`: The loaded JSON data.
Raises:
:py:class:`FileNotFoundError`: If the file is not found. | def _read_file(file_name):
with open(file_name) as config_file:
data = json.load(config_file)
return data | 1,047,754 |
Format DSPAM headers with passed results, and add them to the message.
Args:
results -- A results dictionary from DspamClient. | def add_dspam_headers(self, results):
for header in self.headers:
hname = self.header_prefix + header
if header.lower() in results:
hvalue = results[header.lower()]
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hnam... | 1,047,967 |
Validate a webfont settings and optionally fill missing ``csspart_path``
option.
Args:
webfont_settings (dict): Webfont settings (an item value from
``settings.ICOMOON_WEBFONTS``).
Returns:
dict: Webfont settings | def extend_webfont_settings(webfont_settings):
if not webfont_settings.get('fontdir_path', False):
raise IcomoonSettingsError(("Webfont settings miss the required key "
"item 'fontdir_path'"))
if not webfont_settings.get('csspart_path', False):
webfont_s... | 1,048,170 |
def rename_next_state_fluent(name: str) -> str:
    """Return the next state fluent canonical name.

    The next-state name is the current name with a prime appended to the
    functor, e.g. ``"visited/1"`` -> ``"visited'/1"``.

    Args:
        name (str): The current state fluent name (``functor/arity``).

    Returns:
        str: The next state fluent name (``functor'/arity``).
    """
    i = name.index('/')
    # Bug fix: the old body stripped the functor's last character
    # (name[:i-1]) and added no prime — the inverse transformation.
    functor = name[:i]
    arity = name[i+1:]
    return "{}'/{}".format(functor, arity)
def rename_state_fluent(name: str) -> str:
    """Return the current state fluent canonical name.

    The current-state name is the next-state name with the trailing prime
    removed from the functor, e.g. ``"visited'/1"`` -> ``"visited/1"``.

    Args:
        name (str): The next state fluent name (``functor'/arity``).

    Returns:
        str: The current state fluent name (``functor/arity``).
    """
    i = name.index('/')
    # Bug fix: the old body kept the prime (name[:i]) and appended another
    # one, yielding "functor''/arity" instead of stripping it.
    functor = name[:i-1]
    arity = name[i+1:]
    return "{}/{}".format(functor, arity)
def run(self, host='127.0.0.1', port=8080):
    """Run the application using a simple WSGI server.

    Blocks forever, serving requests until interrupted.

    Arguments:
        host (str, optional): Host on which to listen.
        port (int, optional): Port number on which to listen.
    """
    from wsgiref.simple_server import make_server
    self._server = make_server(host, port, self)
    self._server.serve_forever()
def route(self, method, pattern):
    """Decorator to add route for a request with any HTTP method.

    Arguments:
        method (str): HTTP method name, e.g. GET, POST, etc.
        pattern (str): Routing pattern the path must match.

    Returns:
        function: Decorator function to add route.
    """
    def register(callback):
        # Register the handler and hand it back untouched so it can
        # still be called (or decorated further) by its original name.
        self._router.add(method, pattern, callback)
        return callback
    return register
Respond to an HTTP request.
Arguments:
environ (dict): Dictionary of environment variables
start_response (callable): Callable to start HTTP response
Returns:
list: List containing a single sequence of bytes. | def __call__(self, environ, start_response):
self.request = Request(environ)
self.response = Response(start_response)
route = self._router.resolve(self.request.method,
self.request.path)
if route is not None:
callback, args, kwar... | 1,048,522 |
Add a route.
Arguments:
method (str): HTTP method, e.g. GET, POST, etc.
pattern (str): Pattern that request paths must match.
callback (str): Route handler that is invoked when a request
path matches the *pattern*. | def add(self, method, pattern, callback):
pat_type, pat = self._normalize_pattern(pattern)
if pat_type == 'literal':
self._literal[method][pat] = callback
elif pat_type == 'wildcard':
self._wildcard[method].append(WildcardRoute(pat, callback))
else:
... | 1,048,525 |
def contains_method(self, method):
    """Check if there is at least one handler for *method*.

    Arguments:
        method (str): HTTP method name, e.g. GET, POST, etc.

    Returns:
        ``True`` if there is at least one route defined for *method*,
        ``False`` otherwise.
    """
    route_tables = (self._literal, self._wildcard, self._regex)
    return any(method in table for table in route_tables)
def resolve(self, method, path):
    """Resolve a request to a route handler.

    Arguments:
        method (str): HTTP method, e.g. GET, POST, etc.
        path (str): Request path.

    Returns:
        tuple or None: A tuple of three items — handler (callable),
        positional arguments (list), keyword arguments (dict) — or the
        result of the non-literal lookup when no literal route matches.
    """
    literal_routes = self._literal.get(method)
    if literal_routes is not None and path in literal_routes:
        # Exact-path routes carry no captured arguments.
        return literal_routes[path], [], {}
    return self._resolve_non_literal_route(method, path)
Resolve a request to a wildcard or regex route handler.
Arguments:
method (str): HTTP method name, e.g. GET, POST, etc.
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (l... | def _resolve_non_literal_route(self, method, path):
for route_dict in (self._wildcard, self._regex):
if method in route_dict:
for route in reversed(route_dict[method]):
callback_data = route.match(path)
if callback_data is not None:
... | 1,048,528 |
Return a normalized form of the pattern.
Normalize the pattern by removing pattern type prefix if it
exists in the pattern. Then return the pattern type and the
pattern as a tuple of two strings.
Arguments:
pattern (str): Route pattern to match request paths
Returns:... | def _normalize_pattern(pattern):
if pattern.startswith('regex:'):
pattern_type = 'regex'
pattern = pattern[len('regex:'):]
elif pattern.startswith('wildcard:'):
pattern_type = 'wildcard'
pattern = pattern[len('wildcard:'):]
elif pattern.st... | 1,048,529 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.